import re
import os
import json
import time
import logging
from os.path import join as os_join
from typing import List, Dict, Any, Union, Optional
from argparse import ArgumentParser
from dataclasses import dataclass, asdict
import numpy as np
import pandas as pd
import datasets
import openai
from sklearn.metrics import classification_report
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from tenacity import retry, wait_random_exponential
from stefutil import *
from zeroshot_classifier.util import *
import zeroshot_classifier.util.utcd as utcd_util
from zeroshot_classifier.preprocess import get_dataset
__all__ = ['ApiCaller', 'PromptMap', 'evaluate']
logger = get_logger('GPT3')
class ApiCaller:
"""
Make API calls to the OpenAI GPT3 completion endpoint
"""
url = 'https://api.openai.com/v1/completions'
def __init__(
self, model: str = 'text-ada-001', batched: bool = False, delay: float = None,
token_path: str = None
):
self.model = model
self.batched = batched
self.delay = delay
token_path = token_path or os_join(u.proj_path, 'auth', 'open-ai.json')
assert os.path.exists(token_path), f'OpenAI token not found at {pl.i(token_path)}'
with open(token_path) as f:
auth = json.load(f)
api_key, org = auth['api-key'], auth['organization']
openai.api_key = api_key
# self.headers = {
# 'Content-Type': 'application/json',
# 'Authorization': f'Bearer {api_key}',
# 'OpenAI-Organization': org
# }
@retry(wait=wait_random_exponential(min=1, max=60 * 30))  # exponential backoff, waiting at most 30 min between retries
def completion(self, **kwargs):
if self.delay:
time.sleep(self.delay)
return openai.Completion.create(**kwargs)
def __call__(self, prompt: Union[str, List], rand_sleep: bool = True, **kwargs) -> Union[str, List[str]]:
if self.batched:
assert isinstance(prompt, list)
else:
assert isinstance(prompt, str)
payload = dict(
model=self.model,
temperature=0, # Generate w/ greedy decoding
stop='\n',
max_tokens=32
)
payload['prompt'] = prompt
payload.update(kwargs)
res = self.completion(**payload)
res = res.choices
if self.batched:
assert len(res) == len(prompt)
ret = [''] * len(prompt)
for e in res:
ret[e.index] = e.text
return ret
else:
assert len(res) == 1
res = res[0]
return res.text
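# Illustrative sketch (not part of the original module): how `ApiCaller` is typically used,
# in single-prompt and batched mode. The prompt strings below are made up, and a valid
# token file at `auth/open-ai.json` is assumed.
def _example_api_caller_usage():
    ac = ApiCaller(model='text-ada-001')  # one prompt per request
    single = ac('Is the sentiment " positive " or " negative " ?\n Text: great movie \n Answer:')
    ac_batched = ApiCaller(model='text-ada-001', batched=True, delay=1.0)  # many prompts per request
    batch = ac_batched(['prompt 1', 'prompt 2'])  # completions returned in input order
    return single, batch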
def text2n_token(txt: str) -> int:
if not hasattr(text2n_token, 'token_pattern'):
text2n_token.token_pattern = re.compile(r'(?u)\b\w+\b') # taken from sklearn.CountVectorizer
return len(text2n_token.token_pattern.findall(txt))
def truncate_text(text: str = None, n: int = None) -> str:
return ' '.join(text.split()[:n])
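# For example, `text2n_token('Hello, world!')` counts 2 word-level tokens and
# `truncate_text('a b c d', n=2)` returns 'a b'; both operate on whitespace/word boundaries
# rather than on the model's subword tokenizer, hence the conservative limits below.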
class PromptMap:
"""
Create the GPT3 prompt given text and label options
Since we don't know how many tokens the prompt will be tokenized into,
set an artificial length limit based on the number of words
"""
templates = sconfig('baselines.gpt2-nvidia.templates')
n_template = len(templates)
logger = get_logger('Prompt Map')
def __init__(
self, dataset_name: str = None, max_text_length: int = 1024, max_prompt_length: int = 1024 + 256,
logger_fl: logging.Logger = None
):
self.dataset_name = dataset_name
self.labels = sconfig(f'UTCD.datasets.{dataset_name}.splits.test.labels') # Take labels from the test split
self.n_cls = len(self.labels)
self.max_text_length = max_text_length
self.max_prompt_length = max_prompt_length
self.logger_fl = logger_fl
d_log = {
'dataset_name': dataset_name, 'labels': self.labels, '#class': self.n_cls,
'max_text_length': max_text_length, 'max_prompt_length': max_prompt_length
}
PromptMap.logger.info(f'Prompt Map initialized with: {pl.i(d_log)}')
if self.logger_fl:
self.logger_fl.info(f'Prompt Map initialized with: {pl.nc(d_log)}')
def __call__(self, text: str = None):
n_txt = text2n_token(text)
if n_txt > self.max_text_length:
text = truncate_text(text, self.max_text_length)  # truncate to the word limit
PromptMap.logger.warning(f'Text too long and truncated: {pl.i(n_txt)} -> {pl.i(self.max_text_length)}')
idx_lbs = np.arange(self.n_cls)
np.random.shuffle(idx_lbs)  # The order in which the labels appear is randomized
label_options_str = ' , '.join(f'" {self.labels[idx]} "' for idx in idx_lbs)
idx_tpl = np.random.randint(PromptMap.n_template)
question = PromptMap.templates[idx_tpl].format(label_options_str)
prompt = self._to_prompt(question=question, text=text)
n_prompt = text2n_token(prompt)
if n_prompt > self.max_prompt_length:
n_txt_ = self.max_prompt_length - text2n_token(question) - 2 # 2 for `Text` and `Answer`
assert n_txt_ >= 50 # sanity check
text = truncate_text(text, n_txt_)
PromptMap.logger.warning(f'Prompt too long and text segment truncated: '
f'{pl.i(n_prompt)} -> {pl.i(self.max_prompt_length)}')
if self.logger_fl:
self.logger_fl.warning(f'Prompt too long and text segment truncated: '
f'{pl.nc(n_prompt)} -> {pl.nc(self.max_prompt_length)}')
prompt = self._to_prompt(question=question, text=text)
return prompt
@staticmethod
def _to_prompt(question: str = None, text: str = None):
# return f'Text: {text}\nQuestion: {question}\n Answer:' # TODO: This template works w/ `davinci` better?
return f'{question}\n Text: {truncate_text(text)} \n Answer:' # This template works w/ `curie` better
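# Rough sketch of a constructed prompt (dataset name and template wording are hypothetical;
# actual templates come from the `baselines.gpt2-nvidia.templates` config):
#   pm = PromptMap(dataset_name='emotion')
#   pm('I am over the moon today')
#   # => 'Which category does the text belong to: " joy " , " anger " , ... ?\n Text: I am over the moon today \n Answer:'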
@dataclass
class GPT3EvalMeta:
text: str = None
prompt: str = None
generated: str = None
@dataclass
class _EvalSingleOut:
pred: int = None
true: int = None
meta: GPT3EvalMeta = None
class _EvalSingle:
def __init__(
self, pm: PromptMap = None, api_caller: ApiCaller = None,
label_options: List[str] = None, lb2id: Dict[str, int] = None,
logger_fl: logging.Logger = None, return_text: bool = False
):
self.pm = pm
self.ac = api_caller
self.batched = api_caller.batched
self.label_options = label_options
self.lb2id = lb2id
self.logger_fl = logger_fl
self.return_text = return_text
def __call__(self, e: Union[Dict[str, Any], List[Dict[str, Any]]], pbar=None) -> Union[_EvalSingleOut, List[_EvalSingleOut]]:
if self.batched:
e: List[Dict]
lst_txt, lst_lbs = [i['text'] for i in e], [i['labels'] for i in e]
assert isinstance(lst_txt[0], str) and isinstance(lst_lbs[0], list) # sanity check
prompts = [self.pm(txt) for txt in lst_txt]
res = self.ac(prompts)
ret = []
for txt, lbs, ppt, a in zip(lst_txt, lst_lbs, prompts, res):
ret.append(self._ret_single(text=txt, labels=lbs, prompt=ppt, answer=a, pbar=pbar))
return ret
else:
txt, lbs = e['text'], e['labels']
prompt = self.pm(txt)
answer = self.ac(prompt)
return self._ret_single(text=txt, labels=lbs, prompt=prompt, answer=answer, pbar=pbar)
def _ret_single(
self, text: str = None, labels: List[int] = None, prompt: str = None, answer: str = None, pbar=None
) -> _EvalSingleOut:
if pbar:
_d_log = dict(labels=[self.label_options[i] for i in labels], answer=[answer])
pbar.set_postfix({k: pl.i(v) for k, v in _d_log.items()})
answer = answer.lower().strip() # TODO: maybe GPT3 generates multiple answers?
ret: Dict[str, Any]
if answer in self.label_options:
pred = self.lb2id[answer]
# count as correct only if the generated label is one of the true labels for this sample
ret = dict(pred=pred, true=pred if pred in labels else labels[0])
else:
logger.warning(f'Generated {pl.i([answer])}, not one of label options')
self.logger_fl.warning(f'Generated {pl.nc([answer])}, not one of label options')
ret = dict(pred=-1, true=labels[0])
if self.return_text:
ret.update(meta=GPT3EvalMeta(text=text, prompt=prompt, generated=answer))
return _EvalSingleOut(**ret)
def evaluate(
model: str = 'text-ada-001', domain: str = 'in', dataset_name: str = 'all', concurrent: bool = False,
batched: Union[bool, int] = False, delay: float = None,
subsample: Union[bool, int] = False, subsample_seed: int = 77, store_meta: bool = False,
store_frequency: Optional[int] = None, resume: List[str] = None
):
ac = ApiCaller(model=model, batched=batched, delay=delay)
if dataset_name == 'all' and subsample:
raise NotImplementedError('Subsampling intended for single dataset')
dataset_names = utcd_util.get_eval_dataset_names(domain=domain, dataset_name=dataset_name)
_preds, _trues, _infs = None, None, []
if resume:
assert len(dataset_names) == 1 # sanity check, intended for resuming from single dataset
_preds, _trues, _infs = [], [], []
for r in resume:
with open(r, 'r') as fl:
meta = json.load(fl)
_preds.extend(meta['preds'])
_trues.extend(meta['trues'])
_infs.extend(meta['inferences'])
assert len(_preds) == len(_trues) == len(_infs) # sanity check
d = dict(md=model, dm=domain, dnm=dataset_name)
output_dir_nm = f'{now(for_path=True)}_Zeroshot-GPT3-Eval_{pl.pa(d)}'
output_path = os_join(u.eval_path, output_dir_nm, domain2eval_dir_nm(domain))
logger_fl = get_logger('GPT3 Eval', kind='file-write', file_path=os_join(output_path, f'eval.log'))
d_log: Dict[str, Any] = dict(
model_name=model, batched=batched, delay=delay, domain=domain, dataset_names=dataset_names,
output_path=output_path,
concurrent=concurrent, subsample=subsample, subsample_seed=subsample_seed, store_meta=store_meta
)
d_log['store_frequency'] = store_frequency
logger.info(f'Evaluating GPT3 model w/ {pl.i(d_log)}... ')
logger_fl.info(f'Evaluating GPT3 model w/ {d_log}... ')
os.makedirs(output_path, exist_ok=True)
for dnm in dataset_names:
n_txt = sconfig(f'UTCD.datasets.{dnm}.splits.test.n_text')
n_tgt = 5000 if isinstance(subsample, bool) else subsample
if subsample and n_txt > n_tgt:
dset = utcd_util.subsample_dataset(dataset_name=dnm, split='test', n_tgt=n_tgt, seed=subsample_seed)
else:
dset = get_dataset(dnm, splits='test')['test']
dset: datasets.Dataset
n_dset_total = len(dset)
n_dset_remain = n_dset_total
if resume:
n_ori = len(dset)
ran_txts = set(e['text'] for e in _infs)
def filt(sample: Dict[str, Any]) -> bool:
return sample['text'] not in ran_txts
dset = dset.filter(filt)
# sanity check, every text completed should be accounted for, exactly once
n_dset_remain = len(dset)
assert len(dset) + len(_infs) == n_ori
logger.info(f'{pl.i(len(_infs))} texts evaluated, resuming from {pl.i(n_ori)} => {pl.i(len(dset))} ')
logger_fl.info(f'{len(_infs)} texts evaluated, resuming from {n_ori} => {len(dset)} ')
pm = PromptMap(dataset_name=dnm, logger_fl=logger_fl)
label_options = [lb.lower() for lb in pm.labels]
lb2id = {lb: idx for idx, lb in enumerate(label_options)}
args = dict(pm=pm, api_caller=ac, label_options=label_options, lb2id=lb2id, logger_fl=logger_fl)
eval_single = _EvalSingle(**args, return_text=store_meta)
lst_meta = []
meta_path = os_join(output_path, f'{dnm}_meta.json')
store_frequency = store_frequency or 100
def write_meta(): # Writing completed inferences to file periodically, in case GPT3 eval gets stuck
n_completed = len(lst_meta)
if n_completed % store_frequency == 0 or n_completed == n_dset_remain:
logger.info(f'Writing eval instances to {pl.i(meta_path)}...')
logger_fl.info(f'Writing eval instances to {meta_path}...')
d_ = d_log
infs = [asdict(m) for m in lst_meta]
if resume:
infs = _infs + infs
if concurrent: # a numpy array created
__preds, __trues = preds, trues
total_completed = len(__preds)
else:
total_completed = len(_infs) + n_completed
__preds = preds[:total_completed].tolist()
__trues = trues[:total_completed].tolist()
assert len(__preds) == len(__trues) == len(infs) # sanity check
d_['#completed'] = total_completed
d_.update(dict(inferences=infs, preds=__preds, trues=__trues))
with open(meta_path, 'w') as f_:
json.dump(d_, f_, indent=4)
bsz = None
if batched:
bsz = 8 if isinstance(batched, bool) else batched
with_tqdm = dict(desc=f'Evaluating {pl.i(dnm)}', total=n_dset_remain)
if concurrent: # TODO: concurrent batched
preds, trues = (_preds, _trues) if resume else ([], [])
# order irrelevant
for e in conc_yield(eval_single, dset, with_tqdm=with_tqdm, mode='thread', n_worker=4):
preds.append(e.pred)
trues.append(e.true)
if store_meta:
lst_meta.append(e.meta)
write_meta()
else:
trues, preds = np.empty(n_txt, dtype=int), np.empty(n_txt, dtype=int)
if resume:
trues[:len(_trues)] = _trues
preds[:len(_preds)] = _preds
if batched:
with tqdm(**with_tqdm) as pbar:
idx = 0
for elms in DataLoader(dset, batch_size=bsz, shuffle=False, collate_fn=lambda x: x):
for e in eval_single(elms):
preds[idx] = e.pred
trues[idx] = e.true
if store_meta:
lst_meta.append(e.meta)
write_meta()
pbar.update(1)
idx += 1
assert idx == n_dset_remain # sanity check
else:
it = tqdm(dset, **with_tqdm)
for idx, elm in enumerate(it):
out = eval_single(elm, pbar=it)
preds[idx], trues[idx] = out.pred, out.true
if store_meta:
lst_meta.append(out.meta)
write_meta()
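# `-1` below is the catch-all class for generations that did not match any label option
# (see `_EvalSingle._ret_single`), reported as 'Label not in dataset'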
args = dict(
labels=[-1, *range(len(label_options))], target_names=['Label not in dataset', *label_options],
zero_division=0, output_dict=True
)
report = classification_report(trues, preds, **args)
csv_path = os_join(output_path, f'{dnm}.csv')
pd.DataFrame(report).transpose().to_csv(csv_path)
acc = f'{report["accuracy"]:.3f}'
logger.info(f'{pl.i(dnm)} accuracy: {pl.i(acc)}')
logger_fl.info(f'{dnm} accuracy: {acc}')
def parse_args():
parser = ArgumentParser()
models = ['text-ada-001', 'text-babbage-001', 'text-curie-001', 'text-davinci-002']
parser.add_argument('--model', type=str, choices=models, default='text-ada-001', help="""
GPT3 model from Open AI API, see `https://beta.openai.com/docs/models/gpt-3`
""")
parser.add_argument('--dataset', type=str, default='all', help="""
One of the dataset names in UTCD, or `all` for all datasets in a domain; see argument `domain`
""")
parser.add_argument('--domain', type=str, choices=['in', 'out'], default='in', help="""
One of [`in`, `out`] for in-domain, out-of-domain respectively
""")
parser.add_argument('--concurrent', type=bool, default=False, help="""
Make GPT3 completion requests concurrently
""")
parser.add_argument('--subsample', type=int, default=5000, help="""
Total #sample to subsample from the dataset
""")
parser.add_argument('--subsample_seed', type=int, default=77)
return parser.parse_args()
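# Example command-line invocation (hypothetical values; flags as defined above):
#   python gpt3.py --model text-curie-001 --domain in --dataset emotion --subsample 5000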
if __name__ == '__main__':
mic.output_width = 256
def set_openai_token():
with open(os_join(u.proj_path, 'auth', 'open-ai.json')) as f:
auth = json.load(f)
org = auth['organization']
api_key = auth['api-key']
openai.api_key = api_key
# set_openai_token()
# evaluate(model='text-ada-001', domain='in', dataset_name='emotion')
# evaluate(model='text-curie-001', domain='out', dataset_name='multi_eurlex', concurrent=True)
# evaluate(model='text-curie-001', domain='in', dataset_name='emotion', concurrent=True)
# evaluate(model='text-curie-001', domain='in', dataset_name='finance_sentiment', concurrent=True)
# evaluate(model='text-curie-001', domain='in', dataset_name='banking77', concurrent=True, subsample=True)
# evaluate(model='text-davinci-002', domain='in', dataset_name='emotion')
# evaluate(model='text-davinci-002', domain='out', dataset_name='finance_sentiment')
# evaluate(model='text-davinci-002', domain='out', dataset_name='consumer_finance')
# evaluate(model='text-davinci-002', domain='out', dataset_name='amazon_polarity', concurrent=True)
def subsample_large_dset():
run_args = dict(model='text-curie-001', subsample=True, store_meta=True, store_frequency=10)
# dnm = 'amazon_polarity'
# dnm = 'yelp'
# dnm_ = 'consumer_finance'
# dnm_ = 'slurp'
dnm_ = 'multi_eurlex' # TODO: doesn't work w/ batched requests???
# dnm_ = 'sgd'
rsm = [os_join(
u.eval_path, '2022-12-04_17-34-01_Zeroshot-GPT3-Eval_{md=text-curie-001, dm=out, dnm=multi_eurlex}',
'22-12-04_out-of-domain', f'{dnm_}_meta.json'
)]
evaluate(domain='out', dataset_name=dnm_, **run_args, concurrent=True, delay=12, resume=rsm)
# subsample_large_dset()
def command_prompt():
args = parse_args()
evaluate(
model=args.model, dataset_name=args.dataset, domain=args.domain, concurrent=args.concurrent,
subsample=args.subsample, subsample_seed=args.subsample_seed
)
command_prompt()


# (end of zeroshot_classifier/models/gpt3.py)
import math
from os.path import join as os_join
from argparse import ArgumentParser
from typing import List, Dict
import numpy as np
import pandas as pd
import torch.cuda
from transformers import BertTokenizer, BertForSequenceClassification
from transformers import TrainingArguments, Trainer
from datasets import Dataset
from tqdm.auto import tqdm
from stefutil import *
from zeroshot_classifier.util import *
import zeroshot_classifier.util.utcd as utcd_util
from zeroshot_classifier.util.load_data import (
get_datasets, seq_cls_format, in_domain_data_path, out_of_domain_data_path
)
MODEL_NAME = 'BERT Seq CLS'
HF_MODEL_NAME = 'bert-base-uncased'
def parse_args():
parser = ArgumentParser()
subparser = parser.add_subparsers(dest='command')
parser_train = subparser.add_parser('train')
parser_test = subparser.add_parser('test')
parser_train.add_argument('--dataset', type=str, default='all')
parser_train.add_argument('--domain', type=str, choices=['in', 'out'], required=True)
parser_train.add_argument('--normalize_aspect', type=bool, default=False)
parser_train.add_argument('--learning_rate', type=float, default=5e-5)
parser_train.add_argument('--batch_size', type=int, default=16)
parser_train.add_argument('--epochs', type=int, default=3)
parser_test.add_argument('--dataset', type=str, default='all')
parser_test.add_argument('--domain', type=str, choices=['in', 'out'], required=True)
parser_test.add_argument('--model_name_or_path', type=str, required=True)
return parser.parse_args()
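# Example invocations (hypothetical paths; flags as defined above):
#   python bert.py train --domain in --dataset all --batch_size 16 --epochs 3
#   python bert.py test --domain out --dataset all --model_name_or_path <trained-model-dir>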
if __name__ == "__main__":
import os
import transformers
import datasets
args = parse_args()
seed = sconfig('random-seed')
if args.command == 'train':
logger = get_logger(f'{MODEL_NAME} Train')
dataset_name, domain, normalize_aspect = args.dataset, args.domain, args.normalize_aspect
lr, bsz, n_ep = args.learning_rate, args.batch_size, args.epochs
ca(dataset_domain=domain)
domain_str = 'in-domain' if domain == 'in' else 'out-of-domain'
dset_args = dict(domain=domain)
if normalize_aspect:
dset_args['normalize_aspect'] = seed
data = get_datasets(**dset_args)
if dataset_name == 'all':
train_dset, test_dset, labels = seq_cls_format(data, all=True)
else:
train_dset, test_dset, labels = seq_cls_format(data[dataset_name])
d_log = {'#train': len(train_dset), '#test': len(test_dset), 'labels': list(labels.keys())}
logger.info(f'Loaded {pl.i(domain_str)} datasets {pl.i(dataset_name)} with {pl.i(d_log)} ')
num_labels = len(labels)
tokenizer = BertTokenizer.from_pretrained(HF_MODEL_NAME)
model = BertForSequenceClassification.from_pretrained(HF_MODEL_NAME, return_dict=True, num_labels=num_labels)
tokenizer.add_special_tokens(dict(eos_token=utcd_util.EOT_TOKEN)) # end-of-turn for SGD
model.resize_token_embeddings(len(tokenizer))
def tokenize_function(examples):
return tokenizer(examples['text'], padding='max_length', truncation=True)
train_dset = Dataset.from_pandas(pd.DataFrame(train_dset))
test_dset = Dataset.from_pandas(pd.DataFrame(test_dset))
# small batch size since samples are very long in some datasets
map_args = dict(batched=True, batch_size=16, num_proc=os.cpu_count())
train_dset = train_dset.map(tokenize_function, **map_args)
test_dset = test_dset.map(tokenize_function, **map_args)
warmup_steps = math.ceil(len(train_dset) * n_ep * 0.1) # 10% of train data for warm-up
dir_nm = map_model_output_path(
model_name=MODEL_NAME.replace(' ', '-'), output_path=f'{domain}-{dataset_name}', mode=None,
sampling=None, normalize_aspect=normalize_aspect
)
output_path = os_join(utcd_util.get_base_path(), u.proj_dir, u.model_dir, dir_nm)
proj_output_path = os_join(u.base_path, u.proj_dir, u.model_dir, dir_nm, 'trained')
d_log = {'batch size': bsz, 'epochs': n_ep, 'warmup steps': warmup_steps, 'save path': output_path}
logger.info(f'Launched training with {pl.i(d_log)}... ')
training_args = TrainingArguments( # TODO: learning rate
output_dir=output_path,
learning_rate=lr,
num_train_epochs=n_ep,
per_device_train_batch_size=bsz,
per_device_eval_batch_size=bsz,
warmup_steps=warmup_steps,
weight_decay=0.01,
logging_dir='./logs',
load_best_model_at_end=True,
logging_steps=100000,
save_steps=100000,
evaluation_strategy='steps'
)
trainer = Trainer(
model=model, args=training_args,
train_dataset=train_dset, eval_dataset=test_dset, compute_metrics=compute_metrics
)
transformers.set_seed(seed)
trainer.train()
trainer.save_model(proj_output_path)
tokenizer.save_pretrained(proj_output_path)
elif args.command == 'test':
dataset_name, domain, model_path = args.dataset, args.domain, args.model_name_or_path
bsz = 32
split = 'test'
dataset_names = utcd_util.get_dataset_names(domain)
if dataset_name != 'all':
assert dataset_name in dataset_names
dataset_names = [dataset_name]
output_path = os_join(model_path, 'eval')
lg_nm = f'{MODEL_NAME} Eval'
logger = get_logger(lg_nm)
lg_fl = os_join(output_path, f'{now(for_path=True)}_{lg_nm}, dom={domain}.log')
logger_fl = get_logger(lg_nm, kind='file-write', file_path=lg_fl)
domain_str = 'in-domain' if domain == 'in' else 'out-of-domain'
logger.info(f'Evaluating {pl.i(domain_str)} datasets {pl.i(dataset_names)} on model {pl.i(model_path)}... ')
logger_fl.info(f'Evaluating {domain_str} datasets {dataset_names} on model {model_path}... ')
data = get_datasets(in_domain_data_path if domain == 'in' else out_of_domain_data_path)
tokenizer = BertTokenizer.from_pretrained(model_path)
model = BertForSequenceClassification.from_pretrained(model_path)
model.eval()
device = 'cpu'
if torch.cuda.is_available():
model = model.cuda()
device = 'cuda'
lb2id: Dict[str, int] = dict() # see `load_data.seq_cls_format`
if dataset_name == 'all':
for dset in data.values():
for label in dset['labels']:
if label not in lb2id:
lb2id[label] = len(lb2id)
else:
for label in data[dataset_name]['labels']:
if label not in lb2id:
lb2id[label] = len(lb2id)
_lbs = list(lb2id.keys())
logger.info(f'Loaded labels: {pl.i(_lbs)}')
logger_fl.info(f'Loaded labels: {_lbs}')
def tokenize(examples):
return tokenizer(examples['text'], padding='max_length', truncation=True)
for dnm in dataset_names:
pairs: Dict[str, List[str]] = data[dnm][split]
asp = sconfig(f'UTCD.datasets.{dnm}.aspect')
logger.info(f'Evaluating {pl.i(asp)} dataset {pl.i(dnm)}... ')
logger_fl.info(f'Evaluating {asp} dataset {dnm}... ')
n_txt = sconfig(f'UTCD.datasets.{dnm}.splits.{split}.n_text')
arr_preds, arr_labels = np.empty(n_txt, dtype=int), np.empty(n_txt, dtype=int)
logger.info(f'Loading {pl.i(n_txt)} samples... ')
logger_fl.info(f'Loading {n_txt} samples... ')
df = pd.DataFrame([dict(text=txt, label=[lb2id[lb] for lb in lbs]) for txt, lbs in pairs.items()])
dset = Dataset.from_pandas(df)
datasets.set_progress_bar_enabled(False)
map_args = dict(batched=True, batch_size=64, num_proc=os.cpu_count(), remove_columns=['text'])
dset = dset.map(tokenize, **map_args)
datasets.set_progress_bar_enabled(True)
gen = group_n(range(len(dset)), n=bsz)
n_ba = math.ceil(n_txt / bsz)
logger.info(f'Evaluating... ')
logger_fl.info(f'Evaluating... ')
it = tqdm(gen, desc=dnm, unit='ba', total=n_ba)
for i, idxs in enumerate(it):
inputs = dset[idxs]
labels = inputs.pop('label')
inputs = {k: torch.tensor(v, device=device) for k, v in inputs.items()}
with torch.no_grad():
outputs = model(**inputs)
preds = torch.argmax(outputs[0], dim=1)
for i_, (pred, lbs) in enumerate(zip(preds, labels), start=i*bsz):
arr_preds[i_] = pred = pred.item()
arr_labels[i_] = pred if pred in lbs else lbs[0]
args = dict(
zero_division=0, target_names=list(lb2id.keys()), labels=list(range(len(lb2id))), output_dict=True
) # disables warning
df, acc = eval_res2df(arr_labels, arr_preds, report_args=args, pretty=False)
logger.info(f'{pl.i(dnm)} Classification Accuracy: {pl.i(acc)}')
logger_fl.info(f'{dnm} Classification Accuracy: {acc}')
out = os_join(output_path, f'{dnm}.csv')
df.to_csv(out)
logger.info(f'{pl.i(dnm)} Eval CSV written to {pl.i(out)}')
logger_fl.info(f'{dnm} Eval CSV written to {out}')


# (end of zeroshot_classifier/models/bert.py)
import os
import pickle
from os.path import join as os_join
from typing import List
import numpy as np
import pandas as pd
import torch.cuda
from sklearn.metrics import classification_report
from transformers import pipeline
from tqdm.auto import tqdm
from zeroshot_classifier.util.load_data import get_datasets
from stefutil import *
from zeroshot_classifier.util import *
import zeroshot_classifier.util.utcd as utcd_util
logger = get_logger('BART')
def evaluate(
model_name: str = 'facebook/bart-large-mnli', domain: str = 'in', dataset_name: str = 'all',
split_index: int = None, n_splits: int = None
):
bsz = 32
all_dset = dataset_name == 'all'
if not all_dset:
_dom = sconfig(f'UTCD.datasets.{dataset_name}.domain')
if domain is None:  # infer the domain from the dataset if not given
domain = _dom
else:
assert domain == _dom
if all_dset:
dataset_names = utcd_util.get_dataset_names(domain)
else:
dataset_names = [dataset_name]
output_dir_nm = f'{now(for_path=True)}_Zeroshot-BART'
output_path = os_join(u.eval_path, output_dir_nm, domain2eval_dir_nm(domain))
os.makedirs(output_path, exist_ok=True)
is_split = split_index is not None and n_splits is not None
split_str, split_str_c = None, None
if is_split:
assert 0 <= split_index < n_splits
if dataset_name == 'all':
raise ValueError(f'Splitting intended for single dataset only')
split_str = f'{split_index+1}_{n_splits}'
split_str_c = f'{pl.i(split_index)}/{pl.i(n_splits)}'
log_fnm = f'{now(for_path=True)}_BART_{domain}_{dataset_name}'
if is_split:
log_fnm = f'{log_fnm}_{split_str}'
log_fnm = f'{log_fnm}_Eval'
logger_fl = get_logger('BART Eval', kind='file-write', file_path=os_join(output_path, f'{log_fnm}.log'))
d_log = dict(
model_name=model_name, domain=domain, dataset_names=dataset_names, batch_size=bsz, output_path=output_path,
split_index=split_index, n_splits=n_splits
)
logger.info(f'Evaluating BART model w/ {pl.i(d_log)}... ')
logger_fl.info(f'Evaluating BART model w/ {d_log}... ')
device = 0 if torch.cuda.is_available() else -1 # See `transformers::pipelines::base`
model = pipeline('zero-shot-classification', model=model_name, device=device)
data = get_datasets(domain=domain, dataset_names=dataset_names)
split = 'test'
for dnm in dataset_names: # loop through all datasets
dset = data[dnm]
pairs = dset[split]
d_info = sconfig(f'UTCD.datasets.{dnm}.splits.{split}')
n_txt, label_options = d_info['n_text'], d_info['labels']
d_log = {'#text': n_txt, '#label': len(label_options), 'labels': label_options}
logger.info(f'Evaluating {pl.i(dnm)} w/ {pl.i(d_log)}...')
lb2id = {lb: idx for idx, lb in enumerate(label_options)}
if is_split:
it_txts = iter(split_n(pairs.keys(), n=n_splits))
txts = None
for i in range(n_splits):
txts = next(it_txts)
if i == split_index:
break
n_txt = len(txts)
logger.info(f'Loaded split {split_str_c} w/ {pl.i(n_txt)} texts...')
logger_fl.info(f'Loaded split {split_str} w/ {n_txt} texts...')
else:
txts = pairs.keys()
txts = (txt for txt in txts)
trues, preds = np.empty(n_txt, dtype=int), np.empty(n_txt, dtype=int)
it = tqdm(model(txts, label_options, batch_size=bsz), desc=f'Evaluating {pl.i(dnm)}', total=n_txt)
for i, out in enumerate(it):
txt, labels, scores = out['sequence'], out['labels'], out['scores']
idx_pred = max(enumerate(scores), key=lambda x: x[1])[0] # Index of the highest score
pred = lb2id[labels[idx_pred]]
lbs_true = [lb2id[lb] for lb in pairs[txt]]
if pred in lbs_true:
preds[i] = trues[i] = pred
else:
preds[i], trues[i] = -1, lbs_true[0]
if is_split:
fnm = f'{now(for_path=True)}_{dnm}_split_{split_str} predictions.pkl'
path = os_join(output_path, fnm)
with open(path, 'wb') as f:
pickle.dump(dict(trues=trues, preds=preds), f)
logger.info(f'Partial predictions saved to {pl.i(path)}')
logger_fl.info(f'Partial predictions saved to {path}')
else:
args = dict(
labels=[-1, *range(len(label_options))], target_names=['Label not in dataset', *label_options],
zero_division=0, output_dict=True
)
report = classification_report(trues, preds, **args)
acc = f'{report["accuracy"]:.3f}'
logger.info(f'{pl.i(dnm)} accuracy: {pl.i(acc)}')
logger_fl.info(f'{dnm} accuracy: {acc}')
path = os_join(output_path, f'{dnm}.csv')
pd.DataFrame(report).transpose().to_csv(path)
def merge_splits_and_evaluate(domain: str = 'in', dataset_name: str = None, paths: List[str] = None):
trues, preds = [], []
for p in paths:
with open(p, 'rb') as f:
d = pickle.load(f)
trues.append(d['trues'])
preds.append(d['preds'])
trues, preds = np.concatenate(trues), np.concatenate(preds)
d_info = sconfig(f'UTCD.datasets.{dataset_name}.splits.test')
n_txt, label_options = d_info['n_text'], d_info['labels']
assert trues.size == preds.size == n_txt
args = dict(
labels=[-1, *range(len(label_options))], target_names=['Label not in dataset', *label_options],
zero_division=0, output_dict=True
)
report = classification_report(trues, preds, **args)
acc = f'{report["accuracy"]:.3f}'
logger.info(f'{pl.i(dataset_name)} accuracy: {pl.i(acc)}')
output_dir_nm = f'{now(for_path=True)}_Zeroshot-BART'
output_path = os_join(u.eval_path, output_dir_nm, domain2eval_dir_nm(domain))
os.makedirs(output_path, exist_ok=True)
path = os_join(output_path, f'{dataset_name}.csv')
df = pd.DataFrame(report).transpose()
df.to_csv(path)
return df
if __name__ == '__main__':
# evaluate(domain='in')
# in the order of #text
# evaluate(domain='out', dataset_name='finance_sentiment')
# evaluate(domain='out', dataset_name='snips')
# evaluate(domain='out', dataset_name='banking77')
# evaluate(domain='out', dataset_name='patent')
# evaluate(domain='out', dataset_name='multi_eurlex')
# evaluate(domain='out', dataset_name='nlu_evaluation')
# evaluate(domain='out', dataset_name='yelp')
# evaluate(domain='out', dataset_name='amazon_polarity')
def chore_merge_splits():
dir_nms = [
'2022-10-19_04-07-56_Zeroshot-BART',
'2022-10-19_04-10-14_Zeroshot-BART',
'2022-10-19_04-12-11_Zeroshot-BART',
'2022-10-19_04-14-26_Zeroshot-BART'
]
fnms = [
'2022-10-19_16-09-25_consumer_finance_split_1_4 predictions',
'2022-10-19_17-13-13_consumer_finance_split_2_4 predictions',
'2022-10-19_19-12-10_consumer_finance_split_3_4 predictions',
'2022-10-19_20-55-40_consumer_finance_split_4_4 predictions'
]
paths = [
os_join(u.eval_path, dir_nm, '22-10-19_out-of-domain', f'{fnm}.pkl') for dir_nm, fnm in zip(dir_nms, fnms)
]
merge_splits_and_evaluate(domain='out', dataset_name='consumer_finance', paths=paths)
chore_merge_splits()
# evaluate(domain='out', dataset_name='consumer_finance', split_index=3, n_splits=4) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/bart.py | bart.py |
import os
import math
import random
from os.path import join as os_join
from typing import List, Dict
import numpy as np
from torch.utils.data import DataLoader
import transformers
from sentence_transformers import SentenceTransformer, models, losses, util as sbert_util
from tqdm import tqdm
from stefutil import *
from zeroshot_classifier.util import *
from zeroshot_classifier.util.load_data import get_datasets, binary_cls_format
import zeroshot_classifier.util.utcd as utcd_util
from zeroshot_classifier.models.architecture import BiEncoder
from zeroshot_classifier.models._bert_based_models import HF_MODEL_NAME, parse_args
MODEL_NAME = 'Bi-Encoder'
if __name__ == "__main__":
seed = sconfig('random-seed')
args = parse_args()
cmd = args.command
log_nm = f'{MODEL_NAME} {args.command.capitalize()}'
logger = get_logger(log_nm)
if cmd == 'train':
output_path, output_dir, sampling, mode = args.output, args.output_dir, args.sampling, args.mode
normalize_aspect = args.normalize_aspect
lr, bsz, n_ep = args.learning_rate, args.batch_size, args.epochs
init_model_name_or_path = args.init_model_name_or_path
# best_metric = 'accuracy'
best_metric = 'loss'
output_path = map_model_output_path(
model_name=MODEL_NAME.replace(' ', '-'), output_path=output_path, output_dir=output_dir,
mode=mode, sampling=sampling, normalize_aspect=normalize_aspect
)
logger_fl = get_logger(log_nm, kind='file-write', file_path=os_join(output_path, 'training.log'))
dset_args = dict(normalize_aspect=seed) if normalize_aspect else dict()
data = get_datasets(domain='in', **dset_args)
dataset_names = [dnm for dnm, d_dset in sconfig('UTCD.datasets').items() if d_dset['domain'] == 'in']
logger.info(f'Processing datasets {pl.i(dataset_names)} for training... ')
logger_fl.info(f'Processing datasets {pl.nc(dataset_names)} for training... ')
train, val, test = [], [], []
it = tqdm(dataset_names, desc=f'Formatting into Binary CLS w/ {pl.i(dict(sampling=sampling, mode=mode))}')
for dataset_name in it:
dset = data[dataset_name]
args = dict(sampling=sampling, mode=mode)
for split, ds in zip(['train', 'val', 'test'], [train, val, test]):
it.set_postfix(dnm=f'{pl.i(dataset_name)}-{pl.i(split)}')
ds.extend(binary_cls_format(dset, **args, split=split))
d_log = dict(init_model_name_or_path=init_model_name_or_path)
md_nm = init_model_name_or_path
if mode == 'explicit':
assert init_model_name_or_path != HF_MODEL_NAME # sanity check
if init_model_name_or_path != HF_MODEL_NAME:
# loading from explicit pre-training local weights,
# the classification head would be ignored for classifying 3 classes
path = os_join(get_base_path(), u.proj_dir, u.model_dir, init_model_name_or_path)
if os.path.exists(path):
md_nm = path
d_log['files'] = os.listdir(path)
logger.info(f'Loading model with {pl.i(d_log)}...')
logger_fl.info(f'Loading model with {pl.nc(d_log)}...')
word_embedding_model = models.Transformer(
md_nm, max_seq_length=512, tokenizer_args=dict(use_fast=False)  # use the resolved local path when it exists
)
add_tok_arg = utcd_util.get_add_special_tokens_args(word_embedding_model.tokenizer, train_strategy=mode)
if add_tok_arg:
logger.info(f'Adding special tokens {pl.i(add_tok_arg)} to tokenizer... ')
logger_fl.info(f'Adding special tokens {pl.nc(add_tok_arg)} to tokenizer... ')
word_embedding_model.tokenizer.add_special_tokens(special_tokens_dict=add_tok_arg)
word_embedding_model.auto_model.resize_token_embeddings(len(word_embedding_model.tokenizer))
pooling_model = models.Pooling(
word_embedding_dimension=word_embedding_model.get_word_embedding_dimension(),
pooling_mode_mean_tokens=True
)
model = BiEncoder(modules=[word_embedding_model, pooling_model])
random.seed(seed)
random.shuffle(train)
train_dataloader = DataLoader(train, shuffle=True, batch_size=bsz)
val_dataloader = DataLoader(val, shuffle=False, batch_size=bsz)
train_loss = losses.CosineSimilarityLoss(model)
warmup_steps = math.ceil(len(train_dataloader) * n_ep * 0.1) # 10% of train data for warm-up
d_log = {
'#data': len(train), 'learning_rate': lr, 'batch size': bsz, 'epochs': n_ep, 'warmup steps': warmup_steps,
'best_model_metric': best_metric, 'output path': output_path
}
logger.info(f'Training w/ {pl.i(d_log)}... ')
logger_fl.info(f'Training w/ {pl.nc(d_log)}... ')
transformers.set_seed(seed)
model.fit(
train_objectives=[(train_dataloader, train_loss)],
val_dataloader=val_dataloader,
epochs=n_ep,
optimizer_params=dict(lr=lr),
warmup_steps=warmup_steps,
output_path=output_path,
logger_fl=logger_fl,
best_model_metric=best_metric
)
elif cmd == 'test':
split = 'test'
mode, domain, model_name_or_path, bsz = args.mode, args.domain, args.model_name_or_path, args.batch_size
out_path = os_join(u.eval_path, model_name_or_path, domain2eval_dir_nm(domain))
os.makedirs(out_path, exist_ok=True)
dataset_names = utcd_util.get_dataset_names(domain)
data = get_datasets(domain=domain)
model_path = os_join(get_base_path(), u.proj_dir, u.model_dir, model_name_or_path)
if not os.path.exists(model_path):
model_path = model_name_or_path # A huggingface model
logger.info(f'Loading model from path {pl.i(model_path)}... ')
model = SentenceTransformer(model_path)
md_nm = model.__class__.__qualname__
d_log = dict(
model=md_nm, mode=mode, domain=domain, datasets=dataset_names, model_name_or_path=model_name_or_path,
batch_size=bsz
)
logger = get_logger(f'{MODEL_NAME} Eval')
logger.info(f'Evaluating {MODEL_NAME} with {pl.i(d_log)} and saving to {pl.i(out_path)}... ')
for dnm in dataset_names:
dset = data[dnm]
pairs: Dict[str, List[str]] = dset[split]
aspect = dset['aspect']
label_options = sconfig(f'UTCD.datasets.{dnm}.splits.{split}.labels')
label2id = {lbl: i for i, lbl in enumerate(label_options)}
mode2map = TrainStrategy2PairMap(train_strategy=mode)
txts = [mode2map.map_text(t, aspect=aspect) for t in pairs.keys()]
label_options = [mode2map.map_label(lb, aspect=aspect) for lb in label_options]
n_txt = sconfig(f'UTCD.datasets.{dnm}.splits.{split}.n_text')
arr_preds, arr_labels = np.empty(n_txt, dtype=int), np.empty(n_txt, dtype=int)
d_log = {'#text': n_txt, '#label': len(label_options), 'labels': label_options}
logger.info(f'Evaluating {pl.i(dnm)} with {pl.i(d_log)}...')
logger.info('Encoding texts...')
txt_embeds = model.encode(txts, batch_size=bsz, show_progress_bar=True)
logger.info('Encoding labels...')
lb_opn_embeds = model.encode(label_options, batch_size=bsz, show_progress_bar=True)
for i, (_, labels) in enumerate(tqdm(pairs.items(), desc=f'Evaluating {pl.i(dnm)}')):
scores = [sbert_util.cos_sim(txt_embeds[i], v).item() for v in lb_opn_embeds]
pred = np.argmax(scores)
label_ids = [label2id[lb] for lb in labels]
true = pred if pred in label_ids else label_ids[0]
arr_preds[i], arr_labels[i] = pred, true
args = dict(zero_division=0, target_names=label_options, output_dict=True) # disables warning
df, acc = eval_res2df(arr_labels, arr_preds, report_args=args)
logger.info(f'{pl.i(dnm)} Classification Accuracy: {pl.i(acc)}')
df.to_csv(os_join(out_path, f'{dnm}.csv')) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/bi-encoder.py | bi-encoder.py |
import math
import logging
import random
import pandas as pd
from argparse import ArgumentParser
from tqdm import tqdm
from pathlib import Path
from os.path import join
from sklearn.metrics import classification_report
from torch.utils.data import DataLoader
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.cross_encoder.evaluation import CESoftmaxAccuracyEvaluator
from zeroshot_classifier.util.load_data import (
get_datasets, binary_cls_format, nli_cls_format, get_nli_data, nli_template,
in_domain_data_path, out_of_domain_data_path
)
random.seed(42)
def parse_args():
parser = ArgumentParser()
subparser = parser.add_subparsers(dest='command')
parser_train = subparser.add_parser('train')
parser_test = subparser.add_parser('test')
parser_pretrain = subparser.add_parser('pre_train')
# set train arguments
parser_train.add_argument('--output', type=str, required=True)
parser_train.add_argument('--sampling', type=str, choices=['rand', 'vect'], required=True)
parser_train.add_argument('--mode', type=str, choices=['vanilla', 'implicit', 'explicit'], default='vanilla')
parser_train.add_argument('--base_model', type=str, required=True)
parser_train.add_argument('--batch_size', type=int, default=16)
parser_train.add_argument('--epochs', type=int, default=3)
# set test arguments
parser_test.add_argument('--model_path', type=str, required=True)
parser_test.add_argument('--domain', type=str, choices=['in', 'out'], required=True)
parser_test.add_argument('--mode', type=str, choices=['vanilla', 'implicit', 'explicit'], default='vanilla')
# set pre-train arguments
parser_pretrain.add_argument('--output', type=str, required=True)
parser_pretrain.add_argument('--batch_size', type=int, default=16)
parser_pretrain.add_argument('--epochs', type=int, default=3)
return parser.parse_args()
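# Example invocations (hypothetical paths; flags as defined above):
#   python bert_nli.py pre_train --output models/nli-pretrain
#   python bert_nli.py train --output models/binary-nli --sampling rand --mode vanilla --base_model models/nli-pretrain
#   python bert_nli.py test --model_path models/binary-nli/rand --domain in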
logger = logging.getLogger(__name__)
if __name__ == "__main__":
args = parse_args()
if args.command == 'pre_train':
train, dev = get_nli_data()
train_batch_size = args.batch_size
num_epochs = args.epochs
model_save_path = args.output
model = CrossEncoder('bert-base-uncased', num_labels=3)
train_dataloader = DataLoader(train, shuffle=True, batch_size=train_batch_size)
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(dev, name='AllNLI-dev')
# Train the model
model.fit(train_dataloader=train_dataloader,
epochs=num_epochs,
evaluator=evaluator,
evaluation_steps=10000,
warmup_steps=warmup_steps,
output_path=model_save_path)
if args.command == 'train':
data = get_datasets(in_domain_data_path)
# get keys from data dict
datasets = list(data.keys())
train = []
test = []
for dataset in datasets:
if args.mode == 'vanilla':
train += binary_cls_format(data[dataset], dataset_name=dataset, sampling=args.sampling, mode=args.mode)
test += binary_cls_format(data[dataset], train=False, mode=args.mode)
elif args.mode == 'implicit':
train += nli_cls_format(data[dataset], name=dataset, sampling=args.sampling)
test += nli_cls_format(data[dataset], name=dataset, train=False)
train_batch_size = args.batch_size
num_epochs = args.epochs
model_save_path = join(args.output, args.sampling)
# since the NLI pre-trained model has 3 classes
model = CrossEncoder(args.base_model, num_labels=2, automodel_args=dict(ignore_mismatched_sizes=True))
# Add end of turn token for sgd
model.tokenizer.add_special_tokens({'eos_token': '[eot]'})
model.model.resize_token_embeddings(len(model.tokenizer))
random.shuffle(train)
train_dataloader = DataLoader(train, shuffle=False, batch_size=train_batch_size)
evaluator = CESoftmaxAccuracyEvaluator.from_input_examples(test, name='UTCD-test')
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1) # 10% of train data for warm-up
logger.info("Warmup-steps: {}".format(warmup_steps))
# Train the model
model.fit(train_dataloader=train_dataloader,
evaluator=evaluator,
epochs=num_epochs,
evaluation_steps=100000,
warmup_steps=warmup_steps,
output_path=model_save_path)
if args.command == 'test':
pred_path = join(args.model_path, 'preds/{}/'.format(args.domain))
result_path = join(args.model_path, 'results/{}/'.format(args.domain))
Path(pred_path).mkdir(parents=True, exist_ok=True)
Path(result_path).mkdir(parents=True, exist_ok=True)
if args.domain == 'in':
data = get_datasets(in_domain_data_path)
elif args.domain == 'out':
data = get_datasets(out_of_domain_data_path)
# get keys from data dict
datasets = list(data.keys())
# load model
model = CrossEncoder(args.model_path)
label_map = ["false", "true"]
for dataset in datasets:
examples = data[dataset]["test"]
labels = data[dataset]['labels']
preds = []
gold = []
correct = 0
# loop through each test example
print("Evaluating dataset: {}".format(dataset))
for index, (text, gold_labels) in enumerate(tqdm(examples.items())):
query = [(text, label) for label in labels] if args.mode == 'vanilla' else [(text, nli_template(label, data[dataset]['aspect'])) for label in labels]
results = model.predict(query, apply_softmax=True)
# pick the label whose entailment ("true") score is highest
pred = labels[results[:,1].argmax()]
preds.append(pred)
if pred in gold_labels:
correct += 1
gold.append(pred)
else:
gold.append(gold_labels[0])
print('{} Dataset Accuracy = {}'.format(dataset, correct/len(examples)))
report = classification_report(gold, preds, output_dict=True)
df = pd.DataFrame(report).transpose()
df.to_csv('{}/{}.csv'.format(result_path, dataset)) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/bert_nli.py | bert_nli.py |
import os
import math
import itertools
from os.path import join as os_join
from typing import List, Tuple, Dict, Iterable
from collections import OrderedDict
import pandas as pd
import torch
from torch.utils.data import DataLoader
from transformers import BertTokenizer
from sentence_transformers.readers import InputExample
from sklearn.metrics import classification_report
from tqdm import tqdm
from stefutil import *
from zeroshot_classifier.util import *
import zeroshot_classifier.util.utcd as utcd_util
from zeroshot_classifier.util.load_data import get_datasets, encoder_cls_format, in_domain_data_path, out_of_domain_data_path
import zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.bi as js_bi
# Cannot import like this since `bi.py` is already imported; could cause a duplicate `config_setup` call, loading 2 models
MODEL_NAME = 'dual-bi-encoder'
MD_NM_OUT = 'Dual Bi-encoder'
def get_train_args() -> Dict:
# Keep the same as in `zeroshot_classifier.models.bi-encoder`
return dict( # To override `jskit.encoders.bi` defaults
output_dir=os_join(utcd_util.get_base_path(), PROJ_DIR, MODEL_DIR, MODEL_NAME, now(for_path=True)),
train_batch_size=16,  # per `bi-encoder.py` default
eval_batch_size=32,
learning_rate=2e-5, # not specified by `bi-encoder.py`, go with default `SentenceTransformer`
num_train_epochs=3, # per `bi-encoder.py` default
weight_decay=1e-2, # not specified by `bi-encoder.py`, go with default `SentenceTransformer`
# not specified by `bi-encoder.py`, go with default `SentenceTransformer`, which uses `transformers.AdamW`
adam_epsilon=1e-6,
warmup_ratio=1e-1, # per `bi-encoder.py`
) # Note that `jskit::train_model` internally uses a linear warmup scheduler, as in `bi-encoder.py`
def ie_dset2js_dset(dset: List[InputExample]) -> Tuple[List, List, List]:
"""
Convert the dataset format from `sentence_transformers::InputExample` to the parallel-list input format expected by jskit training
:return: tuple of (candidates, contexts, labels)
"""
n_tr = len(dset)
def batched_map(edges: Tuple[int, int]) -> Tuple[List, List, List]:  # see `zeroshot_classifier.util.load_data`
cands_tr_, conts_tr_, lbs_tr_ = [], [], []
for i in range(*edges):
ie: InputExample = dset[i]
cands_tr_.append(ie.texts[0])
conts_tr_.append(ie.texts[1])
lbs_tr_.append(ie.label)
return cands_tr_, conts_tr_, lbs_tr_
n_cpu = os.cpu_count()
if n_cpu > 1 and n_tr > 2**12:
preprocess_batch = round(n_tr / n_cpu / 2)
strts = list(range(0, n_tr, preprocess_batch))
ends = strts[1:] + [n_tr] # inclusive begin, exclusive end
cands_tr, conts_tr, lbs_tr = [], [], []
for cd, ct, lb in conc_map(batched_map, zip(strts, ends)):
cands_tr.extend(cd), conts_tr.extend(ct), lbs_tr.extend(lb)
assert len(cands_tr) == n_tr
else:
cands_tr, conts_tr, lbs_tr = batched_map((0, n_tr))
return cands_tr, conts_tr, lbs_tr
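# For example (hypothetical data), `[InputExample(texts=['label A', 'some text'], label=1)]`
# is flattened into three parallel lists: candidates `['label A']`, contexts `['some text']`
# and labels `[1]`, which is the input format `jskit`'s `train_model` expects.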
def run_train(sampling: str = 'rand'):
logger = get_logger(f'Train {MD_NM_OUT}')
logger.info('Training launched... ')
d_dset = get_datasets(in_domain_data_path)
dnms = [dnm for dnm in d_dset.keys() if dnm != 'all']
logger.info(f'Gathering datasets: {pl.i(dnms)}... ')
dset_tr = sum(
(encoder_cls_format(
d_dset[dnm]['train'], name=dnm, sampling=sampling, neg_sample_for_multi=True, show_warnings=False
)
for dnm in dnms), start=[]
)
# dset_vl = sum(( # looks like `jskit.encoders.bi` doesn't support eval during training
# encoder_cls_format(dset["test"], name=dnm, train=False) for dnm, dset in d_dset if dnm != 'all'
# ), start=[])
n_tr = len(dset_tr)
cands_tr, conts_tr, lbs_tr = ie_dset2js_dset(dset_tr)
# n = 10
# for c, t, l in zip(cands_tr[:n], conts_tr[:n], lbs_tr[:n]): # Sanity check
# mic(c, t, l)
train_args = get_train_args()
out_dir, bsz_tr, bsz_vl, lr, n_ep, decay, eps, warmup_ratio = (train_args[k] for k in (
'output_dir', 'train_batch_size', 'eval_batch_size', 'learning_rate', 'num_train_epochs',
'weight_decay', 'adam_epsilon', 'warmup_ratio'
))
assert n_tr % 3 == 0
n_step = math.ceil(n_tr/3 / bsz_tr) * n_ep # As 3 candidates per text, but only 1 for training
train_params = dict(
train_batch_size=bsz_tr, eval_batch_size=bsz_vl, num_train_epochs=n_ep, learning_rate=lr, weight_decay=decay,
warmup_steps=round(n_step*warmup_ratio), adam_epsilon=eps
) # to `str` per `configparser` API
not_shared_str = '' # for not shared weights, see [ongoing-issue](https://github.com/Jaseci-Labs/jaseci/issues/150)
js_bi.set_config(
training_parameters={k: str(v) for k, v in train_params.items()},
model_parameters=dict(shared=not_shared_str)
)
tkzer_cnm, model_cnm = js_bi.model.__class__.__qualname__, js_bi.tokenizer.__class__.__qualname__
shared = get(js_bi.config, 'MODEL_PARAMETERS.shared')
gas, sz_cand, sz_cont = (js_bi.config['TRAIN_PARAMETERS'][k] for k in (
'gradient_accumulation_steps', 'max_candidate_length', 'max_contexts_length'
))
d_model = OrderedDict([
('model name', model_cnm), ('tokenizer name', tkzer_cnm), ('shared weights', shared != not_shared_str),
('candidate size', sz_cand), ('context size', sz_cont)
])
train_args |= dict(n_step=n_step, gradient_accumulation_steps=gas)
logger.info(f'Starting training on model {pl.i(d_model)} with training args: {pl.i(train_args)}, '
f'dataset size: {pl.i(n_tr)}... ')
model_ = zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.train.train_model(
model_train=js_bi.model,
tokenizer=js_bi.tokenizer,
contexts=conts_tr,
candidates=cands_tr,
labels=lbs_tr,
output_dir=out_dir
)
def load_model() -> Tuple[BertTokenizer, zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.models.BiEncoder]:
path = os_join(utcd_util.get_base_path(), PROJ_DIR, MODEL_DIR, MODEL_NAME, '2022-03-21_15-46-17')
js_bi.load_model(path)
return js_bi.tokenizer, js_bi.model
class MyEvalDataset(zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.tokenizer.EvalDataset):
def __init__(self, return_text=False, **kwargs):
super().__init__(**kwargs)
self.txt = kwargs['texts']
self.return_text = return_text
def __getitem__(self, index):
itm = super().__getitem__(index)
return (self.txt[index], itm) if self.return_text else itm
class EncoderWrapper:
"""
For evaluation, a wrapper around jskit::BiEncoder
reference: `js_util.evaluate.py`
"""
def __init__(self, model: zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.models.BiEncoder, tokenizer: BertTokenizer):
self.tokenizer, self.model = tokenizer, model
self.max_cont_length = zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.evaluate.max_contexts_length
self.max_cand_length = zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.evaluate.max_candidate_length
def __call__(
self, texts: List[str], embed_type: str, batch_size: int = 32, device: str = None, return_text=False
) -> Tuple[int, Iterable[torch.Tensor]]:
"""
Yields batched embeddings in the order of `txts`
"""
assert embed_type in ['context', 'candidate']
if embed_type == "context":
dset_args = dict(context_transform=zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.tokenizer.SelectionJoinTransform(
tokenizer=self.tokenizer, max_len=self.max_cont_length
))
else:
dset_args = dict(candidate_transform=zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.tokenizer.SelectionSequentialTransform(
tokenizer=self.tokenizer, max_len=self.max_cand_length
))
dset = MyEvalDataset(texts=texts, **dset_args, mode=embed_type, return_text=return_text)
def collate(samples):
if return_text:
txts, ins = zip(*samples)
return list(txts), dset.eval_str(ins)
else:
return dset.eval_str(samples)
dl = DataLoader(dset, batch_size=batch_size, collate_fn=collate, shuffle=False, pin_memory=True)
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def callback():
for inputs in dl:
txt, inputs = inputs if return_text else (None, inputs)
inputs = tuple(t.to(device) for t in inputs)
input_ids, attention_masks = inputs
inputs = dict(get_embedding=embed_type, mode='get_embed')
if embed_type == 'context':
inputs |= dict(context_input_ids=input_ids, context_input_masks=attention_masks)
else:
inputs |= dict(candidate_input_ids=input_ids, candidate_input_masks=attention_masks)
with torch.no_grad():
outputs = self.model(**inputs).squeeze(1)  # TODO: needed since changes were made to `BiEncoder`
yield (txt, outputs) if return_text else outputs
return len(dl), callback()
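# Illustrative sketch (not in the original file): embedding a list of label strings with the
# wrapper; it returns the batch count and a generator of embedding tensors to concatenate.
def _example_encode_candidates(model, tokenizer, labels: List[str]) -> torch.Tensor:
    ew = EncoderWrapper(model, tokenizer)
    n_batch, it = ew(labels, embed_type='candidate', batch_size=32)  # mirrors `evaluate_trained` usage
    return torch.cat([e for e in it])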
def evaluate_trained(domain: str = 'in', candidate_batch_size: int = 256, context_batch_size: int = 32):
ca(domain=domain)
tokenizer, model = load_model()
model.eval()
ew = EncoderWrapper(model, tokenizer)
d_dset = get_datasets(in_domain_data_path if domain == 'in' else out_of_domain_data_path)
dataset_names = [dnm for dnm in d_dset.keys() if dnm != 'all']
domain_str = f'{domain} domain'
output_dir = os_join(BASE_PATH, PROJ_DIR, 'evaluations', MODEL_NAME, f'{now(for_path=True)}, {domain_str}')
model_cnm = model.__class__.__qualname__
d_model = OrderedDict([
('model name', model_cnm), ('trained #epoch', 3),
('context limit', zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.evaluate.max_contexts_length),
('candidate limit', zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.evaluate.max_candidate_length),
])
d_eval = OrderedDict([
('datasets', dataset_names),
('context max batch size', context_batch_size),
('candidate max batch size', candidate_batch_size)
])
logger_name = f'{MD_NM_OUT} Evaluation'
logger = get_logger(logger_name, typ='stdout')
logger_fl = get_logger(
f'{logger_name} file-write', typ='file-write',
file_path=os_join(output_dir, f'{logger_name}, {domain_str}.log')
)
logger.info(f'Running evaluation {pl.i(domain_str)} on model {pl.i(d_model)}, with {pl.i(d_eval)}... ')
logger_fl.info(f'Running evaluation {domain_str} on model {pl.nc(d_model)}, with {pl.nc(d_eval)}... ')
for dnm in dataset_names:
dset = d_dset[dnm]['test']
_dset = sorted(dset) # map from unique text to all possible labels; sort by text then label
txt2lbs = {k: set(lb for txt, lb in v) for k, v in itertools.groupby(_dset, key=lambda pair: pair[0])}
idx2lb = labels = sorted(set().union(*[v for v in txt2lbs.values()]))
lb2idx = {lb: i for i, lb in enumerate(labels)}
vects_lb = torch.cat([e for e in ew(labels, embed_type='candidate', batch_size=candidate_batch_size)[1]])
lst_preds, lst_labels = [], []
n, it = ew(list(txt2lbs.keys()), embed_type='candidate', return_text=True, batch_size=context_batch_size)
logger.info(f'Running evaluation on dataset {pl.i(dnm)}, with labels {pl.i(labels)}, '
f'of {pl.i(len(txt2lbs))} unique texts in {pl.i(n)} batches... ')
logger_fl.info(
f'Running evaluation on dataset {dnm}, with labels {labels}, '
f'of {len(txt2lbs)} unique texts in {n} batches... ')
for txts, vects_txt in tqdm(it, total=n):
logits = vects_txt @ vects_lb.t()
preds = logits.argmax(dim=-1)
def get_true_label(pred, txt):
pos_lb_pool = txt2lbs[txt]
if idx2lb[pred] in pos_lb_pool:
return pred
else: # Pick one true label arbitrarily if it doesn't match prediction
return next(lb2idx[lb] for lb in pos_lb_pool)
lbs = torch.tensor(
[get_true_label(p, txt) for p, txt in zip(preds.tolist(), txts)], dtype=torch.long, device=preds.device
)
lst_preds.append(preds)
lst_labels.append(lbs)
preds_all, labels_all = torch.cat(lst_preds).cpu().numpy(), torch.cat(lst_labels).cpu().numpy()
df = pd.DataFrame(
classification_report(labels_all, preds_all, target_names=labels, output_dict=True)
).transpose()
path = os_join(output_dir, f'{dnm}.csv')
df.to_csv(path)
logger.info(f'Evaluation on {pl.i(dnm)} written to CSV at {pl.i(path)}')
logger_fl.info(f'Evaluation on {dnm} written to CSV at {path}')
if __name__ == '__main__':
import transformers
from icecream import mic
seed = sconfig('random-seed')
js_bi.set_seed(seed)
transformers.set_seed(seed)
def import_check():
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.bi import (
config as bi_enc_config, set_seed,
tokenizer, model
)
mic(config_parser2dict(bi_enc_config))
mic(tokenizer, type(model))
# import_check()
# run_train()
# evaluate_trained(context_batch_size=256)
evaluate_trained(domain='out') | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/dual_bi_encoder/dual_bi_encoder.py | dual_bi_encoder.py |
import os
import configparser
import torch
from typing import List
from fastapi import HTTPException
from transformers import BertModel, BertConfig, BertTokenizer
# ========================== Begin of modified ==========================
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils import CONFIG_PATH
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.evaluate import get_embeddings, get_inference
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.models import BiEncoder
# ========================== End of modified ==========================
import traceback
import numpy as np
# ========================== Begin of modified ==========================
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils.train import train_model
# import jaseci.actions.remote_actions as jra
# ========================== End of modified ==========================
import random
config = configparser.ConfigParser()
model, model_name, shared, seed, tokenizer = None, None, None, None, None
save_restart = False
output_dir = "log_output"
DEFAULT_MODEL_NAME = 'pytorch_model.bin'
DEFAULT_MODEL_PATH = os.path.join(output_dir, 'pytorch_model.bin')
# device = torch.device("cpu")
# uncomment this if you wish to use GPU to train
# this is commented out because this causes issues with
# unittest on machines with GPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# function to set the seed for the module
def set_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# function for config setup
# ========================== Begin of modified ==========================
# config.read('utils/config.cfg')
config.read(CONFIG_PATH)
# ========================== Begin of modified ==========================
def config_setup():
"""
Load configurations from utils/config.cfg and initialize the tokenizer and model
"""
global seed, model, save_restart, tokenizer, device, shared, config
model_name = config['MODEL_PARAMETERS']['MODEL_NAME']
shared = config['MODEL_PARAMETERS']['SHARED']
seed = int(config['TRAIN_PARAMETERS']['SEED'])
if model is None:
bert_config = BertConfig()
tokenizer = BertTokenizer.from_pretrained(model_name,
do_lower_case=True, clean_text=False)
if shared == "True":
cont_bert = BertModel(bert_config)
cand_bert = cont_bert
print("shared model created")
else:
cont_bert = BertModel(bert_config)
cand_bert = BertModel(bert_config)
print("non shared model created")
model = BiEncoder(config=bert_config,
cont_bert=cont_bert, cand_bert=cand_bert, shared=shared)
elif save_restart:
torch.save(model.state_dict(), DEFAULT_MODEL_PATH)
bert_config = BertConfig()
tokenizer = BertTokenizer.from_pretrained(model_name,
do_lower_case=True, clean_text=False)
cont_bert = BertModel(bert_config)
cand_bert = BertModel(bert_config)
model = BiEncoder(config=bert_config,
cont_bert=cont_bert, cand_bert=cand_bert, shared=shared)
save_restart = False
model.to(device)
set_seed(seed)
# ========================== Begin of modified ==========================
# config_setup() # Will call it myself
# ========================== End of modified ==========================
# API for getting the cosine similarity
# ========================== Begin of modified ==========================
# @jra.jaseci_action(act_group=['bi_enc'])
# ========================== End of modified ==========================
def cosine_sim(vec_a: list, vec_b: list, meta):
"""
Calculate the cosine similarity score of two given vectors
Param 1 - First vector
Param 2 - Second vector
Return - float between 0 and 1
"""
result = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) *
np.linalg.norm(vec_b))
return result.astype(float)
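# Illustrative sketch (not part of the original jskit API): `_demo_cosine_sim` is a
# hypothetical, never-called helper showing how `cosine_sim` behaves on toy vectors.
def _demo_cosine_sim():
    a, b = [1.0, 2.0, 3.0], [1.0, 2.0, 3.0]
    return cosine_sim(a, b, meta=None)  # identical direction -> 1.0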
# ========================== Begin of modified ==========================
# @jra.jaseci_action(act_group=['bi_enc'])
# ========================== End of modified ==========================
def infer(contexts: List, candidates: List):
"""
Take list of context, candidate and return nearest candidate to the context
"""
global model
model.eval()
predicted_candidate = get_inference(model, tokenizer,
contexts=contexts,
candidates=candidates)
return predicted_candidate
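# Illustrative sketch (hypothetical utterance/intents, not part of the original API):
# `infer` returns the candidate whose embedding is closest to the context;
# `config_setup()` or `load_model()` must have been called first.
def _demo_infer():
    contexts = ["I want to book a flight to Paris"]
    candidates = ["book flight", "cancel reservation", "weather query"]
    return infer(contexts, candidates)  # -> the best-matching candidate string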
# API for training
# ========================== Begin of modified ==========================
# @jra.jaseci_action(act_group=['bi_enc'])
# ========================== End of modified ==========================
def train(contexts: List, candidates: List, labels: List[int]):
"""
Take list of context, candidate, labels and trains the model
"""
global model
model.train()
try:
model = train_model(
model_train=model,
tokenizer=tokenizer,
contexts=contexts,
candidates=candidates,
labels=labels,
output_dir="model_output"
)
return "Model Training is complete."
except Exception as e:
print(e)
print(traceback.print_exc())
raise HTTPException(status_code=500, detail=str(e))
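# Illustrative sketch of the expected row format (hypothetical utterances; not part of the
# original API): the context is repeated once per candidate, a label-1 row starts a group
# and, per the dataset sanity check, is followed by exactly 3 label-0 (negative) rows.
def _demo_train():
    contexts = ["play some jazz music"] * 4 + ["what is the weather tomorrow"] * 4
    candidates = ["play music", "set alarm", "get weather", "book flight",
                  "get weather", "play music", "set alarm", "book flight"]
    labels = [1, 0, 0, 0, 1, 0, 0, 0]
    return train(contexts, candidates, labels)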
# API for getting Context Embedding
# ========================== Begin of modified ==========================
# @jra.jaseci_action(act_group=['bi_enc'], aliases=['encode_context'])
# ========================== End of modified ==========================
def get_context_emb(contexts: List):
"""
Take list of context and returns the embeddings
"""
global model, tokenizer
model.eval()
embedding = get_embeddings(
model=model, tokenizer=tokenizer, text_data=contexts, embed_type="context")
return embedding
# API for getting Candidates Embedding
# ========================== Begin of modified ==========================
# @jra.jaseci_action(act_group=['bi_enc'], aliases=['encode_candidate'])
# ========================== End of modified ==========================
def get_candidate_emb(candidates: List):
"""
Take list of candidates and returns the embeddings
"""
global model, tokenizer
model.eval()
embedding = get_embeddings(
model, tokenizer, text_data=candidates, embed_type="candidate")
return embedding
# API for setting the training and model parameters
# ========================== Begin of modified ==========================
# @jra.jaseci_action(act_group=['bi_enc'])
# ========================== End of modified ==========================
def set_config(training_parameters, model_parameters):
"""
Update the configuration file with any new incoming parameters
"""
global config, save_restart
# ========================== Begin of modified ==========================
config.read(CONFIG_PATH)
# ========================== End of modified ==========================
if training_parameters:
config['TRAIN_PARAMETERS'].update(training_parameters)
if model_parameters:
config['MODEL_PARAMETERS'].update(model_parameters)
save_restart = True
with open(CONFIG_PATH, 'w') as configfile:
config.write(configfile)
config_setup()
return "Config setup is complete."
# ========================== Begin of modified ==========================
# @jra.jaseci_action(act_group=['bi_enc'])
# ========================== End of modified ==========================
def save_model(model_path: str):
"""
saves the model to the provided model_path
"""
global model, tokenizer, shared, config
try:
if not model_path.isalnum():
raise HTTPException(
status_code=400,
detail='Invalid model name. Only alphanumeric chars allowed.'
)
if not os.path.exists(model_path):
os.makedirs(model_path)
if shared == "True":
model.config.to_json_file(model_path + "/config.json")
tokenizer.save_vocabulary(model_path)
torch.save(model.cand_bert.state_dict(),
model_path+"/pytorch_model.bin")
with open(model_path+"/config.cfg", 'w') as fp:
config.write(fp)
else:
cand_bert_path = os.path.join(model_path)+"/cand_bert/"
cont_bert_path = os.path.join(model_path)+"/cont_bert/"
if not os.path.exists(cand_bert_path):
os.makedirs(cand_bert_path)
if not os.path.exists(cont_bert_path):
os.makedirs(cont_bert_path)
model.cand_bert.config.to_json_file(cand_bert_path + "config.json")
model.cont_bert.config.to_json_file(cont_bert_path + "config.json")
tokenizer.save_vocabulary(cand_bert_path)
tokenizer.save_vocabulary(cont_bert_path)
torch.save(model.cand_bert.state_dict(),
cand_bert_path+"pytorch_model.bin")
torch.save(model.cont_bert.state_dict(),
cont_bert_path+"pytorch_model.bin")
with open(model_path+"/config.cfg", 'w') as fp:
config.write(fp)
return (f'[Saved model at] : {model_path}')
except Exception as e:
print(traceback.print_exc())
raise HTTPException(status_code=500, detail=str(e))
# ========================== Begin of modified ==========================
# @jra.jaseci_action(act_group=['bi_enc'])
# ========================== End of modified ==========================
def load_model(model_path):
"""
loads the model from the provided model_path
"""
global device, model, tokenizer, shared, config
if not os.path.exists(model_path):
raise HTTPException(
status_code=404,
detail='Model path is not available'
)
try:
config.read(model_path+'/config.cfg')
shared = config['MODEL_PARAMETERS']['SHARED']
if shared == "True":
bert_config = BertConfig()
tokenizer = BertTokenizer.from_pretrained(os.path.join(
model_path, "vocab.txt"), do_lower_case=True, clean_text=False)
cont_bert_state_dict = torch.load(
model_path+"/pytorch_model.bin", map_location="cpu")
cont_bert = BertModel.from_pretrained(
model_path, state_dict=cont_bert_state_dict)
cand_bert = cont_bert
else:
cand_bert_path = os.path.join(model_path, "cand_bert")
cont_bert_path = os.path.join(model_path, "cont_bert")
# ========================== Begin of added ==========================
tokenizer_path = os.path.join(model_path, 'tokenizer')
# ========================== End of added ==========================
print('Loading parameters from', cand_bert_path)
cont_bert_state_dict = torch.load(
cont_bert_path+"/pytorch_model.bin", map_location="cpu")
cand_bert_state_dict = torch.load(
cand_bert_path+"/pytorch_model.bin", map_location="cpu")
cont_bert = BertModel.from_pretrained(
cont_bert_path, state_dict=cont_bert_state_dict)
cand_bert = BertModel.from_pretrained(
cand_bert_path, state_dict=cand_bert_state_dict)
# ========================== Begin of modified ==========================
tokenizer = BertTokenizer.from_pretrained(
tokenizer_path, do_lower_case=True, clean_text=False)
# ========================== End of modified ==========================
bert_config = BertConfig.from_json_file(
os.path.join(cand_bert_path, 'config.json'))
model = BiEncoder(config=bert_config,
cont_bert=cont_bert, cand_bert=cand_bert, shared=shared)
model.to(device)
return (f'[loaded model from] : {model_path}')
except Exception as e:
print(traceback.print_exc())
raise HTTPException(status_code=500, detail=str(e))
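# Illustrative sketch (hypothetical directory name, not part of the original API): restoring a
# previously trained, non-shared bi-encoder; the directory is expected to hold cand_bert/,
# cont_bert/ and tokenizer/ sub-directories together with a config.cfg.
def _demo_load_trained():
    return load_model("model_output")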
# ========================== Begin of modified ==========================
# if __name__ == "__main__":
# jra.launch_server(port=8000)
# ========================== End of modified ==========================
# ========================== End of file: zeroshot_classifier/models/dual_bi_encoder/jskit/encoders/bi.py ==========================
from fastapi.responses import JSONResponse
from transformers import BertConfig, BertTokenizer
import torch.nn.functional as F
import torch
import configparser
import os
from Utilities import models, evaluate, train
import jaseci.actions.remote_actions as jra
config = configparser.ConfigParser()
model, model_name, shared, seed, tokenizer = None, None, None, None, None
save_restart = False
output_dir = "log_output"
state_save_path = os.path.join(output_dir, 'pytorch_model.bin')
def config_setup():
global model, model_name, shared, seed, save_restart, tokenizer, config
config.read('Utilities/config.cfg')
model_name = config['MODEL_PARAMETERS']['MODEL_NAME']
shared = config['MODEL_PARAMETERS']['SHARED']
seed = config['TRAIN_PARAMETERS']['SEED']
poly_m = config['MODEL_PARAMETERS']['POLY_M']
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if model is None:
bert_config = BertConfig()
tokenizer = BertTokenizer.from_pretrained(
model_name,
do_lower_case=True)
model = models.PolyEncoderModelShared(
config=bert_config,
model_name=model_name,
poly_m=poly_m,
shared=shared)
elif save_restart:
torch.save(model.state_dict(), state_save_path)
bert_config = BertConfig()
tokenizer = BertTokenizer.from_pretrained(
model_name,
do_lower_case=True)
model = models.PolyEncoderModelShared(
config=bert_config,
model_name=model_name,
poly_m=poly_m,
shared=shared)
save_restart = False
model.to(device)
config_setup()
@jra.jaseci_action(act_group=['poly_enc'], aliases=['get_poly_cos_sim'])
def cosSimilarityScore(context_embedding, candidate_embedding):
tensors = (context_embedding, candidate_embedding)
context_vecs, candidates_vec = (torch.tensor(
t, dtype=torch.float) for t in tensors)
candidates_vec = candidates_vec.view(1, 1, -1).expand(1, 1, 64)
final_context_vec = models.dot_attention(
candidates_vec,
context_vecs,
context_vecs,
None, None)
final_context_vec = F.normalize(final_context_vec, 2, -1)
dot_product = torch.sum(final_context_vec * candidates_vec, -1)
cos_similarity = (dot_product + 1) / 2
return JSONResponse(content={"cos_score": cos_similarity.item()})
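# Illustrative sketch (hypothetical 64-d vectors, 16 poly codes chosen only for illustration;
# not part of the original API): the context embedding is a list of poly_m vectors, the
# candidate embedding a single vector, and the response carries one "cos_score" value.
def _demo_poly_cos_sim():
    context_embedding = [[0.1] * 64 for _ in range(16)]
    candidate_embedding = [0.2] * 64
    return cosSimilarityScore(context_embedding, candidate_embedding)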
@jra.jaseci_action(act_group=['poly_enc'], aliases=['inference'])
def getinference(contexts, candidates):
global model
model.eval()
predicted_label = evaluate.get_inference(
model,
tokenizer,
context=contexts,
candidate=candidates)
return JSONResponse(content={"label": predicted_label})
@jra.jaseci_action(act_group=['poly_enc'], aliases=['train'])
def trainModel(contexts, candidates):
global model
model.train()
try:
model = train.train_model(model, tokenizer, contexts, candidates)
return JSONResponse(content="Model Training is comnpleted",
status_code=200)
except Exception as e:
print(e)
return JSONResponse(content="Error Occured", status_code=500)
@jra.jaseci_action(act_group=['poly_enc'], aliases=['getcontextembedding'])
def getContextEmbedding(contexts):
global model, tokenizer
model.eval()
embedding = evaluate.get_context_embedding(
model,
tokenizer,
contexts)
return JSONResponse(content={
"context_embed": embedding.cpu().numpy().tolist()})
@jra.jaseci_action(act_group=['poly_enc'], aliases=['getcandidateembedding'])
def getCandidateEmbedding(candidates):
global model, tokenizer
model.eval()
embedding = evaluate.get_candidate_embedding(model, tokenizer, candidates)
return JSONResponse(content={
"candidate_embed": embedding.cpu().numpy().tolist()})
@jra.jaseci_action(act_group=['poly_enc'], aliases=['setconfig'])
def setConfig(training_parameters, model_parameters):
global config, save_restart
config.read('Utilities/config.cfg')
train_param = config['TRAIN_PARAMETERS']
model_param = config['MODEL_PARAMETERS']
if training_parameters:
if "MAX_CONTEXTS_LENGTH" in training_parameters:
train_param["MAX_CONTEXTS_LENGTH"] = training_parameters[
'MAX_CONTEXTS_LENGTH']
if "MAX_RESPONSE_LENGTH" in training_parameters:
train_param["MAX_RESPONSE_LENGTH"] = training_parameters[
'MAX_RESPONSE_LENGTH']
if "TRAIN_BATCH_SIZE" in training_parameters:
train_param["TRAIN_BATCH_SIZE"] = training_parameters[
'TRAIN_BATCH_SIZE']
if "EVAL_BATCH_SIZE" in training_parameters:
train_param["EVAL_BATCH_SIZE"] = training_parameters[
'EVAL_BATCH_SIZE']
if "MAX_HISTORY" in training_parameters:
train_param["MAX_HISTORY"] = training_parameters['MAX_HISTORY']
if "LEARNING_RATE" in training_parameters:
train_param["LEARNING_RATE"] = training_parameters['LEARNING_RATE']
if "WEIGHT_DECAY" in training_parameters:
train_param["WEIGHT_DECAY"] = training_parameters['WEIGHT_DECAY']
if "WARMUP_STEPS" in training_parameters:
train_param["WARMUP_STEPS"] = training_parameters['WARMUP_STEPS']
if "ADAM_EPSILON" in training_parameters:
train_param["ADAM_EPSILON"] = training_parameters['ADAM_EPSILON']
if "MAX_GRAD_NORM" in training_parameters:
train_param["MAX_GRAD_NORM"] = training_parameters['MAX_GRAD_NORM']
if "NUM_TRAIN_EPOCHS" in training_parameters:
train_param["NUM_TRAIN_EPOCHS"] = training_parameters[
'NUM_TRAIN_EPOCHS']
if "SEED" in training_parameters:
train_param["SEED"] = training_parameters['SEED']
if "GRADIENT_ACCUMULATION_STEPS" in training_parameters:
train_param["GRADIENT_ACCUMULATION_STEPS"] = training_parameters[
'GRADIENT_ACCUMULATION_STEPS']
if "FP16" in training_parameters:
train_param["FP16"] = training_parameters['FP16']
if "FP16_OPT_LEVEL" in training_parameters:
train_param["FP16_OPT_LEVEL"] = training_parameters[
'FP16_OPT_LEVEL']
if "GPU" in training_parameters:
train_param["GPU"] = training_parameters['GPU']
if model_parameters:
if "SHARED" in model_parameters:
model_param["SHARED"] = model_parameters["SHARED"]
if "MODEL_NAME" in model_parameters:
model_param["MODEL_NAME"] = model_parameters["MODEL_NAME"]
if "POLY_M" in model_parameters:
model_param["POLY_M"] = model_parameters["POLY_M"]
save_restart = True
with open("Utilities/config.cfg", 'w') as configfile:
config.write(configfile)
config_setup()
return JSONResponse(content="config setup completed") | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/dual_bi_encoder/jskit/encoders/poly.py | poly.py |
from typing import List
from sentence_transformers import InputExample, losses
from sentence_transformers import SentenceTransformer, models
from sentence_transformers.util import cos_sim
from torch import nn
from torch.utils.data import DataLoader
import nlpaug.augmenter.word as naw
import torch
import math
from datetime import datetime
import numpy as np
from fastapi.responses import JSONResponse
import jaseci.actions.remote_actions as jra
model_name = "bert-base-uncased"
device = "cuda" if torch.cuda.is_available() else "cpu"
num_epochs = 2
model_save_path = 'output/sent_' + \
model_name.replace("/", "-")+'-' + \
datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
def create_model(model_name="bert-base-uncased", max_seq_length=256):
word_embedding_model = models.Transformer(
model_name, max_seq_length=max_seq_length)
pooling_model = models.Pooling(
word_embedding_model.get_word_embedding_dimension())
dense_model = models.Dense(
in_features=pooling_model.get_sentence_embedding_dimension(),
out_features=256, activation_function=nn.Tanh())
model = SentenceTransformer(
modules=[word_embedding_model, pooling_model, dense_model])
return model
model = create_model(model_name=model_name, max_seq_length=256)
def get_aug_sample(text1, text2):
# Contextual word insertion using BERT to generate augmented (label-0) pairs
aug = naw.ContextualWordEmbsAug(
model_path=model_name, action="insert", device=device)
aug_samples = []
for sample1, sample2 in zip(text1, text2):
augmented_texts = aug.augment([sample1, sample2])
print(augmented_texts)
inp_example = InputExample(texts=augmented_texts, label=0)
aug_samples.append(inp_example)
print("Textual augmentation completed....")
print("Number of silver pairs generated: {}".format(len(aug_samples)))
return aug_samples
@jra.jaseci_action(act_group=['sent_enc'], aliases=['train_model'])
def train(text1: List[str], text2: List[str]):
global model, model_name, device
try:
gold_samples = []
for sample1, sample2 in zip(text1, text2):
inp_example = InputExample(texts=[sample1, sample2], label=1)
gold_samples.append(inp_example)
# generate Samples for 0 labels
aug_samples = get_aug_sample(text1, text2)
# Define your train dataset, the dataloader and the train loss
train_dataloader = DataLoader(
gold_samples + aug_samples, shuffle=True, batch_size=16)
train_loss = losses.ContrastiveLoss(model)
# Configure the training.
# 10% of train data for warm-up
warmup_steps = math.ceil(len(train_dataloader) * num_epochs * 0.1)
print("Warmup-steps: {}".format(warmup_steps))
# Tune the model
model.fit(train_objectives=[
(train_dataloader, train_loss)], epochs=num_epochs,
warmup_steps=warmup_steps,
output_path=model_save_path
)
return JSONResponse(content="Model Training is comnpleted",
status_code=200)
except Exception as e:
print(e)
return JSONResponse(content=f"Error Occured {str(e)}",
status_code=500)
@jra.jaseci_action(act_group=['sent_enc'], aliases=['inference'])
def predict(text1: str, text2: List[str]):
try:
sim = np.zeros(len(text2))
text_embeddings = model.encode(text1)
label_embeddings = model.encode(text2)
for i in range(len(text2)):
sim[i] = cos_sim(text_embeddings, label_embeddings[i])
print(sim)
return JSONResponse(
{
"label": text2[np.argmax(sim)],
"conf_score": sim[np.argmax(sim)]
}
)
except Exception as e:
print(e)
return JSONResponse(content=f"Error Occured : {str(e)}",
status_code=500)
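# Illustrative sketch (hypothetical text/labels, not part of the original API): zero-shot
# prediction picks the label whose sentence embedding is most cosine-similar to the input.
def _demo_predict():
    return predict("book me a flight to new york", ["travel", "sports", "finance"])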
@jra.jaseci_action(act_group=['sent_enc'], aliases=['getembeddings'])
def getEmbedding(text):
global model
model.eval()
try:
embeddings = model.encode(text)
return JSONResponse(content={"embed":
np.squeeze(
np.asarray(embeddings)).tolist()},
status_code=200)
except Exception as e:
print(e)
return JSONResponse(content=f"Error Occured : {str(e)}",
status_code=500)
@jra.jaseci_action(act_group=['sent_enc'], aliases=['get_cos_sim'])
def cosine_sim(vec_a: list, vec_b: list, meta):
"""
Calculate the cosine similarity score of two given vectors
Param 1 - First vector
Param 2 - Second vector
Return - float between 0 and 1
"""
result = np.dot(vec_a, vec_b) / (np.linalg.norm(vec_a) *
np.linalg.norm(vec_b))
return result.astype(float)
# ========================== End of file: zeroshot_classifier/models/dual_bi_encoder/jskit/encoders/sent_enc.py ==========================
import torch
from torch.utils.data import DataLoader
import os
import time
from tqdm import tqdm
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
# ========================== Begin of modified ==========================
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils import CONFIG_PATH
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils import tokenizer as token_util
# ========================== End of modified ==========================
import configparser
config = configparser.ConfigParser()
device, max_contexts_length, max_candidate_length, train_batch_size, \
eval_batch_size, max_history, learning_rate, weight_decay, warmup_steps, \
adam_epsilon, max_grad_norm, fp16, fp16_opt_level, gpu, \
gradient_accumulation_steps, num_train_epochs = None, None, None, None, \
None, None, None, None, None, None, None, None, None, None, None, None
# training setup
def config_setup():
global device, basepath, max_contexts_length, max_candidate_length, \
train_batch_size, eval_batch_size, max_history, learning_rate, \
weight_decay, warmup_steps, adam_epsilon, max_grad_norm, fp16, \
fp16_opt_level, gpu, gradient_accumulation_steps, num_train_epochs, shared
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device('cpu')
# ========================== Begin of modified ==========================
config.read(CONFIG_PATH)
# ========================== End of modified ==========================
max_contexts_length = int(
config['TRAIN_PARAMETERS']['MAX_CONTEXTS_LENGTH'])
max_candidate_length = int(
config['TRAIN_PARAMETERS']['MAX_CANDIDATE_LENGTH'])
train_batch_size = int(config['TRAIN_PARAMETERS']['TRAIN_BATCH_SIZE'])
eval_batch_size = int(config['TRAIN_PARAMETERS']['EVAL_BATCH_SIZE'])
max_history = int(config['TRAIN_PARAMETERS']['MAX_HISTORY'])
learning_rate = float(config['TRAIN_PARAMETERS']['LEARNING_RATE'])
weight_decay = float(config['TRAIN_PARAMETERS']['WEIGHT_DECAY'])
warmup_steps = int(config['TRAIN_PARAMETERS']['WARMUP_STEPS'])
adam_epsilon = float(config['TRAIN_PARAMETERS']['ADAM_EPSILON'])
max_grad_norm = float(config['TRAIN_PARAMETERS']['MAX_GRAD_NORM'])
gradient_accumulation_steps = int(
config['TRAIN_PARAMETERS']['GRADIENT_ACCUMULATION_STEPS'])
num_train_epochs = int(config['TRAIN_PARAMETERS']['NUM_TRAIN_EPOCHS'])
fp16 = bool(config['TRAIN_PARAMETERS']['FP16'])
fp16_opt_level = str(config['TRAIN_PARAMETERS']['FP16_OPT_LEVEL'])
gpu = int(config['TRAIN_PARAMETERS']['GPU'])
shared = config['MODEL_PARAMETERS']['SHARED'] == "True"  # bool() of any non-empty string is True, so compare against the literal
output_dir = "log_output"
train_dir = ""
model = None
global_step, tr_loss, nb_tr_steps, epoch, device, basepath, shared = None, None, \
None, None, None, None, None
# training function
def train_model(model_train, tokenizer, contexts, candidates, labels, output_dir, val=False):
config_setup()
global model, global_step, tr_loss, nb_tr_steps, epoch, device, basepath, shared
model = model_train
context_transform = token_util.SelectionJoinTransform(
tokenizer=tokenizer,
max_len=int(max_contexts_length)
)
# ========================== Begin of added ==========================
# `SelectionJoinTransform` modifies the tokenizer by adding a special token ('\n')
# => the embedding matrices of both encoders need to grow accordingly,
# since this same `tokenizer` is used to tokenize both context and candidate;
# the user is responsible for ensuring that the added special token does not
# otherwise appear inside the raw input texts
model.cont_bert.resize_token_embeddings(len(tokenizer))
model.cand_bert.resize_token_embeddings(len(tokenizer))
# ========================== End of added ==========================
candidate_transform = token_util.SelectionSequentialTransform(
tokenizer=tokenizer,
max_len=int(max_candidate_length)
)
train_dataset = token_util.SelectionDataset(
contexts=contexts,
candidates=candidates,
labels=labels,
context_transform=context_transform,
candidate_transform=candidate_transform
)
train_dataloader = DataLoader(
train_dataset,
batch_size=train_batch_size,
collate_fn=train_dataset.batchify_join_str,
shuffle=True
)
t_total = len(train_dataloader) // train_batch_size * \
(max(5, num_train_epochs))
epoch_start = 1
global_step = 0
bert_dir = output_dir+"/bert"
resp_bert_dir = output_dir+"/resp_bert"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if not os.path.exists(bert_dir):
os.makedirs(bert_dir)
if not os.path.exists(resp_bert_dir):
os.makedirs(resp_bert_dir)
log_wf = open(os.path.join(output_dir, 'log.txt'),
'a', encoding='utf-8')
if shared:
state_save_path = os.path.join(output_dir, 'pytorch_model.bin')
else:
# ========================== Begin of modified ==========================
# outdated already
# state_save_path = os.path.join(bert_dir, 'pytorch_model.bin')
# state_save_path_1 = os.path.join(
# resp_bert_dir, 'pytorch_model.bin')
cand_bert_path = os.path.join(output_dir, 'cand_bert')
cont_bert_path = os.path.join(output_dir, 'cont_bert')
tokenizer_path = os.path.join(output_dir, 'tokenizer')
os.makedirs(cand_bert_path, exist_ok=True)
os.makedirs(cont_bert_path, exist_ok=True)
# ========================== End of modified ==========================
state_save_path = os.path.join(output_dir, 'pytorch_model.bin')
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in model.named_parameters() if not any(
nd in n for nd in no_decay)],
"weight_decay": weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(
nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=learning_rate, eps=adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total
)
fp16 = False
if fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
'''Please install apex from https://www.github.com/nvidia/apex
to use fp16 training''')
model, optimizer = amp.initialize(
model, optimizer, opt_level=fp16_opt_level)
print_freq = 1
# ========================== Begin of modified ==========================
loss_print_freq = int(1e4)
# ========================== End of modified ==========================
eval_freq = min(len(train_dataloader), 1000)
print('Print freq:', print_freq, "Eval freq:", eval_freq)
train_start_time = time.time()
print(f"train_start_time : {train_start_time}")
for epoch in range(epoch_start, int(num_train_epochs) + 1):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
with tqdm(total=len(train_dataloader)) as bar:
for step, batch in enumerate(train_dataloader, start=1):
model.train()
optimizer.zero_grad()
batch = tuple(t.to(device) for t in batch)
context_token_ids_list_batch, context_input_masks_list_batch, \
candidate_token_ids_list_batch, candidate_input_masks_list_batch, labels_batch = batch
loss = model(context_token_ids_list_batch, context_input_masks_list_batch,
candidate_token_ids_list_batch, candidate_input_masks_list_batch,
labels_batch)
# ========================== Begin of modified ==========================
# print(f"loss is : {loss}")
if step % loss_print_freq == 0:
print(f'epoch {epoch}, step {step}, training loss {loss}')
# ========================== End of modified ==========================
tr_loss += loss.item()
nb_tr_examples += context_token_ids_list_batch.size(0)
nb_tr_steps += 1
if fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), max_grad_norm)
optimizer.step()
if global_step < warmup_steps:
scheduler.step()
model.zero_grad()
global_step += 1
if step % print_freq == 0:
bar.update(min(print_freq, step))
time.sleep(0.02)
# ========================== Begin of modified ==========================
# print(global_step, tr_loss / nb_tr_steps)
# ========================== End of modified ==========================
log_wf.write('%d\t%f\n' %
(global_step, tr_loss / nb_tr_steps))
log_wf.flush()
pass
if shared is True:
torch.save(model.state_dict(), state_save_path)
else:
# ========================== Begin of modified ==========================
# print('[Saving at]', state_save_path)
# log_wf.write('[Saving at] %s\n' % state_save_path)
# torch.save(model.resp_bert.state_dict(), state_save_path_1)
# torch.save(model.bert.state_dict(), state_save_path)
# See https://github.com/Jaseci-Labs/jaseci/issues/152
print(f'[Saving at] {tokenizer_path}, {cand_bert_path} and {cont_bert_path}')
log_wf.write(f'[Saving at] {tokenizer_path}, {cand_bert_path} and {cont_bert_path}\n')
# md_fnm = 'pytorch_model.bin'
# torch.save(model.cont_bert.state_dict(), os.path.join(cont_bert_path, md_fnm))
# torch.save(model.cand_bert.state_dict(), os.path.join(cand_bert_path, md_fnm))
tokenizer.save_pretrained(tokenizer_path)
model.cont_bert.save_pretrained(cont_bert_path)
model.cand_bert.save_pretrained(cand_bert_path)
# ========================== End of modified ==========================
return model
# ========================== End of file: zeroshot_classifier/models/dual_bi_encoder/jskit/encoders/utils/train.py ==========================
import torch
from torch.utils.data import DataLoader
# ========================== Begin of modified ==========================
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils import CONFIG_PATH
from zeroshot_classifier.models.dual_bi_encoder.jskit.encoders.utils import tokenizer as token_util
# ========================== End of modified ==========================
import configparser
config = configparser.ConfigParser()
max_history, max_contexts_length, max_candidate_length, device = None, \
None, None, None
# inference parameters setup
def config_setup():
global max_history, max_contexts_length, max_candidate_length, device
# ========================== Begin of modified ==========================
config.read(CONFIG_PATH)
# ========================== End of modified ==========================
max_history = int(config['TRAIN_PARAMETERS']['MAX_HISTORY'])
max_contexts_length = int(
config['TRAIN_PARAMETERS']['MAX_CONTEXTS_LENGTH'])
max_candidate_length = int(
config['TRAIN_PARAMETERS']['MAX_CANDIDATE_LENGTH'])
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device('cpu')
config_setup()
def get_inference(model, tokenizer, contexts, candidates):
global max_history, max_contexts_length, max_candidate_length, device
context_transform = token_util.SelectionJoinTransform(
tokenizer=tokenizer,
max_len=max_contexts_length
)
candidate_transform = token_util.SelectionSequentialTransform(
tokenizer=tokenizer,
max_len=max_candidate_length
)
labels = [0] * len(candidates)
test_data = token_util.SelectionDataset(
contexts=contexts,
candidates=candidates,
context_transform=context_transform,
candidate_transform=candidate_transform, mode="eval",
labels=labels
)
val_dataloader = DataLoader(
test_data,
batch_size=1,
collate_fn=test_data.batchify_join_str,
shuffle=False
)
for step, batch in enumerate(val_dataloader, start=1):
batch = tuple(t.to(device) for t in batch)
context_token_ids_list_batch, context_input_masks_list_batch,\
candidate_token_ids_list_batch, candidate_input_masks_list_batch,\
labels_batch = batch
with torch.no_grad():
logits = model(context_token_ids_list_batch, context_input_masks_list_batch,
candidate_token_ids_list_batch, candidate_input_masks_list_batch, mode="eval")
_, prediction = torch.max(logits, dim=-1)
return candidates[prediction]
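# Illustrative sketch (hypothetical utterance/intents, not part of the original API):
# `model` and `tokenizer` are a BiEncoder and its tokenizer, e.g. as set up in the `bi` module.
def _demo_get_inference(model, tokenizer):
    return get_inference(
        model, tokenizer,
        contexts=["could you play some jazz"],
        candidates=["play music", "set alarm", "get weather"],
    )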
# function provides embedding for context and candidate
def get_embeddings(model, tokenizer, text_data, embed_type="context"):
global max_history, max_contexts_length, max_candidate_length, device
if embed_type == "context":
context_transform = token_util.SelectionJoinTransform(
tokenizer=tokenizer,
max_len=max_contexts_length)
context_data = token_util.EvalDataset(
text_data, context_transform=context_transform, candidate_transform=None, mode=embed_type)
else:
candidate_transform = token_util.SelectionSequentialTransform(
tokenizer=tokenizer,
max_len=max_candidate_length)
context_data = token_util.EvalDataset(
text_data, context_transform=None, candidate_transform=candidate_transform, mode=embed_type)
dataloader = DataLoader(context_data, batch_size=1,
collate_fn=context_data.eval_str, shuffle=False, num_workers=0)
for step, batch in enumerate(dataloader, start=1):
batch = tuple(t.to(device) for t in batch)
token_ids_list_batch, input_masks_list_batch = batch
with torch.no_grad():
if embed_type == "context":
embeddings = model(context_input_ids=token_ids_list_batch,
context_input_masks=input_masks_list_batch, get_embedding=embed_type, mode="get_embed")
else:
embeddings = model(candidate_input_ids=token_ids_list_batch,
candidate_input_masks=input_masks_list_batch, get_embedding=embed_type, mode="get_embed")
embeddings = embeddings.squeeze(0)
return embeddings.squeeze(0).detach().tolist()
# ========================== End of file: zeroshot_classifier/models/dual_bi_encoder/jskit/encoders/utils/evaluate.py ==========================
import torch
from torch.utils.data import Dataset
# this class transforms text data into the tokenized format required for training and inference
class SelectionDataset(Dataset):
def __init__(self, contexts, candidates, context_transform, candidate_transform, labels=None, mode='train'):
self.context_transform = context_transform
self.candidate_transform = candidate_transform
self.data_source = []
self.mode = mode
group = {
'context': None,
'candidates': [],
'labels': []
}
if mode == "eval":
for text in contexts:
for cand, lbl in zip(candidates, labels):
group['candidates'].append(cand)
group['labels'].append(lbl)
group['context'] = [text]
self.data_source.append(group)
else:
for text, cand, lbl in zip(contexts, candidates, labels):
if lbl == 1 and len(group['candidates']) > 0:
# ========================== Begin of added ==========================
# Sanity check for 2 negative samples, ensures collator `batchify_join_str` works
assert len(group['candidates']) == 3
# ========================== End of added ==========================
self.data_source.append(group)
group = {
'context': None,
'candidates': [],
'labels': []
}
group['candidates'].append(cand)
group['labels'].append(lbl)
group['context'] = [text]
if len(group['candidates']) > 0:
self.data_source.append(group)
# print(self.data_source)
group = {
'context': None,
'candidates': [],
'labels': []
}
if len(self.data_source) < 2 and mode != "eval":
group['context'] = ["This is sample text"]
group['candidates'].append("sampletext")
group['labels'].append(1)
if len(candidates) > 1:
group['context'] = ["This is a sample text"]
group['candidates'].append("notsampletext")
group['labels'].append(0)
self.data_source.append(group)
def __len__(self):
return len(self.data_source)
def __getitem__(self, index):
group = self.data_source[index]
context, candidates, labels = group['context'], group['candidates'], group['labels']
transformed_context = self.context_transform(
context) # [token_ids],[seg_ids],[masks]
transformed_candidates = self.candidate_transform(
candidates) # [token_ids],[seg_ids],[masks]
ret = transformed_context, transformed_candidates, labels
return ret
def batchify_join_str(self, batch):
contexts_token_ids_list_batch, contexts_input_masks_list_batch, \
candidates_token_ids_list_batch, candidates_input_masks_list_batch = [], [], [], []
labels_batch = []
for sample in batch:
(contexts_token_ids_list, contexts_input_masks_list), (
candidates_token_ids_list, candidates_input_masks_list) = sample[:2]
contexts_token_ids_list_batch.append(contexts_token_ids_list)
contexts_input_masks_list_batch.append(
contexts_input_masks_list)
candidates_token_ids_list_batch.append(candidates_token_ids_list)
candidates_input_masks_list_batch.append(
candidates_input_masks_list)
labels_batch.append(sample[-1])
long_tensors = [contexts_token_ids_list_batch, contexts_input_masks_list_batch,
candidates_token_ids_list_batch, candidates_input_masks_list_batch]
contexts_token_ids_list_batch, contexts_input_masks_list_batch, \
candidates_token_ids_list_batch, candidates_input_masks_list_batch = (
torch.tensor(t, dtype=torch.long) for t in long_tensors)
labels_batch = torch.tensor(labels_batch, dtype=torch.long)
return contexts_token_ids_list_batch, contexts_input_masks_list_batch, \
candidates_token_ids_list_batch, candidates_input_masks_list_batch, labels_batch
# this class transforms data to generate embeddings
class EvalDataset(Dataset):
def __init__(self, texts, context_transform=None, candidate_transform=None, mode="context"):
self.context_transform = context_transform
self.candidate_transform = candidate_transform
self.data_source = []
self.mode = mode
# ========================== Begin of modified ==========================
# group = {
# "text": []
# }
# for text in texts:
# group['text'].append(text)
# self.data_source.append(group)
self.data_source = list(texts)
# ========================== End of modified ==========================
def __len__(self):
return len(self.data_source)
def __getitem__(self, index):
# ========================== Begin of modified ==========================
# group = self.data_source[index]
# text = group["text"]
text = [self.data_source[index]] # single text, to fix into the transform code
# ========================== End of modified ==========================
if self.mode == "context":
transformed_text = self.context_transform(
text) # [token_ids],[masks]
else:
transformed_text = self.candidate_transform(
text) # [token_ids],[masks]
return transformed_text
def eval_str(self, batch):
token_ids_list_batch, input_masks_list_batch = [], []
for sample in batch:
token_ids_list, input_masks_list = sample
token_ids_list_batch.append(token_ids_list)
input_masks_list_batch.append(
input_masks_list)
long_tensors = [token_ids_list_batch,
input_masks_list_batch]
token_ids_list_batch, input_masks_list_batch = (
torch.tensor(t, dtype=torch.long) for t in long_tensors)
return token_ids_list_batch, input_masks_list_batch
# this class is for creating token data for candidate
class SelectionSequentialTransform(object):
def __init__(self, tokenizer, max_len):
self.tokenizer = tokenizer
self.max_len = max_len
def __call__(self, texts):
input_ids_list, segment_ids_list, input_masks_list, contexts_masks_list = [], [], [], []
for text in texts:
tokenized_dict = self.tokenizer.encode_plus(
text, max_length=self.max_len, pad_to_max_length=True)
input_ids, input_masks = tokenized_dict['input_ids'], tokenized_dict['attention_mask']
assert len(input_ids) == self.max_len
assert len(input_masks) == self.max_len
input_ids_list.append(input_ids)
input_masks_list.append(input_masks)
return input_ids_list, input_masks_list
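# Illustrative sketch (hypothetical tokenizer/candidates, not part of the original API):
# each candidate is tokenized and padded/truncated to `max_len`, returned as parallel
# id / attention-mask lists.
def _demo_candidate_transform(tokenizer):
    transform = SelectionSequentialTransform(tokenizer=tokenizer, max_len=8)
    input_ids_list, input_masks_list = transform(["book flight", "cancel booking"])
    return len(input_ids_list), len(input_ids_list[0])  # -> (2, 8)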
# this class is for creating token data for context
class SelectionJoinTransform(object):
def __init__(self, tokenizer, max_len):
self.tokenizer = tokenizer
self.max_len = max_len
self.cls_id = self.tokenizer.convert_tokens_to_ids('[CLS]')
self.sep_id = self.tokenizer.convert_tokens_to_ids('[SEP]')
self.tokenizer.add_tokens(['\n'], special_tokens=True)
self.pad_id = 0
def __call__(self, texts):
# another option is to use [SEP], but here we follow the discussion at:
# https://github.com/facebookresearch/ParlAI/issues/2306#issuecomment-599180186
context = '\n'.join(texts)
tokenized_dict = self.tokenizer.encode_plus(context)
input_ids, input_masks = tokenized_dict['input_ids'], tokenized_dict['attention_mask']
input_ids = input_ids[-self.max_len:]
input_ids[0] = self.cls_id
input_masks = input_masks[-self.max_len:]
input_ids += [self.pad_id] * (self.max_len - len(input_ids))
input_masks += [0] * (self.max_len - len(input_masks))
assert len(input_ids) == self.max_len
assert len(input_masks) == self.max_len
return input_ids, input_masks
# ========================== End of file: zeroshot_classifier/models/dual_bi_encoder/jskit/encoders/utils/tokenizer.py ==========================
import torch
import torch.nn as nn
from transformers import BertModel, BertPreTrainedModel
import torch.nn.functional as F
def dot_attention(q, k, v, v_mask=None, dropout=None):
attention_weights = torch.matmul(q, k.transpose(-1, -2))
if v_mask is not None:
extended_v_mask = (1.0 - v_mask.unsqueeze(1)) * -100000.0
attention_weights += extended_v_mask
attention_weights = F.softmax(attention_weights, -1)
if dropout is not None:
attention_weights = dropout(attention_weights)
output = torch.matmul(attention_weights, v)
return output
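# Illustrative shape sketch (random tensors, not part of the original API): q of shape
# [bs, m, dim] attends over k = v of shape [bs, seq_len, dim], yielding one aggregated
# vector per query code.
def _demo_dot_attention_shapes():
    q = torch.randn(2, 16, 64)
    k = v = torch.randn(2, 40, 64)
    v_mask = torch.ones(2, 40)
    return dot_attention(q, k, v, v_mask=v_mask).shape  # -> torch.Size([2, 16, 64])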
class PolyEncoderModelShared(nn.Module):
def __init__(self, config, model_name, shared: bool, *inputs, **kwargs):
super(PolyEncoderModelShared, self).__init__()
if shared is True:
self.cont_model = BertModel.from_pretrained(model_name)
self.cand_model = self.cont_model
else:
self.cont_model = BertModel.from_pretrained(model_name)
self.cand_model = BertModel.from_pretrained(model_name)
self.vec_dim = 64
self.poly_m = int(kwargs['poly_m'])
self.poly_code_embeddings = nn.Embedding(
self.poly_m + 1, config.hidden_size)
try:
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.context_fc = nn.Linear(config.hidden_size, self.vec_dim)
self.candidate_fc = nn.Linear(config.hidden_size, self.vec_dim)
except Exception:
self.dropout = nn.Dropout(config.dropout)
self.context_fc = nn.Linear(config.dim, self.vec_dim)
self.candidate_fc = nn.Linear(config.dim, self.vec_dim)
def forward(self, context_data=None, candidate_data=None, labels=None,
eval=False, get_embedding=False):
if get_embedding and context_data is not None:
context_input_ids, context_segment_ids, \
context_input_masks = context_data[
"context_input_ids"], \
context_data['context_segment_ids'], \
context_data["context_input_masks"]
res_cnt, seq_length = context_input_ids.unsqueeze(0).shape
state_vecs = self.cont_model(context_input_ids.unsqueeze(0),
context_input_masks.unsqueeze(0),
context_segment_ids.unsqueeze(0))[0]
poly_code_ids = torch.arange(
self.poly_m, dtype=torch.long, device=context_input_ids.device)
poly_code_ids += 1
poly_code_ids = poly_code_ids.unsqueeze(0).expand(1, self.poly_m)
poly_codes = self.poly_code_embeddings(poly_code_ids)
context_vec = dot_attention(
poly_codes, state_vecs, state_vecs,
context_input_masks.unsqueeze(0), self.dropout)
context_vec = self.context_fc(self.dropout(context_vec))
context_vec = F.normalize(context_vec, 2, -1)
return context_vec
elif get_embedding and candidate_data is not None:
candidates_input_ids, candidates_segment_ids, \
candidates_input_masks = candidate_data[
"candidate_input_ids"], \
candidate_data['candidates_segment_ids'], \
candidate_data["candidate_input_masks"]
res_cnt, seq_length = candidates_input_ids.shape
batch_size = 1
candidates_input_ids = candidates_input_ids.view(-1, seq_length)
candidates_input_masks = candidates_input_masks.view(
-1, seq_length)
candidates_segment_ids = candidates_segment_ids.view(
-1, seq_length)
state_vecs = self.cand_model(
candidates_input_ids, candidates_input_masks,
candidates_segment_ids)[0]
poly_code_ids = torch.zeros(
batch_size * res_cnt, 1, dtype=torch.long,
device=candidates_input_ids.device)
poly_codes = self.poly_code_embeddings(poly_code_ids)
candidates_vec = dot_attention(
poly_codes, state_vecs, state_vecs, candidates_input_ids,
self.dropout)
candidates_vec = candidates_vec.view(batch_size, res_cnt, -1)
candidates_vec = self.context_fc(self.dropout(candidates_vec))
candidates_vec = F.normalize(candidates_vec, 2, -1)
return candidates_vec
context_input_ids, context_segment_ids, \
context_input_masks = context_data["context_input_ids"], \
context_data['context_segment_ids'], \
context_data["context_input_masks"]
candidates_input_ids, candidates_segment_ids, \
candidates_input_masks = candidate_data["candidate_input_ids"], \
candidate_data['candidates_segment_ids'], \
candidate_data["candidate_input_masks"]
# only select the first candidate (whose lbl==1)
if labels is not None:
candidates_input_ids = candidates_input_ids[:, 0, :].unsqueeze(1)
candidates_segment_ids = candidates_segment_ids[:, 0, :].unsqueeze(
1)
candidates_input_masks = candidates_input_masks[:, 0, :].unsqueeze(
1)
state_vecs = self.cont_model(
context_input_ids, context_input_masks, context_segment_ids)[0]
batch_size, res_cnt, seq_length = candidates_input_ids.shape
poly_code_ids = torch.arange(
self.poly_m, dtype=torch.long, device=context_input_ids.device)
poly_code_ids += 1
poly_code_ids = poly_code_ids.unsqueeze(
0).expand(batch_size, self.poly_m)
poly_codes = self.poly_code_embeddings(poly_code_ids)
context_vecs = dot_attention(
poly_codes, state_vecs, state_vecs,
context_input_masks, self.dropout)
# candidate encoder
candidates_input_ids = candidates_input_ids.view(-1, seq_length)
candidates_input_masks = candidates_input_masks.view(-1, seq_length)
candidates_segment_ids = candidates_segment_ids.view(-1, seq_length)
state_vecs = self.cand_model(
candidates_input_ids, candidates_input_masks,
candidates_segment_ids)[0] # [bs, length, dim]
poly_code_ids = torch.zeros(
batch_size * res_cnt, 1, dtype=torch.long,
device=context_input_ids.device)
poly_codes = self.poly_code_embeddings(poly_code_ids)
candidates_vec = dot_attention(
poly_codes, state_vecs, state_vecs,
candidates_input_masks, self.dropout)
candidates_vec = candidates_vec.view(batch_size, res_cnt, -1)
# Norm here first, which is equivalent to getting context_vec
# and candidate_vec in some way
context_vecs = self.context_fc(self.dropout(context_vecs))
context_vecs = F.normalize(context_vecs, 2, -1) # [bs, m, dim]
candidates_vec = self.candidate_fc(self.dropout(candidates_vec))
candidates_vec = F.normalize(candidates_vec, 2, -1)
# poly final context vector aggregation
if labels is not None:
candidates_vec = candidates_vec.view(
1, batch_size, -1).expand(batch_size, batch_size, self.vec_dim)
final_context_vec = dot_attention(
candidates_vec, context_vecs, context_vecs, None, self.dropout)
# [bs, res_cnt, dim], res_cnt==bs when training
final_context_vec = F.normalize(final_context_vec, 2, -1)
# [bs, res_cnt], res_cnt==bs when training
dot_product = torch.sum(final_context_vec * candidates_vec, -1)
if labels is not None:
mask = torch.eye(context_input_ids.size(0)).to(
context_input_ids.device)
loss = F.log_softmax(dot_product * 5, dim=-1) * mask
loss = (-loss.sum(dim=1)).mean()
return loss
else:
cos_similarity = (dot_product + 1) / 2
return cos_similarity
class BiEncoder(BertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
# if shared is true it creates only one model (Siamese type)
if kwargs['shared'] is True:
self.cont_bert = kwargs['cont_bert']
self.cand_bert = self.cont_bert
else:
self.cont_bert = kwargs['cont_bert']
self.cand_bert = kwargs['cand_bert']
def forward(self, context_input_ids=None, context_input_masks=None,
candidate_input_ids=None, candidate_input_masks=None,
labels=None, get_embedding=None, mode="train", pooling="mean"):
# only select the first candidate (whose lbl==1)
if labels is not None:
candidate_input_ids = candidate_input_ids[:, 0, :].unsqueeze(1)
candidate_input_masks = candidate_input_masks[:, 0, :].unsqueeze(1)
# gets the context embedding
if get_embedding == "context" or mode == "train" or mode == "eval":
context_vec = self.cont_bert(context_input_ids, context_input_masks)[
0] # [bs,dim]
if pooling == "mean":
# Mean pooling
output_vectors = []
input_mask_expanded = context_input_masks.unsqueeze(
-1).expand(context_vec.size()).float()
sum_embeddings = torch.sum(
context_vec * input_mask_expanded, 1)
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
output_vectors.append(sum_embeddings / sum_mask)
context_vec = torch.cat(output_vectors, 1)
if get_embedding == "context":
return context_vec
# gets the candidate embedding
if get_embedding == "candidate" or mode == "train" or mode == "eval":
batch_size, res_cnt, seq_length = candidate_input_ids.shape
candidate_input_ids = candidate_input_ids.view(-1, seq_length)
candidate_input_masks = candidate_input_masks.view(-1, seq_length)
candidate_vec = self.cand_bert(candidate_input_ids, candidate_input_masks)[
0] # [bs,dim]
if pooling == "mean":
# Mean pooling
output_vectors = []
input_mask_expanded = candidate_input_masks.unsqueeze(
-1).expand(candidate_vec.size()).float()
sum_embeddings = torch.sum(
candidate_vec * input_mask_expanded, 1)
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
output_vectors.append(sum_embeddings / sum_mask)
candidate_vec = torch.cat(output_vectors, 1)
candidate_vec = candidate_vec.view(batch_size, res_cnt, -1)
if get_embedding == "candidate":
return candidate_vec
if labels is not None and mode == "train":
candidate_vec = candidate_vec.squeeze(1)
dot_product = torch.matmul(
context_vec, candidate_vec.t()) # [bs, bs]
mask = torch.eye(context_input_ids.size(0)).to(
context_input_ids.device)
loss = F.log_softmax(dot_product, dim=-1) * mask
loss = (-loss.sum(dim=1)).mean()
return loss
else:
context_vec = context_vec.unsqueeze(1)
dot_product = torch.matmul(
context_vec, candidate_vec.permute(0, 2, 1)).squeeze()
return dot_product
# ========================== End of file: zeroshot_classifier/models/dual_bi_encoder/jskit/encoders/utils/models.py ==========================
from os.path import join as os_join
from argparse import ArgumentParser
import torch
import transformers
from transformers import GPT2TokenizerFast, GPT2ForSequenceClassification
from stefutil import *
from zeroshot_classifier.util import *
import zeroshot_classifier.util.utcd as utcd_util
from zeroshot_classifier.preprocess import get_explicit_dataset
from zeroshot_classifier.models.gpt2 import MODEL_NAME as GPT2_MODEL_NAME, HF_MODEL_NAME, ZsGPT2Tokenizer
from zeroshot_classifier.models.explicit.explicit_v2 import *
MODEL_NAME = EXPLICIT_GPT2_MODEL_NAME
TRAIN_STRATEGY = 'explicit'
def parse_args():
parser = ArgumentParser()
parser.add_argument('--output_dir', type=str, default=None)
parser.add_argument('--normalize_aspect', type=bool, default=True)
parser.add_argument('--learning_rate', type=float, default=4e-5)
parser.add_argument('--gradient_accumulation_steps', type=int, default=8)
parser.add_argument('--batch_size', type=int, default=4)
parser.add_argument('--epochs', type=int, default=8)
return parser.parse_args()
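# Example invocation (flag values are illustrative; the flags are this script's own arguments):
# python gpt2_pretrain.py --learning_rate 4e-5 --batch_size 4 --gradient_accumulation_steps 8 --epochs 8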
if __name__ == '__main__':
seed = sconfig('random-seed')
def train(
resume: str = None, normalize_aspect=True,
learning_rate=4e-5, batch_size: int = 4, gradient_accumulation_steps: int = 8, epochs: int = 8,
output_dir: str = None
):
logger = get_logger(f'{MODEL_NAME} Train')
logger.info('Setting up training... ')
lr, bsz, gas, n_ep = learning_rate, batch_size, gradient_accumulation_steps, epochs
logger.info('Loading tokenizer & model... ')
tokenizer = GPT2TokenizerFast.from_pretrained(HF_MODEL_NAME)
tokenizer.add_special_tokens(special_tokens_dict=dict(
pad_token=ZsGPT2Tokenizer.pad_token_, additional_special_tokens=[utcd_util.EOT_TOKEN]
))
config = transformers.GPT2Config.from_pretrained(HF_MODEL_NAME)
config.pad_token_id = tokenizer.pad_token_id # Needed for Seq CLS
config.num_labels = len(sconfig('UTCD.aspects'))
model = GPT2ForSequenceClassification.from_pretrained(HF_MODEL_NAME, config=config)
# Include the `end-of-turn` token for the sgd dataset; it cannot be set as `eos` since '<|endoftext|>' is already GPT2's eos token
model.resize_token_embeddings(len(tokenizer))
logger.info('Loading data... ')
dnm = 'UTCD-in' # concatenated 9 in-domain datasets in UTCD
dset_args = dict(dataset_name=dnm, tokenizer=tokenizer, shuffle_seed=seed)
if normalize_aspect:
dset_args.update(dict(normalize_aspect=seed, splits=['train', 'eval', 'test']))
dsets = get_explicit_dataset(**dset_args)
tr, vl, ts = dsets['train'], dsets['eval'], dsets['test']
logger.info(f'Loaded #example {pl.i({k: len(v) for k, v in dsets.items()})}')
transformers.set_seed(seed)
path = map_model_output_path(
model_name=MODEL_NAME.replace(' ', '-'), mode='explicit',
sampling=None, normalize_aspect=normalize_aspect, output_dir=output_dir
)
train_args = dict(
output_dir=path,
learning_rate=lr,
per_device_train_batch_size=bsz,  # small bsz to fit in memory; effective batch size (bsz x grad. accumulation) of 32 matches Binary BERT pretraining
per_device_eval_batch_size=bsz,
gradient_accumulation_steps=gradient_accumulation_steps,
fp16=torch.cuda.is_available(),
num_train_epochs=n_ep,
dataloader_num_workers=4
)
if normalize_aspect:
train_args.update(dict(
load_best_model_at_end=True,
metric_for_best_model='eval_loss',
greater_is_better=False
))
train_args = get_train_args(model_name=GPT2_MODEL_NAME, **train_args)
trainer_args = dict(
model=model, args=train_args, train_dataset=tr, eval_dataset=vl, compute_metrics=compute_metrics
)
trainer = ExplicitTrainer(name=f'{MODEL_NAME} Train', with_tqdm=True, **trainer_args)
logger.info('Launching Training... ')
if resume:
trainer.train(resume_from_checkpoint=resume)
else:
trainer.train()
save_path = os_join(trainer.args.output_dir, 'trained')
trainer.save_model(save_path)
tokenizer.save_pretrained(save_path)
logger.info(f'Tokenizer & Model saved to {pl.i(save_path)}')
# train()
def command_prompt():
args = parse_args()
train(**vars(args))
command_prompt()
# ========================== End of file: zeroshot_classifier/models/explicit/gpt2_pretrain.py ==========================
import os
import math
import logging
import datetime
from os.path import join
from os.path import join as os_join
from typing import List, Type, Dict, Optional, Union
from argparse import ArgumentParser
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.optim import Optimizer, AdamW
from torch.utils.data import DataLoader
from transformers import (
set_seed,
BertConfig, BertModel, BertPreTrainedModel, BertTokenizer,
get_scheduler
)
from sklearn.metrics import classification_report
from tqdm import tqdm, trange
from torch.utils.tensorboard import SummaryWriter
from zeroshot_classifier.util.load_data import (
get_datasets, binary_explicit_format, in_domain_data_path, out_of_domain_data_path
)
from stefutil import *
from zeroshot_classifier.util import *
logger = logging.getLogger(__name__)
set_seed(42)
CLS_LOSS_ONLY = True # TODO: debugging
class BertZeroShotExplicit(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.binary_cls = nn.Linear(self.config.hidden_size, 2)
self.aspect_cls = None if CLS_LOSS_ONLY else nn.Linear(self.config.hidden_size, 3)
# Initialize weights and apply final processing
self.post_init()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
# calls `from_pretrained` from class `PreTrainedModel`
obj = super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
return obj
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
binary_logits = self.binary_cls(pooled_output)
# if CLS_LOSS_ONLY:
# with torch.no_grad():
# aspect_logits = self.aspect_cls(pooled_output)
# else:
# aspect_logits = self.aspect_cls(pooled_output)
aspect_logits = None if CLS_LOSS_ONLY else self.aspect_cls(pooled_output)
loss = None
logits = {'cls': binary_logits, 'aspect': aspect_logits}
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return loss, logits, outputs.hidden_states, outputs.attentions
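# Illustrative shape sketch (tiny random config and hypothetical inputs, not part of the
# original training code): the binary head yields [batch, 2] logits and the aspect head
# [batch, 3] logits, or None while CLS_LOSS_ONLY is set.
def _demo_explicit_heads():
    cfg = BertConfig(hidden_size=32, num_hidden_layers=1, num_attention_heads=2, intermediate_size=64)
    mdl = BertZeroShotExplicit(cfg)
    input_ids = torch.randint(0, cfg.vocab_size, (2, 8))
    _, logits, _, _ = mdl(input_ids=input_ids, return_dict=True)
    return logits['cls'].shape, logits['aspect']  # -> (torch.Size([2, 2]), None)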
class ExplicitCrossEncoder:
def __init__(self, name="bert-base-uncased", device: Union[str, torch.device] = 'cuda', max_length=None) -> None:
self.config = BertConfig.from_pretrained(name)
self.model = BertZeroShotExplicit(self.config)
self.tokenizer = BertTokenizer.from_pretrained(name)
self.device = device
self.max_length = max_length
self.writer = None
self.model_meta = dict(model='BinaryBERT', mode='explicit')
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
obj = cls()
obj.model = BertZeroShotExplicit.from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
return obj
def smart_batching_collate(self, batch):
texts = [[] for _ in range(len(batch[0].texts))]
labels = []
aspects = []
for example in batch:
for idx, text in enumerate(example.texts):
texts[idx].append(text.strip())
labels.append(example.label)
aspects.append(example.aspect)
tokenized = self.tokenizer(*texts, padding=True, truncation='longest_first', return_tensors="pt", max_length=self.max_length)
labels = torch.tensor(labels, dtype=torch.long).to(self.device)
aspects = torch.tensor(aspects, dtype=torch.long).to(self.device)
for name in tokenized:
tokenized[name] = tokenized[name].to(self.device)
return tokenized, labels, aspects
def smart_batching_collate_text_only(self, batch):
texts = [[] for _ in range(len(batch[0]))]
for example in batch:
for idx, text in enumerate(example):
texts[idx].append(text.strip())
tokenized = self.tokenizer(*texts, padding=True, truncation='longest_first', return_tensors="pt", max_length=self.max_length)
for name in tokenized:
tokenized[name] = tokenized[name].to(self.device)
return tokenized
def fit(
self,
train_dataloader: DataLoader,
epochs: int = 1,
scheduler: str = 'linear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = AdamW,
optimizer_params: Dict[str, object] = {'lr': 2e-5},
weight_decay: float = 0.01,
output_path: str = None,
max_grad_norm: float = 1,
show_progress_bar: bool = True
):
os.makedirs(output_path, exist_ok=True)
mdl, md = self.model_meta['model'], self.model_meta['mode']
log_fnm = f'{now(for_path=True)}, {mdl}, md={md}, #ep={epochs}'
self.writer = SummaryWriter(os_join(output_path, f'tb - {log_fnm}'))
train_dataloader.collate_fn = self.smart_batching_collate
self.model.to(self.device)
# Prepare optimizers
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
num_training_steps = int(len(train_dataloader) * epochs)
lr_scheduler = get_scheduler(
name=scheduler, optimizer=optimizer, num_warmup_steps=warmup_steps, num_training_steps=num_training_steps
)
def _get_lr() -> float:
return lr_scheduler.get_last_lr()[0]
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
tr_loss = 0
self.model.zero_grad()
self.model.train()
with tqdm(train_dataloader, desc="Iteration", smoothing=0.05, disable=not show_progress_bar) as it:
for features, labels, aspects in it:
model_predictions = self.model(**features, return_dict=True)
pooled_output = model_predictions[1]
loss_fct = CrossEntropyLoss()
task_loss_value = None
if not CLS_LOSS_ONLY:
task_loss_value = loss_fct(pooled_output['aspect'].view(-1, 3), aspects.view(-1))
binary_loss_value = loss_fct(pooled_output['cls'].view(-1, 2), labels.view(-1))
cls_loss = binary_loss_value.detach().item()
asp_loss = None if CLS_LOSS_ONLY else task_loss_value.detach().item()
it.set_postfix(cls_loss=cls_loss, asp_loss=asp_loss)
step = training_steps + epoch * len(train_dataloader)
self.writer.add_scalar('Train/learning rate', _get_lr(), step)
self.writer.add_scalar('Train/Binary Classification Loss', cls_loss, step)
if not CLS_LOSS_ONLY:
self.writer.add_scalar('Train/Aspect Classification Loss', asp_loss, step)
if CLS_LOSS_ONLY:
loss = binary_loss_value
else:
loss = task_loss_value + binary_loss_value
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
training_steps += 1
tr_loss += loss.item()
            average_loss = tr_loss / training_steps
            print(f'Epoch: {epoch+1}\nAverage loss: {average_loss:f}\nCurrent learning rate: {lr_scheduler.get_last_lr()}')
self.save(output_path)
def predict(self, sentences: List[List[str]], batch_size: int = 32):
inp_dataloader = DataLoader(sentences, batch_size=batch_size, collate_fn=self.smart_batching_collate_text_only, shuffle=False)
        show_progress_bar = False  # disabled; the evaluation loop in `__main__` shows its own progress bar
iterator = inp_dataloader
if show_progress_bar:
iterator = tqdm(inp_dataloader, desc="Batches")
pred_scores = []
self.model.eval()
self.model.to(self.device)
with torch.no_grad():
for features in iterator:
model_predictions = self.model(**features, return_dict=True)
logits = model_predictions[1]['cls']
if len(logits[0]) > 1:
logits = torch.nn.functional.softmax(logits, dim=1)
pred_scores.extend(logits)
pred_scores = torch.stack(pred_scores, dim=0).detach().cpu()
return pred_scores
def save(self, path):
"""
Saves all model and tokenizer to path
"""
if path is None:
return
logger.info("Save model to {}".format(path))
self.model.save_pretrained(path)
self.tokenizer.save_pretrained(path)
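def _demo_zero_shot_predict():
    """
    Illustrative sketch only, not used in training (assumes `bert-base-uncased` is downloadable):
    scores one text against two candidate labels with a randomly-initialized cross-encoder and
    picks the label with the highest positive-class probability, mirroring the evaluation loop below
    """
    model = ExplicitCrossEncoder('bert-base-uncased', device='cpu')
    labels = ['sports', 'politics']
    query = [['the team won the championship game', lb] for lb in labels]
    scores = model.predict(query, batch_size=2)[:, 1]  # probability of the positive class for each label
    print(labels[scores.argmax().item()])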
def parse_args():
modes = [
'vanilla',
'implicit',
'implicit-on-text-encode-aspect', # encode each of the 3 aspects as 3 special tokens, followed by text
'implicit-on-text-encode-sep', # encode aspects normally, but add special token between aspect and text
'explicit'
]
parser = ArgumentParser()
subparser = parser.add_subparsers(dest='command')
parser_train = subparser.add_parser('train')
parser_test = subparser.add_parser('test')
# set train arguments
parser_train.add_argument('--output', type=str, required=True)
parser_train.add_argument('--sampling', type=str, choices=['rand', 'vect'], required=True)
parser_train.add_argument('--mode', type=str, choices=modes, default='vanilla')
parser_train.add_argument('--batch_size', type=int, default=16)
parser_train.add_argument('--epochs', type=int, default=3)
parser.add_argument("--learning_rate",
default=2e-5,
type=float,
help="The initial learning rate for Adam.")
# set test arguments
parser_test.add_argument('--domain', type=str, choices=['in', 'out'], required=True)
parser_test.add_argument('--mode', type=str, choices=modes, default='vanilla')
parser_test.add_argument('--batch_size', type=int, default=32)
parser_test.add_argument('--model_path', type=str, required=True)
return parser.parse_args()
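# Example invocations (illustrative; paths are placeholders):
#   python binary_bert.py train --output models/binary-bert --sampling rand --mode explicit
#   python binary_bert.py test --domain in --mode explicit --model_path models/binary-bert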
if __name__ == "__main__":
import transformers
transformers.logging.set_verbosity_error() # disables `longest_first` warning
args = parse_args()
mode = args.mode
assert mode == 'explicit'
if args.command == 'train':
bsz, lr, n_ep = args.batch_size, args.learning_rate, args.epochs
sampling = args.sampling
dirs = args.output.split(os.sep)
dir_nm_last = f'{now(for_path=True)}-{dirs[-1]}-{sampling}-{args.mode}'
save_path = os_join(*dirs[:-1], dir_nm_last)
_logger = get_logger('BinaryBERT Explicit Training')
d_log = dict(mode=mode, sampling=sampling, batch_size=bsz, epochs=n_ep, learning_rate=lr, save_path=save_path)
_logger.info(f'Running training on {pl.i(d_log)}.. ')
dvc = 'cuda' if torch.cuda.is_available() else 'cpu'
_logger.info('Loading data & model... ')
# n_sample = 1024 * 8 # TODO: debugging
n_sample = None
data = get_datasets(in_domain_data_path, n_sample=n_sample)
# get keys from data dict
datasets = list(data.keys())
train = binary_explicit_format(data)
dl = DataLoader(train, shuffle=True, batch_size=bsz)
model = ExplicitCrossEncoder('bert-base-uncased', device=dvc)
        warmup_steps_ = math.ceil(len(dl) * n_ep * 0.1)  # warm up for 10% of the total training steps
_logger.info(f'Launched training on {pl.i(len(train))} samples and {pl.i(warmup_steps_)} warmup steps... ')
model.fit(
train_dataloader=dl,
epochs=n_ep,
warmup_steps=warmup_steps_,
optimizer_params={'lr': lr},
output_path=save_path
)
if args.command == 'test':
mode, domain, model_path, bsz = args.mode, args.domain, args.model_path, args.batch_size
domain_str = 'in-domain' if domain == 'in' else 'out-of-domain'
date = datetime.datetime.now().strftime('%m.%d.%Y')
date = date[:-4] + date[-2:] # 2-digit year
out_path = join(model_path, 'eval', f'{domain_str}, {date}')
os.makedirs(out_path, exist_ok=True)
data = get_datasets(in_domain_data_path if domain == 'in' else out_of_domain_data_path)
model = ExplicitCrossEncoder.from_pretrained(model_path) # load model
sep_token = sconfig('training.implicit-on-text.encode-sep.aspect-sep-token')
aspect2aspect_token = sconfig('training.implicit-on-text.encode-aspect.aspect2aspect-token')
logger = get_logger('Binary Bert Eval')
d_log = dict(mode=mode, domain=domain, batch_size=bsz, path=model_path)
logger.info(f'Evaluating Binary Bert with {pl.i(d_log)} and saving to {pl.i(out_path)}... ')
eval_loss: Dict[str, np.array] = dict() # a sense of how badly the model makes the prediction
dataset_names = [dnm for dnm, d_dset in sconfig('UTCD.datasets').items() if d_dset['domain'] == domain]
for dnm in dataset_names: # loop through all datasets
dset = data[dnm]
split = 'test'
txts, aspect = dset[split], dset['aspect']
d_dset = sconfig(f'UTCD.datasets.{dnm}.splits.{split}')
label_options, multi_label = d_dset['labels'], d_dset['multi_label']
n_options = len(label_options)
label2id = {lbl: i for i, lbl in enumerate(label_options)}
n_txt = sconfig(f'UTCD.datasets.{dnm}.splits.{split}.n_text')
d_log = {'#text': n_txt, '#label': n_options}
logger.info(f'Evaluating {pl.i(dnm)} with {pl.i(d_log)}...')
arr_preds, arr_labels = np.empty(n_txt, dtype=int), np.empty(n_txt, dtype=int)
txt_n_lbs2query = None
if mode in ['vanilla', 'explicit']:
def txt_n_lbs2query(txt: str, lbs: List[str]) -> List[List[str]]:
return [[txt, lb] for lb in lbs]
elif mode == 'implicit':
def txt_n_lbs2query(txt: str, lbs: List[str]) -> List[List[str]]:
return [[txt, f'{lb} {aspect}'] for lb in lbs]
elif mode == 'implicit-on-text-encode-aspect':
def txt_n_lbs2query(txt: str, lbs: List[str]) -> List[List[str]]:
return [[f'{aspect2aspect_token[aspect]} {txt}', lb] for lb in lbs]
elif mode == 'implicit-on-text-encode-sep':
def txt_n_lbs2query(txt: str, lbs: List[str]) -> List[List[str]]:
return [[f'{aspect} {sep_token} {txt}', lb] for lb in lbs]
gen = group_n(txts.items(), n=bsz)
            # loop through the test examples in batches
for i_grp, group in enumerate(tqdm(gen, desc=dnm, unit='group', total=math.ceil(n_txt/bsz))):
txts_, lst_labels = zip(*group)
lst_labels: List[List[int]] = [[label2id[lb] for lb in labels] for labels in lst_labels]
query = sum([txt_n_lbs2query(t, label_options) for t in txts_], start=[]) # (n_options x bsz, 2)
# probability for positive class
logits = model.predict(query, batch_size=bsz)[:, 1]
logits = logits.reshape(-1, n_options)
preds = logits.argmax(axis=1)
trues = torch.empty_like(preds)
for i, pred, labels in zip(range(bsz), preds, lst_labels):
# if false prediction, pick one of the correct labels arbitrarily
trues[i] = pred if pred in labels else labels[0]
idx_strt = i_grp*bsz
arr_preds[idx_strt:idx_strt+bsz], arr_labels[idx_strt:idx_strt+bsz] = preds.cpu(), trues.cpu()
            report_args = dict(zero_division=0, target_names=label_options, output_dict=True)  # zero_division silences the warning
            report = classification_report(arr_labels, arr_preds, **report_args)
acc = f'{report["accuracy"]:.3f}'
logger.info(f'{pl.i(dnm)} Classification Accuracy: {pl.i(acc)}')
df = pd.DataFrame(report).transpose()
df.to_csv(join(out_path, f'{dnm}.csv')) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/explicit/binary_bert.py | binary_bert.py |
from os.path import join as os_join
from transformers import TrainingArguments, SchedulerType
from transformers.training_args import OptimizerNames
from stefutil import *
from zeroshot_classifier.util import *
import zeroshot_classifier.util.utcd as utcd_util
import zeroshot_classifier.models.binary_bert
__all__ = ['EXPLICIT_BERT_MODEL_NAME', 'EXPLICIT_GPT2_MODEL_NAME', 'get_train_args']
_bert_md_nm = zeroshot_classifier.models.binary_bert.MODEL_NAME
_gpt2_md_nm = zeroshot_classifier.models.gpt2.MODEL_NAME
EXPLICIT_BERT_MODEL_NAME = f'Aspect Pretrain {_bert_md_nm}'
EXPLICIT_GPT2_MODEL_NAME = f'Aspect Pretrain {_gpt2_md_nm}'
def get_train_args(model_name: str, dir_name: str = None, **kwargs) -> TrainingArguments:
ca.check_mismatch('Model Name', model_name, [_bert_md_nm, _gpt2_md_nm])
debug = False
if debug:
args = dict(
batch_size=16,
learning_rate=1e-4,
weight_decay=0,
lr_scheduler_type=SchedulerType.CONSTANT,
num_train_epochs=4
)
else:
# Keep the same as in Binary BERT vanilla training
args = dict(
learning_rate=2e-5,
per_device_train_batch_size=16,
per_device_eval_batch_size=64,
weight_decay=1e-2,
num_train_epochs=3,
lr_scheduler_type=SchedulerType.COSINE,
)
if 'batch_size' in args:
bsz = args.pop('batch_size')
args['per_device_train_batch_size'] = bsz
args['per_device_eval_batch_size'] = bsz
md_nm = model_name.replace(' ', '-')
dir_nm = dir_name or f'{now(for_path=True)}_{md_nm}'
args.update(dict(
output_dir=os_join(utcd_util.get_base_path(), u.proj_dir, u.model_dir, dir_nm),
do_train=True, do_eval=True,
evaluation_strategy='epoch',
eval_accumulation_steps=128, # Saves GPU memory
warmup_ratio=1e-1,
adam_epsilon=1e-6,
log_level='warning',
logging_strategy='steps',
logging_steps=1,
save_strategy='epoch',
optim=OptimizerNames.ADAMW_TORCH,
report_to='none' # I have my own tensorboard logging
))
args.update(kwargs)
return TrainingArguments(**args) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/explicit/explicit_v2.py | explicit_v2.py |
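if __name__ == '__main__':
    # Minimal usage sketch (assumes the project directory layout that `utcd_util.get_base_path`
    # expects): build the aspect-pretraining `TrainingArguments` for Binary BERT and inspect a few
    # of the resolved values
    train_args = get_train_args(model_name=_bert_md_nm, dir_name='aspect-pretrain-sketch')
    print(train_args.output_dir, train_args.num_train_epochs, train_args.lr_scheduler_type)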
from os.path import join as os_join
from argparse import ArgumentParser
import torch
from torch.utils.data import DataLoader
from transformers import BertTokenizer, BertTokenizerFast, AutoModelForSequenceClassification
from tqdm.auto import tqdm
from stefutil import *
from zeroshot_classifier.util import *
import zeroshot_classifier.util.utcd as utcd_util
from zeroshot_classifier.preprocess import get_explicit_dataset
from zeroshot_classifier.models.binary_bert import MODEL_NAME as BERT_MODEL_NAME, HF_MODEL_NAME
from zeroshot_classifier.models.explicit.explicit_v2 import *
MODEL_NAME = EXPLICIT_BERT_MODEL_NAME
TRAIN_STRATEGY = 'explicit'
def parse_args():
parser = ArgumentParser()
parser.add_argument('--output_dir', type=str, default=None)
parser.add_argument('--normalize_aspect', type=bool, default=True)
parser.add_argument('--learning_rate', type=float, default=2e-5)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--epochs', type=int, default=3)
return parser.parse_args()
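# Example invocation (illustrative; the output directory is a placeholder):
#   python binary_bert_pretrain.py --learning_rate 2e-5 --batch_size 16 --epochs 3 --output_dir models/aspect-pretrain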
if __name__ == '__main__':
import transformers
seed = sconfig('random-seed')
def train(
resume: str = None, normalize_aspect=True, learning_rate: float = 2e-5, batch_size: int = 32,
            epochs: int = 8, output_dir: str = None
):
logger = get_logger(f'{MODEL_NAME} Train')
logger.info('Setting up training... ')
lr, bsz, n_ep = learning_rate, batch_size, epochs
logger.info('Loading tokenizer & model... ')
tokenizer = BertTokenizerFast.from_pretrained(HF_MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(HF_MODEL_NAME, num_labels=len(sconfig('UTCD.aspects')))
tokenizer.add_special_tokens(dict(eos_token=utcd_util.EOT_TOKEN)) # end-of-turn for SGD
model.resize_token_embeddings(len(tokenizer))
logger.info('Loading data... ')
dnm = 'UTCD-in' # concatenated 9 in-domain datasets in UTCD
dset_args = dict(dataset_name=dnm, tokenizer=tokenizer, shuffle_seed=seed)
if normalize_aspect:
dset_args.update(dict(normalize_aspect=seed, splits=['train', 'eval', 'test']))
dsets = get_explicit_dataset(**dset_args)
tr, vl, ts = dsets['train'], dsets['eval'], dsets['test']
logger.info(f'Loaded #example {pl.i({k: len(v) for k, v in dsets.items()})}')
transformers.set_seed(seed)
path = map_model_output_path(
model_name=MODEL_NAME.replace(' ', '-'), mode='explicit',
sampling=None, normalize_aspect=normalize_aspect, output_dir=output_dir
)
train_args = dict(
output_dir=path,
learning_rate=lr,
per_device_train_batch_size=bsz,
per_device_eval_batch_size=bsz,
num_train_epochs=n_ep,
dataloader_num_workers=4
)
if normalize_aspect:
train_args.update(dict(
load_best_model_at_end=True,
metric_for_best_model='eval_loss',
greater_is_better=False
))
train_args = get_train_args(model_name=BERT_MODEL_NAME, **train_args)
trainer_args = dict(
model=model, args=train_args, train_dataset=tr, eval_dataset=vl, compute_metrics=compute_metrics
)
trainer = ExplicitTrainer(name=f'{MODEL_NAME} Train', with_tqdm=True, **trainer_args)
logger.info('Launching Training... ')
if resume:
trainer.train(resume_from_checkpoint=resume)
else:
trainer.train()
save_path = os_join(trainer.args.output_dir, 'trained')
trainer.save_model(save_path)
tokenizer.save_pretrained(save_path)
logger.info(f'Tokenizer & Model saved to {pl.i(save_path)}')
# train()
def evaluate(domain: str = 'in', batch_size: int = 32):
ca(dataset_domain=domain)
dir_nm = '2022-05-19_23-33-50/checkpoint-411132'
path = os_join(utcd_util.get_base_path(), u.proj_dir, u.model_dir, MODEL_NAME.replace(' ', '-'), dir_nm)
tokenizer = BertTokenizer.from_pretrained(HF_MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(path)
if torch.cuda.is_available():
model.cuda()
model.eval()
dnms = [dnm for dnm, d_dset in sconfig('UTCD.datasets').items() if d_dset['domain'] == domain]
def collate_fn(batch): # as in speed sanity check
ret = {k: torch.stack([torch.tensor(b[k]) for b in batch]) for k in batch[0] if k != 'labels'}
ret['labels'] = torch.tensor([b['labels'] for b in batch])
return ret
for dnm in dnms:
            vl = get_explicit_dataset(dataset_name=dnm, tokenizer=tokenizer, splits='test')['test']
n_sample = len(vl)
dl = DataLoader(vl, batch_size=batch_size, shuffle=False, pin_memory=True, collate_fn=collate_fn)
lst_preds, lst_labels = [], []
with tqdm(dl, desc=f'Eval {dnm}', unit='ba') as it:
for inputs in it:
if torch.cuda.is_available():
inputs = {k: v.cuda() for k, v in inputs.items()}
outputs = model(**inputs)
logits = outputs.logits.detach()
labels = inputs['labels'].detach()
preds = torch.argmax(logits, dim=-1)
acc_ = (preds == labels).float().mean().item()
it.set_postfix(acc=acc_)
lst_preds.append(preds)
lst_labels.append(labels)
preds = torch.cat(lst_preds, dim=0)
labels = torch.cat(lst_labels, dim=0)
acc__ = (preds == labels).float().mean().item()
mic(dnm, n_sample, acc__)
# evaluate(domain='out', batch_size=32)
def command_prompt():
args = parse_args()
train(**vars(args))
command_prompt() | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/explicit/binary_bert_pretrain.py | binary_bert_pretrain.py |
from typing import Tuple
import torch
import torch.nn as nn
from transformers import BertTokenizer, BertConfig, BertForSequenceClassification
from stefutil import *
def load_sliced_binary_bert(
model_name: str = 'bert-base-uncased', max_position_embeddings: int = 512
) -> Tuple[BertTokenizer, nn.Module]:
"""
:param model_name: A hugging face model name
:param max_position_embeddings: Max model token size
Intended for loading a pretrained 512-token BERT model,
with smaller max token length by chopping off later positional embeddings
"""
conf = BertConfig.from_pretrained(model_name)
n_tok_ori = conf.max_position_embeddings
assert max_position_embeddings < n_tok_ori, \
f'Intended for a {pl.i("max_position_embeddings")} smaller than original model size of {pl.i(n_tok_ori)}, ' \
f'but got {pl.i(max_position_embeddings)}'
conf.max_position_embeddings = max_position_embeddings
tokenizer = BertTokenizer.from_pretrained(model_name, model_max_length=max_position_embeddings)
model = BertForSequenceClassification.from_pretrained(model_name, config=conf, ignore_mismatched_sizes=True)
    # Expect 2 warnings from the call above: one for initializing the classification head from a
    # pretrained BERT, another for the mismatched (smaller) position embedding size
    # Load the full-size model to copy over its pretrained positional embeddings; expect another
    # classification-head warning here
model_dummy = BertForSequenceClassification.from_pretrained(model_name)
state_d = model_dummy.bert.embeddings.position_embeddings.state_dict()
assert set(state_d.keys()) == {'weight'} # sanity check
weight_pretrained = state_d['weight']
assert weight_pretrained.shape == (n_tok_ori, conf.hidden_size)
del model_dummy
del state_d
with torch.no_grad():
# Keep the first tokens
model.bert.embeddings.position_embeddings.weight[:] = weight_pretrained[:max_position_embeddings]
return tokenizer, model
if __name__ == '__main__':
load_sliced_binary_bert(max_position_embeddings=256) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/architecture/binary_bert.py | binary_bert.py |
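    # A further sanity-check sketch (assumes `bert-base-uncased` is downloadable): a sliced
    # 128-token model keeps only the first positional embeddings and still runs a forward pass
    tokenizer, model = load_sliced_binary_bert(max_position_embeddings=128)
    enc = tokenizer('a short sanity check', 'some label', return_tensors='pt')
    with torch.no_grad():
        logits = model(**enc).logits
    assert model.config.max_position_embeddings == 128 and logits.shape == (1, 2)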
import logging
import os
import json
from typing import Dict, Tuple, Iterable, Type, Callable
import torch
from torch import nn
from torch.optim import Optimizer
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import batch_to_device
from sentence_transformers.model_card_templates import ModelCardTemplate
from sentence_transformers.cross_encoder import CrossEncoder
from sentence_transformers.evaluation import SentenceEvaluator
from sentence_transformers.losses import CosineSimilarityLoss
from tqdm.autonotebook import tqdm, trange
from stefutil import *
__all__ = ['BinaryBertCrossEncoder', 'BiEncoder']
class BinaryBertCrossEncoder(CrossEncoder):
logger = get_logger('Bin BERT Train')
def fit(
self,
train_dataloader: DataLoader = None,
evaluator: SentenceEvaluator = None,
# ========================== Begin of added ==========================
val_dataloader: DataLoader = None,
logger_fl: logging.Logger = None,
best_model_metric: str = 'loss',
# ========================== End of added ==========================
epochs: int = 1, loss_fct=None,
activation_fct=nn.Identity(),
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = torch.optim.AdamW,
optimizer_params: Dict[str, object] = {'lr': 2e-5},
weight_decay: float = 0.01,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True
):
# ========================== Begin of added ==========================
ca.check_mismatch('Eval Metric for Best Model', best_model_metric, ['loss', 'accuracy'])
# ========================== End of added ==========================
train_dataloader.collate_fn = self.smart_batching_collate
# ========================== Begin of added ==========================
if val_dataloader:
val_dataloader.collate_fn = self.smart_batching_collate
# ========================== End of added ==========================
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.model.to(self._target_device)
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
self.best_score = -9999999
num_train_steps = int(len(train_dataloader) * epochs)
# Prepare optimizers
param_optimizer = list(self.model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
if isinstance(scheduler, str):
scheduler = SentenceTransformer._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
if loss_fct is None:
loss_fct = nn.BCEWithLogitsLoss() if self.config.num_labels == 1 else nn.CrossEntropyLoss()
# ========================== Begin of added ==========================
curr_best_model = {'epoch': 0, 'best_loss': float('inf'), 'best_acc': -float('inf'), 'path': None}
pretty = MlPrettier(ref=dict(step=len(train_dataloader), epoch=epochs))
# ========================== End of added ==========================
skip_scheduler = False
# ========================== Begin of modified ==========================
# for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
for epoch in range(epochs):
epoch_str = f'Epoch {pl.i(epoch+1)}/{pl.i(epochs)}'
epoch_str_nc = f'Epoch {epoch+1}/{epochs}'
# ========================== End of modified ==========================
training_steps = 0
self.model.zero_grad()
self.model.train()
# ========================== Begin of modified ==========================
desc = f'Training {epoch_str}'
it = tqdm(train_dataloader, desc=desc, unit='ba', smoothing=0.05, disable=not show_progress_bar)
for features, labels in it:
# ========================== End of modified ==========================
if use_amp:
with autocast():
model_predictions = self.model(**features, return_dict=True)
logits = activation_fct(model_predictions.logits)
if self.config.num_labels == 1:
logits = logits.view(-1)
loss_value = loss_fct(logits, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
model_predictions = self.model(**features, return_dict=True)
logits = activation_fct(model_predictions.logits)
if self.config.num_labels == 1:
logits = logits.view(-1)
loss_value = loss_fct(logits, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
# ========================== Begin of added ==========================
# TODO: not sure why 2 lr vals, w/ same value
d_log = dict(loss=loss_value.item(), lr=scheduler.get_last_lr()[0])
it.set_postfix({k: pl.i(v) for k, v in pretty(d_log).items()})
d_log = dict(epoch=epoch+1, step=training_steps+1, **d_log)
logger_fl.info(pl.nc(pretty(d_log)))
# ========================== End of added ==========================
if not skip_scheduler:
scheduler.step()
training_steps += 1
# ========================== Begin of added ==========================
if val_dataloader is not None:
self.model.eval()
val_loss = 0
val_steps = 0
n_correct, n = 0, 0
desc = f'Evaluating {epoch_str}'
it = tqdm(val_dataloader, desc=desc, unit='ba', smoothing=0.05, disable=not show_progress_bar)
for features, labels in it:
with torch.no_grad():
model_predictions = self.model(**features, return_dict=True)
logits = activation_fct(model_predictions.logits)
if self.config.num_labels == 1:
logits = logits.view(-1)
n_correct += (logits.argmax(dim=1) == labels).sum().item()
n += labels.numel()
val_loss += loss_fct(logits, labels).item()
val_steps += 1
val_loss /= val_steps
acc = n_correct / n
d_log = pretty(dict(epoch=epoch+1, eval_loss=val_loss, eval_acc=acc))
BinaryBertCrossEncoder.logger.info(pl.i(d_log))
logger_fl.info(pl.nc(d_log))
if best_model_metric == 'loss':
best_val = val_loss
prev_val = curr_best_model['best_loss']
better = best_val < prev_val
else: # `accuracy`
best_val = acc
prev_val = curr_best_model['best_acc']
better = best_val > prev_val
if better:
curr_best_model['epoch'] = epoch+1
curr_best_model['best_loss' if best_model_metric == 'loss' else 'best_acc'] = best_val
if save_best_model:
curr_best_model['path'] = output_path
self.save(output_path)
BinaryBertCrossEncoder.logger.info(f'Best model found at {epoch_str} w/ '
f'{pl.i(best_model_metric)}={pl.i(best_val)} ')
logger_fl.info(f'Best model found at {epoch_str_nc} w/ {best_model_metric}={best_val} ')
# ========================== End of added ==========================
# ========================== Begin of modified ==========================
# No evaluator, but output path: save final model version
if val_dataloader is None and output_path is not None:
self.save(output_path)
# ========================== End of modified ==========================
class BiEncoder(SentenceTransformer):
logger = get_logger('Bi-Encoder Train')
def fit(
self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]] = None,
# ========================== Begin of added ==========================
val_dataloader: DataLoader = None,
logger_fl: logging.Logger = None,
best_model_metric: str = 'loss',
# ========================== End of added ==========================
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = torch.optim.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True,
checkpoint_path: str = None,
checkpoint_save_steps: int = 500,
checkpoint_save_total_limit: int = 0
):
# ========================== Begin of added ==========================
ca.check_mismatch('Eval Metric for Best Model', best_model_metric, ['loss', 'accuracy'])
# ========================== End of added ==========================
        # Add info to model card
info_loss_functions = []
for dataloader, loss in train_objectives:
info_loss_functions.extend(ModelCardTemplate.get_train_objective_info(dataloader, loss))
info_loss_functions = "\n\n".join([text for text in info_loss_functions])
info_fit_parameters = json.dumps({"evaluator": "validation_loss", "epochs": epochs, "steps_per_epoch": steps_per_epoch, "scheduler": scheduler, "warmup_steps": warmup_steps, "optimizer_class": str(optimizer_class), "optimizer_params": optimizer_params, "weight_decay": weight_decay, "evaluation_steps": evaluation_steps, "max_grad_norm": max_grad_norm }, indent=4, sort_keys=True)
self._model_card_text = None
self._model_card_vars['{TRAINING_SECTION}'] = ModelCardTemplate.__TRAINING_SECTION__.replace("{LOSS_FUNCTIONS}", info_loss_functions).replace("{FIT_PARAMETERS}", info_fit_parameters)
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
# ========================== Begin of added ==========================
if val_dataloader:
val_dataloader.collate_fn = self.smart_batching_collate
# ========================== End of added ==========================
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# ========================== Begin of added ==========================
curr_best_model = {'epoch': 0, 'best_loss': float('inf'), 'best_acc': -float('inf'), 'path': None}
pretty = MlPrettier(ref=dict(step=steps_per_epoch, epoch=epochs))
# ========================== End of added ==========================
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
        # ========================== Begin of modified ==========================
# for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
for epoch in range(epochs):
epoch_str = f'Epoch {pl.i(epoch+1)}/{pl.i(epochs)}'
epoch_str_nc = f'Epoch {epoch+1}/{epochs}'
        # ========================== End of modified ==========================
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
# ========================== Begin of modified ==========================
desc = f'Training {epoch_str}'
it = trange(steps_per_epoch, desc=desc, unit='ba', smoothing=0.05, disable=not show_progress_bar)
for _ in it:
# ========================== End of modified ==========================
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
labels = labels.to(self._target_device)
features = list(map(lambda batch: batch_to_device(batch, self._target_device), features))
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
# ========================== Begin of added ==========================
d_log = dict(loss=loss_value.item(), lr=scheduler.get_last_lr()[0])
it.set_postfix({k: pl.i(v) for k, v in pretty(d_log).items()})
d_log = dict(epoch=epoch+1, step=training_steps+1, **d_log)
logger_fl.info(pl.nc(pretty(d_log)))
# ========================== End of added ==========================
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
# ========================== Begin of added ==========================
if val_dataloader is not None:
self.eval()
val_loss = 0
val_steps = 0
n_correct, n = 0, 0
assert len(loss_models) == 1 # sanity check
loss_model = loss_models[0]
assert isinstance(loss_model, CosineSimilarityLoss)
desc = f'Evaluating {epoch_str}'
it = tqdm(val_dataloader, desc=desc, unit='ba', smoothing=0.05, disable=not show_progress_bar)
for features, labels in it:
with torch.no_grad():
# See `CosineSimilarityLoss.forward`
embeddings = [loss_model.model(f)['sentence_embedding'] for f in features]
output = loss_model.cos_score_transformation(torch.cosine_similarity(embeddings[0], embeddings[1]))
loss_value = loss_model.loss_fct(output, labels.view(-1))
pred = (output > 0.5).long()
n_correct += (pred == labels).sum().item()
n += labels.numel()
val_loss += loss_value.item()
val_steps += 1
val_loss /= val_steps
acc = n_correct / n
d_log = pretty(dict(epoch=epoch+1, eval_loss=val_loss, eval_acc=acc))
BiEncoder.logger.info(pl.i(d_log))
logger_fl.info(pl.nc(d_log))
if best_model_metric == 'loss':
best_val = val_loss
prev_val = curr_best_model['best_loss']
better = best_val < prev_val
else: # `accuracy`
best_val = acc
prev_val = curr_best_model['best_acc']
better = best_val > prev_val
if better:
curr_best_model['epoch'] = epoch+1
curr_best_model['best_loss' if best_model_metric == 'loss' else 'best_acc'] = best_val
if save_best_model:
curr_best_model['path'] = output_path
self.save(output_path)
BiEncoder.logger.info(f'Best model found at {epoch_str} w/ '
f'{pl.i(best_model_metric)}={pl.i(best_val)} ')
logger_fl.info(f'Best model found at {epoch_str_nc} w/ {best_model_metric}={best_val} ')
# ========================== End of added ==========================
# ========================== Begin of modified ==========================
# No evaluator, but output path: save final model version
if val_dataloader is None and output_path is not None:
self.save(output_path)
# ========================== End of modified ==========================
if checkpoint_path is not None:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/models/architecture/sbert.py | sbert.py |
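if __name__ == '__main__':
    # Minimal fine-tuning sketch (assumes `bert-base-uncased` is downloadable): one epoch of the
    # cross-encoder on two toy (text, label-description) pairs; `logger_fl` is required by the
    # modified training loop above, so a plain module logger is passed in
    from sentence_transformers import InputExample
    model = BinaryBertCrossEncoder('bert-base-uncased', num_labels=2)
    train = [
        InputExample(texts=['i love this movie', 'positive'], label=1),
        InputExample(texts=['i love this movie', 'negative'], label=0)
    ]
    dl = DataLoader(train, shuffle=True, batch_size=2)
    model.fit(train_dataloader=dl, epochs=1, warmup_steps=0, logger_fl=logging.getLogger('BinaryBertCrossEncoder-sketch'))
    print(model.predict([['i love this movie', 'positive']]))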
import os
from os.path import join as os_join
from typing import List, Tuple, Dict, Callable, Union, Any, Optional
from transformers import PreTrainedTokenizerBase
import datasets
from datasets import Dataset, DatasetDict, ClassLabel
from stefutil import *
from zeroshot_classifier.util import *
from zeroshot_classifier.util import load_data
import zeroshot_classifier.util.utcd as utcd_util
__all__ = ['get_dataset', 'get_explicit_dataset']
logger = get_logger('Dataset')
def _get_num_proc(dsets: Union[DatasetDict, Dict[str, Dataset]]) -> Optional[int]:
n_cpu = os.cpu_count()
if n_cpu >= 2 and min(len(d) for d in dsets.values()) > 4096:
return n_cpu
class _FilterSplit:
def __init__(
self, hf_dataset: DatasetDict, asp_norm_dataset: Dict[str, load_data.SplitDataset],
dataset_id2name: Dict[int, str], split: str = None
):
self.hf_dataset = hf_dataset
self.asp_norm_dataset = asp_norm_dataset
self.dataset_id2name = dataset_id2name
self.split = split
def __call__(self, example):
dnm = self.dataset_id2name[example['dataset_id']]
return example['text'] in self.asp_norm_dataset[dnm][self.split]
def filter_split(
hf_dataset: DatasetDict, asp_norm_dataset: Dict[str, load_data.SplitDataset],
dataset_id2name: Dict[int, str], split: str = None, **filter_args
) -> Dataset:
ret = hf_dataset['train'].filter(_FilterSplit(hf_dataset, asp_norm_dataset, dataset_id2name, split), **filter_args)
n = len(ret)
# sanity check, same # of pairs as in Chris' `load_data`
assert n == sum(len(ds[split]) for ds in asp_norm_dataset.values())
logger.info(f'#{pl.i(split)} pairs: {pl.i(n)}')
return ret
def get_dataset(
dataset_name='ag_news', normalize_aspect: bool = False,
map_func: Union[Dict[str, Callable], Callable] = None, filter_func: Callable = None,
remove_columns: Union[str, List[str]] = None,
n_sample: int = None, shuffle_seed: int = None, fast=True, from_disk=True,
splits: Union[str, List[str], Tuple[str, ...]] = ('train', 'test'), pbar: bool = False
) -> DatasetDict:
logger.info(f'Loading dataset {pl.i(dataset_name)}... ')
if not pbar:
datasets.set_progress_bar_enabled(False)
if from_disk:
path = os_join(utcd_util.get_base_path(), u.proj_dir, u.dset_dir, 'processed', dataset_name)
dsets = datasets.load_from_disk(path)
if normalize_aspect: # TODO: ugly but works
n_proc = _get_num_proc(dsets) if fast else None
logger.info(f'Normalizing training data by #sample per aspect with {pl.i(normalize_aspect)}...')
_data = load_data.get_datasets(domain='in', normalize_aspect=normalize_aspect)
# apply #sample normalization to the training set
id2nm = sconfig('UTCD.dataset_id2name')
args = dict(hf_dataset=dsets, asp_norm_dataset=_data, dataset_id2name=id2nm, num_proc=n_proc)
# Local function not good for dataset caching
dsets['train'], dsets['eval'] = filter_split(**args, split='train'), filter_split(**args, split='eval')
else:
dsets = datasets.load_dataset(dataset_name)
if isinstance(splits, str):
splits = [splits]
dsets = {s: dsets[s] for s in splits}
# ordering of filter, shuffle, then select determined for debugging
n_proc = _get_num_proc(dsets) if fast else None
if filter_func is not None:
logger.info('Filtering...')
dsets = {s: dset.filter(filter_func, num_proc=n_proc) for s, dset in dsets.items()}
if shuffle_seed:
logger.info(f'Shuffling with seed {pl.i(shuffle_seed)}...')
dsets = {s: dset.shuffle(seed=shuffle_seed) for s, dset in dsets.items()}
if n_sample is not None:
logger.info(f'Selecting the first {pl.i(n_sample)} samples...')
dsets = {s: dset.select(range(min(n_sample, len(dset)))) for s, dset in dsets.items()}
if map_func is not None:
logger.info('Mapping...')
if not isinstance(map_func, dict):
map_func = {s: map_func for s in splits}
dsets = {
s: dset.map(
map_func[s], batched=True, remove_columns=remove_columns, num_proc=n_proc,
load_from_cache_file=False
)
for s, dset in dsets.items()
}
datasets.set_progress_bar_enabled(True)
return DatasetDict(dsets)
class ExplicitMap:
def __init__(self, tokenizer, dataset_name: str, dataset: Dict[str, Dataset]):
self.tokenizer = tokenizer
self.is_combined = 'UTCD' in dataset_name
aspects: List[str] = sconfig('UTCD.aspects')
aspect2id = {a: i for i, a in enumerate(aspects)}
if self.is_combined: # get aspect based on dataset id
# feature is the same for both `train` and `test`
feat: ClassLabel = dataset['train'].features['dataset_id']
n_dset = feat.num_classes
self.did2aspect_id = {
i: aspect2id[sconfig(f'UTCD.datasets.{feat.int2str(i)}.aspect')] for i in range(n_dset)
}
else: # single dataset, the same aspect
self.aspect_id = aspect2id[sconfig(f'UTCD.datasets.{dataset_name}.aspect')]
def __call__(self, samples: Dict[str, List[Any]]):
ret = self.tokenizer(samples['text'], padding='max_length', truncation=True)
if self.is_combined:
            ret['labels'] = [self.did2aspect_id[did] for did in samples['dataset_id']]
else:
ret['labels'] = [self.aspect_id] * len(samples['text'])
return ret
def get_explicit_dataset(
dataset_name: str = 'UTCD-in', tokenizer: PreTrainedTokenizerBase = None, fast: bool = True,
pbar: bool = False, **kwargs
) -> DatasetDict:
"""
override text classification labels to be aspect labels
"""
# perform preprocessing outside `get_dataset` as feature from the dataset is needed
dsets = get_dataset(dataset_name, **kwargs) # by split
logger.info('Constructing explicit dataset... ')
exp_map = ExplicitMap(tokenizer=tokenizer, dataset_name=dataset_name, dataset=dsets)
rmv = ['text']
if exp_map.is_combined:
rmv.append('dataset_id')
if not pbar:
datasets.set_progress_bar_enabled(False)
ret = dsets.map(exp_map, batched=True, remove_columns=rmv, num_proc=_get_num_proc(dsets) if fast else None)
datasets.set_progress_bar_enabled(True)
return ret | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/preprocess/dataset.py | dataset.py |
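if __name__ == '__main__':
    # Minimal usage sketch (assumes the processed `ag_news` data is on disk where `from_disk`
    # expects it and `bert-base-uncased` is downloadable): build the aspect-classification
    # dataset for one UTCD dataset and inspect a tokenized sample
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
    dsets = get_explicit_dataset(dataset_name='ag_news', tokenizer=tokenizer, splits='test')
    sample = dsets['test'][0]
    mic(len(dsets['test']), list(sample.keys()), sample['labels'])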
import os
import sys
import math
import logging
import datetime
from os.path import join as os_join
from time import sleep
from typing import Dict, Tuple, List, Union, Optional
from collections import OrderedDict
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter
from transformers import GPT2TokenizerFast
from transformers import TrainingArguments, TrainerCallback, Trainer
from transformers.trainer_utils import EvalLoopOutput
from transformers.file_utils import is_torch_tpu_available
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
import torch_xla.distributed.parallel_loader as pl
from stefutil import *
from zeroshot_classifier.util.util import *
from zeroshot_classifier.util.training import *
class MyLoggingCallback(TrainerCallback):
"""
Requires
- Tuple of (custom compute_loss log, internal training log, internal validation log) for each step
- Intended for coupled training and evaluation
    - Accuracy is passed to `Trainer` as a metric; training metrics are computed in `compute_loss` and logged
"""
def __init__(
self, parent_trainer: Trainer, do_eval=True,
name='GPT2-NVIDIA-Train', is_ddp: Union[int, bool] = False
):
"""
:param parent_trainer: The parent Trainer
:param name: Logger name
:param is_ddp: Flag for if distributed training is used
So that logging step is correct, since each scrip only see 1 GPU
"""
self.name = name
self.out_dict = None
self.out_dict_tr = None
self.is_compute_loss_on_train = True
self.k_acc = 'acc_meta'
self.k_cls = 'cls_acc_meta' # See `CustomTrainer`
self.k_cls_eval = f'{self.k_cls}_eval'
self.trainer = parent_trainer
args, dset_tr__, dset_vl_, md_, tokzer = (
getattr(parent_trainer, k) for k in ['args', 'train_dataset', 'eval_dataset', 'model', 'tokenizer']
)
self.n_eval = len(dset_vl_)
lr, n_ep = args.learning_rate, args.num_train_epochs
self.bsz = args.per_device_train_batch_size * args.gradient_accumulation_steps
self.is_ddp = is_ddp
if is_ddp:
assert isinstance(is_ddp, int), 'When DDP enabled, is_ddp must specify #GPU'
self.bsz = self.bsz * is_ddp
if torch.cuda.is_available() and self.trainer.args.n_gpu > 1:
self.bsz *= self.trainer.args.n_gpu
seq_max_len = len(dset_tr__[0]['input_ids'])
n_data, md_sz = len(dset_tr__), md_.config.n_positions
self.n_step = max(math.ceil(n_data / self.bsz), 1) * n_ep # #step/epoch at least 1
self.train_meta = OrderedDict([
('#data', n_data), ('model size', md_sz),
('learning rate', lr), ('batch shape', (self.bsz, seq_max_len)), ('#epochs', n_ep), ('#steps', self.n_step),
])
self.prettier = MlPrettier(ref=self.train_meta)
self.called_val_init = False
self.do_eval = do_eval
self.save_time = now(for_path=True)
self.logger, self.logger_fl, self.tb_writer = None, None, None
self.ls = None
self.log_fnm = f'{name}_{{{pl.pa(dict(n=n_data, l=md_sz, a=lr, bsz=self.bsz, n_ep=n_ep))}}}'
self.train_begin, self.train_end = None, None
self.t_strt, self.t_end = None, None
def on_train_begin(self, args: TrainingArguments, state, control, **kwargs):
if self.trainer.is_local_process_zero(): # For distributed training; TODO: support multi machine?
self.logger: logging.Logger = get_logger(self.name)
output_dir = self.trainer.args.output_dir
fl_path = os_join(output_dir, f'{self.log_fnm}.log')
self.logger_fl = get_logger(name=self.name, kind='file-write', file_path=fl_path)
self.tb_writer = SummaryWriter(os_join(output_dir, f'TB_{self.log_fnm}'))
self.ls = LogStep(
trainer=self.trainer, prettier=self.prettier,
logger=self.logger, file_logger=self.logger_fl, tb_writer=self.tb_writer
)
conf = self.trainer.model.config.to_dict()
args = self.trainer.args.to_dict()
sleep(2) # otherwise, logging messages missing
self.logger.info(f'Training started on model{pl.fmt(conf)}, {pl.i(self.train_meta)} and '
f'training args: {pl.fmt(args)}... ')
self.logger_fl.info(f'Training started on model{pl.id(conf)}, {pl.nc(self.train_meta)} and '
f'training args: {pl.id(args)}... ')
sleep(2)
self.t_strt = datetime.datetime.now()
self.train_begin = True
def on_train_end(self, args: TrainingArguments, state, control, **kwargs):
if self.train_begin:
self.train_begin = False
self.train_end = True
self.t_end = datetime.datetime.now()
t = fmt_delta(self.t_end - self.t_strt)
self.logger.info(f'Training completed in {pl.i(t)} ')
self.logger_fl.info(f'Training completed in {t} ')
def on_evaluate(self, args: TrainingArguments, state, control, **kwargs):
if self.trainer.is_local_process_zero(): # Similarly to `on_train_begin`
dl_vl: DataLoader
model, dl_vl = kwargs['model'], kwargs['eval_dataloader']
dset_vl: Dataset = dl_vl.dataset
n_eval = len(dset_vl)
bsz = dl_vl.batch_size
seq_max_len = len(dset_vl[0]['input_ids'])
md_sz = model.config.n_positions
n_bch = max(math.ceil(n_eval / bsz), 1)
eval_meta = OrderedDict([
('#data', n_eval), ('model size', md_sz), ('batch shape', (bsz, seq_max_len)), ('#batches', n_bch)
])
if not self.trainer.with_tqdm:
self.logger.info(f'Ran evaluation with {pl.i(eval_meta)}')
self.logger_fl.info(f'Ran evaluation with {pl.nc(eval_meta)}')
def _acc_stats2dict(self, out_dict: Dict) -> Dict:
"""
Convert `acc_meta`, `classification_acc_meta` dict to stats for logging
"""
stats_acc = {k: sum(d[k] for d in out_dict[self.k_acc]) for k in out_dict[self.k_acc][0].keys()}
del out_dict[self.k_acc]
ret = dict(ntp_acc=stats_acc['n_acc'] / stats_acc['n_total'])
if self.k_cls in out_dict:
stats_cls_acc = {
k: sum(d[k] for d in out_dict[self.k_cls]) for k in out_dict[self.k_cls][0].keys()
if k in ['n_acc', 'n_total']
}
del out_dict[self.k_cls]
if stats_cls_acc['n_total'] == 0:
cls_acc = 0
else:
cls_acc = (stats_cls_acc['n_acc']/stats_cls_acc['n_total'])
ret['cls_acc'] = cls_acc
return ret
def on_log(self, args: TrainingArguments, state, control, logs: Dict = None, **kwargs):
# basically only log the main process; `state.is_local_process_zero` is wrong in DDP eval
if self.trainer.is_local_process_zero():
step = state.global_step
if 'src' in logs and logs['src'] == 'compute_loss':
# For gradient_accumulation, many batches of `compute_loss` may be called,
# before going into train logging
# Loss here is per batch, not per gradient update, ignore
if self.out_dict_tr is None:
n_ep = logs['epoch']
self.out_dict_tr = {'step': step, 'epoch': n_ep, self.k_acc: [logs[self.k_acc]]}
# Aggregate accuracy & classification accuracy counts
if self.trainer.compute_cls_acc:
self.out_dict_tr[self.k_cls] = [logs[self.k_cls]]
else: # Later batch in the same gradient accumulation
step_, n_ep = self.out_dict_tr['step'], self.out_dict_tr['epoch']
n_ep_ = logs['epoch']
assert step_ == step and n_ep_ == n_ep
self.out_dict_tr[self.k_acc].append(logs[self.k_acc])
if self.trainer.compute_cls_acc:
self.out_dict_tr[self.k_cls].append(logs[self.k_cls])
elif 'loss' in logs: # Trainer default training loss logging
d_log = dict(epoch=state.epoch, step=step+1) # 1-indexed
d_log.update(dict(lr=logs['learning_rate'], loss=logs['loss']))
if not self.trainer.disable_train_metrics:
d_log.update(self._acc_stats2dict(self.out_dict_tr))
self.ls(d_log, training=True, to_console=not self.trainer.with_tqdm)
self.out_dict_tr = None # Reset for next global step
elif 'eval_loss' in logs: # Trainer eval output after eval metric computed
n_ep = logs['epoch']
assert n_ep.is_integer()
d_log = dict(epoch=int(n_ep), loss=logs['eval_loss'], cls_acc=logs['eval_cls_acc'])
self.ls(d_log, training=False, to_console=not self.trainer.with_tqdm)
else:
self.logger.info(pl.i(logs))
self.logger_fl.info(pl.nc(logs))
class ColoredPrinterCallback(TrainerCallback):
def __init__(self, name='Zero-shot GPT-2 Training'):
self.logger = logging.getLogger(name)
self.logger.setLevel(logging.DEBUG)
had_handler = False
hd_attr_nm = 'name_for_my_logging'
for hd in self.logger.handlers:
if hasattr(hd, hd_attr_nm) and getattr(hd, hd_attr_nm) == name:
had_handler = True
if not had_handler:
handler = logging.StreamHandler(stream=sys.stdout) # For my own coloring
handler.setLevel(logging.DEBUG)
handler.setFormatter(MyFormatter())
setattr(handler, hd_attr_nm, name)
self.logger.addHandler(handler)
def on_log(self, args, state, control, logs=None, **kwargs):
if state.is_local_process_zero:
self.logger.info(pl.i(logs) if isinstance(logs, dict) else logs)
def get_accs(
inputs: Dict[str, torch.Tensor], logits: torch.Tensor, tokenizer: GPT2TokenizerFast, mode: str = 'train',
compute_cls_acc: bool = False
) -> Dict:
"""
:param inputs: Dictionary of a 2D batch of input tensors, with keys [`labels`, `token_type_ids`, `dataset_id`]
:param logits: logits by ZsGPT2LMHeadModel's forward pass
:param tokenizer: ZsGPT2Tokenizer for getting the class label
:param mode: Determines which split the labels are from, one of [`train`, `eval`]
:param compute_cls_acc: Whether to compute classification accuracy
:return: NTP accuracy & sample classification accuracy metadata
    .. note:: Classification accuracy based on the NTP task **during training**
        **assumes** the predicted token id is at the same location as the label id
"""
preds = logits.argmax(dim=-1)
labels_ = inputs['labels'].detach()
# CLM, predicting the next token given current, so shift
# Last prediction is not part of input label, 1st input is fed into model & not predicted
preds, labels_ = preds[:, :-1], labels_[:, 1:]
mask_non_pad = labels_ != PT_LOSS_PAD # Consider only the actual tokens for accuracy
preds_non_pad, labels_non_pad = preds[mask_non_pad], labels_[mask_non_pad]
matches: torch.Tensor = (preds_non_pad == labels_non_pad)
d_ret = dict(acc_meta=dict(n_acc=matches.sum().item(), n_total=preds_non_pad.numel()))
if compute_cls_acc:
token_type_ids, dataset_id = inputs['token_type_ids'].detach(), inputs['dataset_id'].detach()
id_att = tokenizer.enc_spec(tokenizer.answer_type_token)
id_answ = tokenizer.enc_spec(tokenizer.boa_token)
id_eos = tokenizer.enc_spec(tokenizer.eos_token)
# Also shift by 1
lst_idxs_answ: List[List[int]] = [(row == id_att).nonzero().flatten().tolist() for row in token_type_ids[:, 1:]]
id_sep = tokenizer.encode(tokenizer.ques_sep_token)[0]
def get_label_ids(i_sample: int, idxs_answ: List[int]) -> List[Tuple[int, List[int]]]:
"""
Prepare for input to `get_label_id`
:param i_sample: Index of sample as in `input_ids`
:param idxs_answ: Indices of the answer part
:return: Potentially breaks down the indices of list of labels into sublists, one for each label
"""
msk_sep: torch.Tensor = labels_[i_sample, idxs_answ] == id_sep
if torch.any(msk_sep):
idxs_sep = msk_sep.nonzero().flatten().tolist()
# filters out the sep token
idxs = [*idxs_sep, None]
lst_idxs_answ_ = [idxs_answ[:idxs_sep[0]]]
lst_idxs_answ_ += [idxs_answ[idx+1:idxs[i+1]] for i, idx in enumerate(idxs[:-1])]
return [(i_sample, idxs_answ_) for idxs_answ_ in lst_idxs_answ_]
else:
return [(i_sample, idxs_answ)]
def get_label_id(i_sample: int, idxs_answ: List[int]) -> Dict[str, int]:
"""
:return: classification label predicted & expected
.. note:: answer tokens should be present in each row/sample
"""
assert len(idxs_answ) # Should always exist, see `ZsGPT2Tokenizer.__call__`
token_ids_true = labels_[i_sample, idxs_answ].tolist() # Inputs are labels
# Remove answer special prefix token & potentially the ending token
if token_ids_true[0] == id_answ:
idxs_answ, token_ids_true = idxs_answ[1:], token_ids_true[1:]
assert len(token_ids_true) # Labels should always be available
if token_ids_true[-1] == id_eos:
idxs_answ, token_ids_true = idxs_answ[:-1], token_ids_true[:-1]
assert len(token_ids_true)
dset_id = dataset_id[i_sample].item()
dnm_ = sconfig('UTCD.dataset_id2name')[dset_id]
split = 'train' if mode == 'train' else 'test'
descs = sconfig(f'UTCD.datasets.{dnm_}.splits.{split}.labels')
desc_true = tokenizer.decode(token_ids_true)
assert desc_true in descs
# By default, the predictions and labels will not agree
d_lbs_ = dict(label_id_pred=-1, label_id_true=descs.index(desc_true)) # Local label wrt dataset
desc_pred = tokenizer.decode(preds[i_sample, idxs_answ])
if desc_pred in descs:
d_lbs_['label_id_pred'] = descs.index(desc_pred)
return d_lbs_
args = sum([get_label_ids(i_sample, idxs_answ) for i_sample, idxs_answ in enumerate(lst_idxs_answ)], start=[])
lst_idxs_n_lbs = [get_label_id(*a) for a in args]
d_lbs: Dict[str, List[int]] = {k_id: [d[k_id] for d in lst_idxs_n_lbs] for k_id in lst_idxs_n_lbs[0].keys()}
ids_pred, ids_true = d_lbs['label_id_pred'], d_lbs['label_id_true']
n_acc = sum(p == t for p, t in zip(ids_pred, ids_true)) # prediction ids match label ids
n_total = len(ids_true) # note multi-label means potentially more classification denominator than batch size
d_ret['cls_acc_meta'] = dict(n_acc=n_acc, n_total=n_total, ids_pred=ids_pred, ids_true=ids_true)
return d_ret
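def _demo_ntp_accuracy():
    """
    Illustrative sketch only: shows the shift-by-one next-token-prediction accuracy on a toy batch;
    token ids and vocabulary size are made up, the tokenizer argument is unused when
    `compute_cls_acc=False`, and `PT_LOSS_PAD` is assumed to be the usual negative ignore-index
    """
    labels = torch.tensor([[3, 5, 7, PT_LOSS_PAD]])  # last position is padding & ignored
    logits = torch.zeros(1, 4, 16)  # (batch size, sequence length, vocabulary size)
    logits[0, 0, 5] = 1.  # token predicted after position 0 matches the label at position 1
    logits[0, 1, 9] = 1.  # token predicted after position 1 misses the label 7 at position 2
    acc = get_accs(dict(labels=labels), logits, tokenizer=None, compute_cls_acc=False)['acc_meta']
    assert acc == dict(n_acc=1, n_total=2)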
class MyTrainer(Trainer):
def __init__(
self, tokenizer: GPT2TokenizerFast = None, custom_logging=True,
disable_train_metrics: bool = True, compute_cls_acc: bool = False,
is_ddp: Union[bool, int] = False, with_tqdm: bool = True, **kwargs
):
super().__init__(**kwargs)
assert 'args' in kwargs
self.custom_logging = custom_logging
# Calling `get_accs` during training seems to reduce GPU util
self.disable_train_metrics = disable_train_metrics
self.compute_cls_acc = compute_cls_acc
self.is_ddp = is_ddp
self.with_tqdm = with_tqdm
self.tokenizer = tokenizer # TODO: generalize to more tokenizers?
self.mode = None
self.post_init()
# Sanity check for distributed training
print(f'Trainer instantiated with is_local_process_zero: {pl.i(self.is_local_process_zero())}')
self.logger = get_logger('GPT2 Trainer')
d_log = dict(
custom_logging=custom_logging, disable_train_metrics=disable_train_metrics,
compute_cls_acc=compute_cls_acc, is_ddp=is_ddp, with_tqdm=with_tqdm
)
self.logger.info(f'Trainer initialized w/ {pl.i(d_log)}')
def post_init(self):
callbacks = self.callback_handler.callbacks
self.callback_handler.callbacks = [ # Remove internal callback
c for c in callbacks if str(c.__class__) != "<class 'transformers.trainer_callback.PrinterCallback'>"
]
if self.custom_logging:
self.add_callback(MyLoggingCallback(self, do_eval=self.args.do_eval, is_ddp=self.is_ddp))
else:
self.add_callback(ColoredPrinterCallback())
if self.with_tqdm:
self.add_callback(MyProgressCallback())
def train(self, **kwargs):
self.mode = 'train'
return super().train(**kwargs)
def evaluate(self, **kwargs):
if not self.is_in_train:
self.mode = 'eval'
return super().evaluate(**kwargs)
def compute_loss(self, model, inputs, return_outputs=False):
"""
Override `Trainer.compute_loss` for logging accuracy
- Note that both training and validation calls `compute_loss`
=> Further logic needs to determine accuracy for which dataset
Modified from https://discuss.huggingface.co/t/metrics-for-training-set-in-trainer/2461/4?u=stefanh
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# ========================== Begin of added ==========================
inputs: Dict[str, torch.Tensor]
if self.custom_logging and model.training and 'labels' in inputs and (not self.disable_train_metrics):
d_log = get_accs(
inputs, outputs.logits.detach(), self.tokenizer, mode=self.mode, compute_cls_acc=self.compute_cls_acc
)
d_log['src'] = 'compute_loss'
self.log(d_log)
# ========================== End of added ==========================
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of
# ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
def prediction_step(self, model, inputs, prediction_loss_only: bool, ignore_keys=None):
"""
Override `Trainer.prediction_step` for reducing memory footprint
"""
# ========================== Begin of added =========================
from transformers.file_utils import is_sagemaker_mp_enabled
from transformers.trainer_pt_utils import nested_detach
if is_sagemaker_mp_enabled():
from transformers.trainer_pt_utils import smp_forward_only, smp_nested_concat
# ========================== End of added =========================
has_labels = all(inputs.get(k) is not None for k in self.label_names)
inputs = self._prepare_inputs(inputs)
if ignore_keys is None:
if hasattr(self.model, "config"):
ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", [])
else:
ignore_keys = []
# labels may be popped when computing the loss (label smoothing for instance) so we grab them first.
if has_labels:
labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))
if len(labels) == 1:
labels = labels[0]
else:
labels = None
with torch.no_grad():
if is_sagemaker_mp_enabled():
raw_outputs = smp_forward_only(model, inputs)
if has_labels:
if isinstance(raw_outputs, dict):
loss_mb = raw_outputs["loss"]
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"])
else:
loss_mb = raw_outputs[0]
logits_mb = raw_outputs[1:]
loss = loss_mb.reduce_mean().detach().cpu()
logits = smp_nested_concat(logits_mb)
else:
loss = None
if isinstance(raw_outputs, dict):
logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys)
else:
logits_mb = raw_outputs
logits = smp_nested_concat(logits_mb)
else:
if has_labels:
with self.autocast_smart_context_manager():
loss, outputs = self.compute_loss(model, inputs, return_outputs=True)
loss = loss.mean().detach()
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"])
else:
logits = outputs[1:]
else:
loss = None
with self.autocast_smart_context_manager():
outputs = model(**inputs)
if isinstance(outputs, dict):
logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)
else:
logits = outputs
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index - 1]
if prediction_loss_only:
return loss, None, None
logits = nested_detach(logits)
if len(logits) == 1:
logits = logits[0]
# ========================== Begin of added =========================
if not self.model.training and self.compute_cls_acc:
# Map logits to predicted label ids right away, instead of accumulating
# the full logits tensor of shape (#eval, sequence length, vocab size); what gets returned is just (#eval,)
# During training, the eval set shares the label set with the training set,
# which is the sole reason `mode='train'` is passed below
d_acc = get_accs(inputs, logits, self.tokenizer, mode='train', compute_cls_acc=self.compute_cls_acc)
args = dict(dtype=labels.dtype, device=labels.device) # For DDP
# TODO: log token-level ACC too?
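# Return a 4-tuple (loss, predicted label ids, true label ids, dataset ids);
# the extra 4th element is consumed by the overridden `evaluation_loop` below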
return (
loss,
torch.tensor(d_acc['cls_acc_meta']['ids_pred'], **args),
torch.tensor(d_acc['cls_acc_meta']['ids_true'], **args),
inputs['dataset_id'].detach()
)
else:
return loss, logits, labels, None
# ========================== End of added =========================
def evaluation_loop(
self,
dataloader: DataLoader,
description: str,
prediction_loss_only: Optional[bool] = None,
ignore_keys: Optional[List[str]] = None,
metric_key_prefix: str = "eval",
) -> EvalLoopOutput:
"""
Overrides `Trainer.evaluation_loop` to additionally gather each sample's `dataset_id` and pass it on to `compute_metrics`
"""
# ========================== Begin of added =========================
from transformers.deepspeed import deepspeed_init
import collections
from transformers.trainer_utils import denumpify_detensorize
from torch.utils.data import IterableDataset
from transformers.trainer_pt_utils import (
find_batch_size, nested_concat, nested_numpify, nested_truncate, IterableDatasetShard
)
# ========================== End of added =========================
args = self.args
prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
# if eval is called w/o train init deepspeed here
if args.deepspeed and not self.deepspeed:
# XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
# from the checkpoint eventually
deepspeed_engine, _, _ = deepspeed_init(
self, num_training_steps=0, resume_from_checkpoint=None, inference=True
)
self.model = deepspeed_engine.module
self.model_wrapped = deepspeed_engine
self.deepspeed = deepspeed_engine
model = self._wrap_model(self.model, training=False)
# if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
# while ``train`` is running, cast it to the right dtype first and then put on device
if not self.is_in_train:
if args.fp16_full_eval:
model = model.to(dtype=torch.float16, device=args.device)
elif args.bf16_full_eval:
model = model.to(dtype=torch.bfloat16, device=args.device)
batch_size = dataloader.batch_size
# ========================== Begin of added =========================
from transformers.utils import logging
logger = logging.get_logger(__name__)
# ========================== End of added =========================
logger.info(f"***** Running {description} *****")
if isinstance(dataloader.dataset, collections.abc.Sized):
logger.info(f" Num examples = {self.num_examples(dataloader)}")
else:
logger.info(" Num examples: Unknown")
logger.info(f" Batch size = {batch_size}")
model.eval()
self.callback_handler.eval_dataloader = dataloader
# Do this before wrapping.
eval_dataset = dataloader.dataset
if is_torch_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)
if args.past_index >= 0:
self._past = None
# Initialize containers
# losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
losses_host = None
preds_host = None
labels_host = None
# ========================== Begin of added =========================
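# `dataset_id` marks which dataset each evaluation sample came from,
# gathered alongside preds/labels and passed on to `compute_metrics`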
dataset_ids_host = None
# ========================== End of added =========================
# losses/preds/labels on CPU (final containers)
all_losses = None
all_preds = None
all_labels = None
# ========================== Begin of added =========================
all_dataset_ids = None
# ========================== End of added =========================
# Will be useful when we have an iterable dataset so don't know its length.
observed_num_examples = 0
# Main evaluation loop
for step, inputs in enumerate(dataloader):
# Update the observed num examples
observed_batch_size = find_batch_size(inputs)
if observed_batch_size is not None:
observed_num_examples += observed_batch_size
# For batch samplers, batch_size is not known by the dataloader in advance.
if batch_size is None:
batch_size = observed_batch_size
# Prediction step
loss, logits, labels, dataset_ids = self.prediction_step(
model, inputs, prediction_loss_only, ignore_keys=ignore_keys
)
if is_torch_tpu_available():
xm.mark_step()
# Update containers on host
if loss is not None:
losses = self._nested_gather(loss.repeat(batch_size))
losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
if logits is not None:
logits = self._pad_across_processes(logits)
logits = self._nested_gather(logits)
preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
if labels is not None:
labels = self._pad_across_processes(labels)
labels = self._nested_gather(labels)
labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
# ========================== Begin of added =========================
if dataset_ids is not None:
dataset_ids = self._pad_across_processes(dataset_ids)
dataset_ids = self._nested_gather(dataset_ids)
dataset_ids_host = (
dataset_ids if dataset_ids_host is None
else nested_concat(dataset_ids_host, dataset_ids, padding_index=-100)
)
# ========================== End of added =========================
self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
# Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = (
labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
)
# ========================== Begin of added =========================
if dataset_ids_host is not None:
dataset_ids = nested_numpify(dataset_ids_host)
all_dataset_ids = (
dataset_ids if all_dataset_ids is None
else nested_concat(all_dataset_ids, dataset_ids, padding_index=-100)
)
# ========================== End of added =========================
# Set back to None to begin a new accumulation
losses_host, preds_host, labels_host, dataset_ids_host = None, None, None, None  # also reset `dataset_ids_host` so samples aren't gathered twice
if args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
# Gather all remaining tensors and put them back on the CPU
if losses_host is not None:
losses = nested_numpify(losses_host)
all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
if preds_host is not None:
logits = nested_numpify(preds_host)
all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if labels_host is not None:
labels = nested_numpify(labels_host)
all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100)
# ========================== Begin of added =========================
if dataset_ids_host is not None:
dataset_ids = nested_numpify(dataset_ids_host)
all_dataset_ids = (
dataset_ids if all_dataset_ids is None
else nested_concat(all_dataset_ids, dataset_ids, padding_index=-100)  # same concatenation order as preds/labels
)
# ========================== End of added =========================
# Number of samples
if not isinstance(eval_dataset, IterableDataset):
num_samples = len(eval_dataset)
# The instance check is weird and does not actually check for the type, but whether the dataset has the right
# methods. Therefore we need to make sure it also has the attribute.
elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, "num_examples"):
num_samples = eval_dataset.num_examples
else:
num_samples = observed_num_examples
# Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
# samplers has been rounded to a multiple of batch_size, so we truncate.
if all_losses is not None:
all_losses = all_losses[:num_samples]
if all_preds is not None:
all_preds = nested_truncate(all_preds, num_samples)
if all_labels is not None:
all_labels = nested_truncate(all_labels, num_samples)
# ========================== Begin of added =========================
if all_dataset_ids is not None:
all_dataset_ids = nested_truncate(all_dataset_ids, num_samples)
# ========================== End of added =========================
# Metrics!
if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
# ========================== Begin of modified =========================
if self.is_local_process_zero():
mep = MyEvalPrediction(predictions=all_preds, label_ids=all_labels, dataset_ids=all_dataset_ids)
metrics = self.compute_metrics(mep)
else:
metrics = {}
# ========================== End of modified =========================
else:
metrics = {}
# To be JSON-serializable, we need to remove numpy types or zero-d tensors
metrics = denumpify_detensorize(metrics)
if all_losses is not None:
metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/util/gpt2_train.py | gpt2_train.py |
import os
import math
import json
import configparser
from os.path import join as os_join
from typing import List, Tuple, Dict, Iterable, Optional
from zipfile import ZipFile
import numpy as np
import pandas as pd
import sklearn
from datasets import load_metric
import matplotlib.pyplot as plt
import gdown
from stefutil import *
from zeroshot_classifier.util.data_path import BASE_PATH, PROJ_DIR, DSET_DIR, PKG_NM, MODEL_DIR
__all__ = [
'in_domain_url', 'out_of_domain_url', 'in_domain_dir_nm', 'out_of_domain_dir_nm', 'download_data',
'sconfig', 'u', 'save_fig', 'plot_points',
'on_great_lakes', 'get_base_path',
'map_model_dir_nm', 'map_model_output_path', 'domain2eval_dir_nm', 'TrainStrategy2PairMap',
'eval_res2df', 'compute_metrics'
]
logger = get_logger('Util')
in_domain_url = 'https://drive.google.com/uc?id=1V7IzdZ9HQbFUQz9NzBDjmqYBdPd9Yfe3'
out_of_domain_url = 'https://drive.google.com/uc?id=1nd32_UrFbgoCgH4bDtFFD_YFZhzcts3x'
in_domain_dir_nm = 'in-domain'
out_of_domain_dir_nm = 'out-of-domain'
def download_data(domain: str = 'in'): # Needed for writing config
assert domain in ['in', 'out']
if domain == 'in':
fnm, url = f'{in_domain_dir_nm}.zip', in_domain_url
else: # `out`
fnm, url = f'{out_of_domain_dir_nm}.zip', out_of_domain_url
base_path = os.path.join(BASE_PATH, PROJ_DIR, DSET_DIR)
os.makedirs(base_path, exist_ok=True)
fl_path = os_join(base_path, fnm)
domain_str = 'in-domain' if domain == 'in' else 'out-of-domain'
logger.info(f'Downloading {pl.i(domain_str)} data from GDrive to {pl.i(fl_path)}...')
gdown.download(url, fl_path, quiet=False)
with ZipFile(fl_path, 'r') as zfl:
zfl.extractall(base_path)
zfl.close()
def _download_all_data():
dset_path = os_join(BASE_PATH, PROJ_DIR, DSET_DIR)
path_in = os_join(dset_path, in_domain_dir_nm)
if not os.path.exists(path_in):
download_data(domain='in')
path_out = os_join(dset_path, out_of_domain_dir_nm)
if not os.path.exists(path_out):
download_data(domain='out')
_download_all_data() # Needed for writing config
config_path = os_join(BASE_PATH, PROJ_DIR, PKG_NM, 'util', 'config.json')
if not os.path.exists(config_path):
from zeroshot_classifier.util.config import ConfigDict
logger.info(f'Writing config file to {pl.i(config_path)}... ')
config_dict = ConfigDict(fast=True).d
with open(config_path, 'w') as f:
json.dump(config_dict, f, indent=4)
sconfig = StefConfig(config_file=config_path).__call__
u = StefUtil(
base_path=BASE_PATH, project_dir=PROJ_DIR, package_name=PKG_NM, dataset_dir=DSET_DIR, model_dir=MODEL_DIR
)
u.plot_path = os_join(BASE_PATH, PROJ_DIR, 'plot')
save_fig = u.save_fig
for _d in sconfig('check-arg'):
ca.cache_mismatch(**_d)
def plot_points(arr, **kwargs):
"""
:param arr: Array of 2d points to plot
:param kwargs: Arguments are forwarded to `matplotlib.axes.Axes.plot`
"""
arr = np.asarray(arr)
kwargs_ = dict(marker='.', lw=0.5, ms=1, c='orange')
kwargs = {**kwargs_, **kwargs} # python3.6 compatibility
plt.plot(arr[:, 0], arr[:, 1], **kwargs)
def on_great_lakes():
return 'arc-ts' in get_hostname()
def get_base_path():
# For remote machines, save heavy-duty data somewhere else to save `/home` disk space
hnm = get_hostname()
if 'clarity' in hnm: # Clarity lab
return '/data'
elif on_great_lakes(): # Great Lakes; `profmars0` picked arbitrarily among [`profmars0`, `profmars1`]
# Per https://arc.umich.edu/greatlakes/user-guide/
return os_join('/scratch', 'profmars_root', 'profmars0', 'stefanhg')
else:
return BASE_PATH
def config_parser2dict(conf: configparser.ConfigParser) -> Dict:
return {sec: dict(conf[sec]) for sec in conf.sections()}
def map_model_dir_nm(
model_name: str = None, name: str = None, mode: Optional[str] = 'vanilla',
sampling: Optional[str] = 'rand', normalize_aspect: bool = False
) -> str:
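# Build a model directory name: timestamp + model name [+ run name],
# plus shorthand flags for training mode (`md`), negative sampling (`sp`) and aspect normalization (`na`)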
out = f'{now(for_path=True)}_{model_name}'
if name:
out = f'{out}_{name}'
d = dict()
if mode: # see config::training.strategies
nms = mode.split('-')
if len(nms) == 1:
d['md'] = mode[:3]
else:
nf, nl = nms[0], nms[-1]
d['md'] = f'{nf[:3]}-{nl[:3]}'
if sampling:
d['sp'] = sampling[0]
if normalize_aspect:
d['na'] = 'T'
if d:
out = f'{out}_{pl.pa(d)}'
return out
def map_model_output_path(
model_name: str = None, output_path: str = None, output_dir: str = None, mode: Optional[str] = 'vanilla',
sampling: Optional[str] = 'rand', normalize_aspect: bool = False
) -> str:
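# Resolve the final model output directory: either rename the last component of `output_path`,
# or create a new directory name under the default project model directory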
def _map(dir_nm_):
return map_model_dir_nm(model_name, dir_nm_, mode, sampling, normalize_aspect)
assert (output_path or output_dir) and not (output_path and output_dir) # sanity check mutually exclusive
if output_path:
paths = output_path.split(os.sep)
output_dir = _map(paths[-1])
return os_join(*paths[:-1], output_dir)
else:
dir_nm = _map(None)
if output_dir:
dir_nm = f'{dir_nm}_{output_dir}'
return os_join(get_base_path(), u.proj_dir, u.model_dir, dir_nm)
def domain2eval_dir_nm(domain: str = 'in'):
domain_str = 'in-domain' if domain == 'in' else 'out-of-domain'
date = now(fmt='short-date')
return f'{date}_{domain_str}'
class TrainStrategy2PairMap:
sep_token = sconfig('training.implicit-on-text.encode-sep.aspect-sep-token')
aspect2aspect_token = sconfig('training.implicit-on-text.encode-aspect.aspect2aspect-token')
def __init__(self, train_strategy: str = 'vanilla'):
self.train_strategy = train_strategy
ca(training_strategy=train_strategy)
def __call__(self, aspect: str = None):
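# Return a function mapping (text, candidate labels) to the sentence pairs fed to the encoder,
# formatted according to the training strategy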
if self.train_strategy in ['vanilla', 'explicit']:
def txt_n_lbs2query(txt: str, lbs: List[str]) -> List[List[str]]:
return [[txt, lb] for lb in lbs]
elif self.train_strategy == 'implicit':
def txt_n_lbs2query(txt: str, lbs: List[str]) -> List[List[str]]:
return [[txt, f'{lb} {aspect}'] for lb in lbs]
elif self.train_strategy == 'implicit-on-text-encode-aspect':
def txt_n_lbs2query(txt: str, lbs: List[str]) -> List[List[str]]:
return [[f'{TrainStrategy2PairMap.aspect2aspect_token[aspect]} {txt}', lb] for lb in lbs]
else:
assert self.train_strategy == 'implicit-on-text-encode-sep'
def txt_n_lbs2query(txt: str, lbs: List[str]) -> List[List[str]]:
return [[f'{aspect} {TrainStrategy2PairMap.sep_token} {txt}', lb] for lb in lbs]
return txt_n_lbs2query
def map_label(self, label: str, aspect: str = None):
if self.train_strategy == 'implicit':
assert aspect is not None
return f'{label} {aspect}'
else:
return label
def map_text(self, text: str, aspect: str = None):
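# For the implicit-on-text strategies, prepend aspect information to the text,
# e.g. (illustrative) 'intent <aspect-sep-token> book me a flight' for the `sep` variant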
if self.train_strategy in ['implicit-on-text-encode-aspect', 'implicit-on-text-encode-sep']:
assert aspect is not None
if self.train_strategy == 'implicit-on-text-encode-aspect':
return f'{TrainStrategy2PairMap.aspect2aspect_token[aspect]} {text}'
else:
return f'{aspect} {TrainStrategy2PairMap.sep_token} {text}'
else:
return text
def eval_res2df(labels: Iterable, preds: Iterable, report_args: Dict = None, pretty: bool = True) -> Tuple[pd.DataFrame, float]:
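# `report_args` is expected to include `output_dict=True` so the report can be indexed as a dict below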
report = sklearn.metrics.classification_report(labels, preds, **(report_args or dict()))
if 'accuracy' in report:
acc = report['accuracy']
else:
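# when sklearn reports 'micro avg' instead of 'accuracy', the code assumes micro precision/recall/f1
# coincide (verified by the assert) and uses that value as accuracy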
vals = [v for k, v in report['micro avg'].items() if k != 'support']
assert all(math.isclose(v, vals[0], abs_tol=1e-8) for v in vals)
acc = vals[0]
return pd.DataFrame(report).transpose(), round(acc, 3) if pretty else acc
def compute_metrics(eval_pred):
if not hasattr(compute_metrics, 'acc'):
compute_metrics.acc = load_metric('accuracy')
logits, labels = eval_pred
preds = np.argmax(logits, axis=-1)
return dict(acc=compute_metrics.acc.compute(predictions=preds, references=labels)['accuracy'])
if __name__ == '__main__':
from stefutil import *
def check_gl():
mic(on_great_lakes())
mic(get_base_path())
check_gl() | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/util/util.py | util.py |
import os
import json
import math
import pickle
from os.path import join as os_join
from typing import List, Tuple, Dict, Iterable, Callable, Any, Union
from zipfile import ZipFile
from statistics import harmonic_mean
from collections import Counter, namedtuple, defaultdict
import numpy as np
import pandas as pd
import torch
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.manifold import TSNE
from datasets import Value, Features, ClassLabel, Sequence, Dataset, DatasetDict
from sentence_transformers import SentenceTransformer
import spacy
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.patches import Ellipse
from matplotlib.colors import to_rgba
from matplotlib.patheffects import withStroke
import seaborn as sns
from tqdm.auto import tqdm
import gdown
from stefutil import *
from zeroshot_classifier.util.util import *
from zeroshot_classifier.util import load_data
from zeroshot_classifier.util.data_path import BASE_PATH, PROJ_DIR, DSET_DIR
LOAD_TSNE = False
if LOAD_TSNE and torch.cuda.is_available():
from tsnecuda import TSNE as cuTSNE
logger = get_logger('UTCD')
EOT_TOKEN = '[eot]' # end of turn token for sgd
def get_utcd_from_gdrive(domain: str = 'in'):
ca(dataset_domain=domain)
path = os_join(u.proj_path, u.dset_dir, 'UTCD')
os.makedirs(path, exist_ok=True)
if domain == 'in':
url = 'https://drive.google.com/uc?id=1V7IzdZ9HQbFUQz9NzBDjmqYBdPd9Yfe3'
fnm = os_join(path, 'in-domain')
else:
url = 'https://drive.google.com/uc?id=1nd32_UrFbgoCgH4bDtFFD_YFZhzcts3x'
fnm = os_join(path, 'out-of-domain')
fnm = f'{fnm}.zip'
logger.info(f'Downloading from GDrive url {pl.i(url)} to {pl.i(fnm)}... ')
gdown.download(url=url, output=fnm, quiet=False)
logger.info(f'Extracting {pl.i(fnm)} to {pl.i(path)}... ')
with ZipFile(fnm, 'r') as zip_:
zip_.extractall(path)
zip_.close()
def dataset2hf_dataset(
dataset: load_data.Dataset = None, labels: List[str] = None, multi_label: bool = False
) -> Dataset:
# Map to **local** integer labels; index is label per `lbs_` ordering, same with `datasets.ClassLabel`
lb2id = {lb: i for i, lb in enumerate(labels)}
# if not multi-label, `Sequence` of single element
df = pd.DataFrame([dict(text=txt, labels=[lb2id[lb] for lb in lbs]) for txt, lbs in dataset.items()])
length = -1 if multi_label else 1
lbs = Sequence(feature=ClassLabel(names=labels), length=length)
feats = Features(text=Value(dtype='string'), labels=lbs)
return Dataset.from_pandas(df, features=feats)
def subsample_dataset(dataset_name: str = None, split: str = 'train', n_tgt: int = 5000, seed: int = None) -> Dataset:
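# Load the given split of one UTCD dataset, subsample it to roughly `n_tgt` texts while keeping
# the class distribution, and convert it into a HF `Dataset`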
d = sconfig(f'UTCD.datasets.{dataset_name}')
domain = d['domain']
dset = load_data.get_datasets(domain=domain, dataset_names=dataset_name)[dataset_name][split]
dset: Dict[str, List[str]] # text => list of labels
d = get(d, f'splits.{split}')
dset = load_data.subsample_dataset(dataset=dset, n_src=d['n_pair'], n_tgt=n_tgt, seed=seed)
return dataset2hf_dataset(dataset=dset, labels=d['labels'], multi_label=d['multi_label'])
def process_utcd_dataset(domain: str = 'in', join=False):
"""
:param domain: One of [`in`, `out`]
If 'in', process all the in-domain datasets; otherwise, process all the out-of-domain datasets
:param join: If true, all datasets are joined to a single dataset
.. note::
1. The original dataset format is a dictionary mapping text to a list of labels
2. The datasets are always processed into a multi-label format
Save processed datasets to disk
"""
ca(dataset_domain=domain)
output_dir = 'UTCD-in' if domain == 'in' else 'UTCD-out'
path_dsets = os_join(u.proj_path, u.dset_dir)
domain_str = 'in-domain' if domain == 'in' else 'out-of-domain'
if not os.path.exists(os_join(path_dsets, 'UTCD', domain_str)):
get_utcd_from_gdrive(domain=domain)
path_out = os_join(get_base_path(), PROJ_DIR, DSET_DIR, 'processed')
logger.info(f'Processing UTCD datasets with {pl.i(dict(domain=domain, join=join))}... ')
def path2dsets(dnm: str, d_dset: Dict) -> Union[DatasetDict, Dict[str, pd.DataFrame]]:
logger.info(f'Processing dataset {pl.i(dnm)}... ')
path_ = d_dset['path']
path_ = os_join(path_dsets, f'{path_}.json')
with open(path_) as f:
dsets_: Dict = json.load(f)
def json2dset(split: str, dset: load_data.Dataset) -> Union[Dataset, pd.DataFrame]:
assert split in ['train', 'test']
if join: # will convert to global integers later, see below
return pd.DataFrame([dict(text=txt, labels=lbs) for txt, lbs in dset.items()])
else:
d = sconfig(f'UTCD.datasets.{dnm}.splits.{split}')
return dataset2hf_dataset(dataset=dset, labels=d['labels'], multi_label=d['multi_label'])
return DatasetDict(
{key: json2dset(key, dset) for key, dset in dsets_.items() if key not in ['labels', 'aspect']}
)
d_dsets = {
dnm: path2dsets(dnm, d) for dnm, d in sconfig('UTCD.datasets').items() if d['domain'] == domain
}
if join:
dnm2id = sconfig('UTCD.dataset_name2id')
# Global label across all datasets, all splits
# Needed for inversely mapping to local label regardless of joined split, e.g. train/test,
# in case some label only in certain split
lbs_global = [
sconfig(f'UTCD.datasets.{dnm}.splits.{split}.labels')
for dnm in d_dsets.keys() for split in ['train', 'test']
]
lbs_global = sorted(set().union(*lbs_global))
lb2id_global = {lb: i for i, lb in enumerate(lbs_global)}
# cos definitely multi-label
lbs_global = Sequence(feature=ClassLabel(names=lbs_global), length=-1)
def map_labels(lbs: List[str]) -> List[int]:
return [lb2id_global[lb] for lb in lbs]
def prep_single(dnm: str, df_: pd.DataFrame) -> pd.DataFrame:
df_['dataset_id'] = [dnm2id[dnm]] * len(df_) # Add dataset source information to each row
df_.labels = df_.labels.apply(map_labels)
return df_
def dfs2dset(dfs: Iterable[pd.DataFrame]) -> Dataset:
df = pd.concat(dfs)
# The string labels **may overlap** across the datasets
# Keep internal feature label ordering same as dataset id
lbs_dset = sorted(dnm2id, key=dnm2id.get)
features = Features(text=Value(dtype='string'), labels=lbs_global, dataset_id=ClassLabel(names=lbs_dset))
return Dataset.from_pandas(df, features=features)
tr = dfs2dset([prep_single(dnm, dsets['train']) for dnm, dsets in d_dsets.items()])
vl = dfs2dset([prep_single(dnm, dsets['test']) for dnm, dsets in d_dsets.items()])
dsets = DatasetDict(train=tr, test=vl)
path = os_join(path_out, output_dir)
dsets.save_to_disk(path)
logger.info(f'{pl.i("Joined")} Dataset saved to {pl.i(path)} ')
else:
for dnm, dsets in d_dsets.items():
dsets.save_to_disk(os_join(path_out, dnm))
logger.info(f'Dataset {pl.i(dnm)} saved to {pl.i(path_out)}')
def map_ag_news():
dnm = 'ag_news'
d_dset = sconfig(f'UTCD.datasets.{dnm}')
ext = sconfig('UTCD.dataset_ext')
path_dset = os_join(BASE_PATH, PROJ_DIR, DSET_DIR)
path = d_dset['path']
path = os_join(path_dset, f'{path}.{ext}')
with open(path) as f:
dsets: Dict = json.load(f)
d_lb2desc = sconfig(f'baselines.gpt2-nvidia.label-descriptors.{dnm}')
for split, dset in dsets.items():
dsets[split] = [[txt, d_lb2desc[lb]] for txt, lb in dset]
with open(os_join(path_dset, f'{dnm}.json'), 'w') as f:
json.dump(dsets, f, indent=4)
def get_utcd_info() -> pd.DataFrame:
"""
Metadata about each dataset in UTCD
"""
k_avg_tok = [f'{mode}-{text_type}_avg_tokens' for text_type in ['txt', 'lb'] for mode in ['re', 'bert', 'gpt2']]
infos = [
dict(dataset_name=dnm, aspect=d_dset['aspect'], domain=d_dset['domain'])
| {f'{split}-{k}': v for split, d_info in d_dset['splits'].items() for k, v in d_info.items()}
| {k: d_dset[k] for k in k_avg_tok}
for dnm, d_dset in sconfig('UTCD.datasets').items()
]
return pd.DataFrame(infos)
def get_dataset_names(domain: str = 'in'):
return [dnm for dnm, d_dset in sconfig('UTCD.datasets').items() if d_dset['domain'] == domain]
def get_eval_dataset_names(domain: str = 'in', dataset_name: str = 'all') -> List[str]:
all_dset = dataset_name == 'all'
if not all_dset:
_dom = sconfig(f'UTCD.datasets.{dataset_name}.domain')
if domain is None:  # infer the domain from the dataset
domain = _dom
else:  # sanity check the given domain matches the dataset's
assert domain == _dom
return get_dataset_names(domain) if all_dset else [dataset_name]
UtcdDatasetNames = namedtuple('UtcdDatasetNames', ['in_domain', 'out_of_domain'])
def _get_utcd_dnms() -> UtcdDatasetNames:
return UtcdDatasetNames(in_domain=get_dataset_names('in'), out_of_domain=get_dataset_names('out'))
def get_dataset(dnm: str, split: str) -> Dict[str, List[str]]:
d = sconfig(f'UTCD.datasets.{dnm}')
path = os_join(BASE_PATH, PROJ_DIR, DSET_DIR, f'{d["path"]}.json')
with open(path) as fl:
return json.load(fl)[split]
def get_add_special_tokens_args(tokenizer, train_strategy: str = 'vanilla') -> Dict:
"""
:return: The `Tokenizer.add_special_tokens` kwargs if the tokenizer & model need to be modified; otherwise None
"""
ca(training_strategy=train_strategy)
modify = False
# Include the `end-of-turn` token for SGD; it cannot be set as `eos` since GPT2 already defines '<|endoftext|>',
# and for consistency the BERT tokenizer's `eos` is not overridden either
add_spec_toks = []
if train_strategy == 'explicit': # sanity check, SGD EOT should be added already
added_vocab = tokenizer.get_added_vocab()
assert list(added_vocab.keys()) == [EOT_TOKEN]
else:
add_spec_toks.append(EOT_TOKEN)
modify = True
if train_strategy == 'implicit-on-text-encode-aspect':
add_spec_toks += list(sconfig('training.implicit-on-text.encode-aspect.aspect2aspect-token').values())
elif train_strategy == 'implicit-on-text-encode-sep':
add_spec_toks += [sconfig('training.implicit-on-text.encode-sep.aspect-sep-token')]
spec_tok_args = dict()
if add_spec_toks:
spec_tok_args['additional_special_tokens'] = add_spec_toks
modify = True
if modify:
return spec_tok_args
class VisualizeOverlap:
path_dset = os_join(BASE_PATH, PROJ_DIR, DSET_DIR)
in_dnms, out_dnms = _get_utcd_dnms()
# for in-domain, the training split, for out-of-domain, the test split
dnm2n_txt = {dnm: sconfig(f'UTCD.datasets.{dnm}.splits.train.n_text') for dnm in in_dnms}
dnm2n_txt.update({dnm: sconfig(f'UTCD.datasets.{dnm}.splits.test.n_text') for dnm in out_dnms})
def __init__(self):
pass
@staticmethod
def dnm2samples_n_total(dnm: str, kind: str, split: str) -> Tuple[Union[Iterable[str], List[str]], int]:
if kind == 'label':
it = sconfig(f'UTCD.datasets.{dnm}.splits.{split}.labels')
return it, len(it)
else: # text
d = sconfig(f'UTCD.datasets.{dnm}')
path = os_join(VisualizeOverlap.path_dset, f'{d["path"]}.json')
with open(path) as fl:
return json.load(fl)[split].keys(), VisualizeOverlap.dnm2n_txt[dnm]
@staticmethod
def get_utcd_overlap(
kind: str = 'label', metric: str = 'harmonic', stat='tfidf', stat_args: Dict = None,
weighted_average: bool = True
) -> pd.DataFrame:
"""
A normalized overlap score between each out-of-domain dataset and each in-domain dataset,
plus an aggregate across all in-domain datasets
Intended to relate out-of-domain performance to vocabulary overlap
"""
ca.check_mismatch('Sample Type', kind, ['label', 'text'])
ca.check_mismatch('Overlap Metric', metric, ['harmonic', 'absolute'])
ca.check_mismatch('Word Statistics', stat, ['count', 'tfidf'])
logger.info(f'Getting UTCD Overlap for {pl.i(kind=kind, metric=metric, stat=stat, stat_args=stat_args)}')
if stat == 'tfidf':
def tokenize(pbar) -> Callable:
def _tokenize(txt: str) -> List[str]:
lst = [tok.lemma_ for tok in nlp(txt) if not tok.is_stop]
pbar.update(1)
return lst
return _tokenize
# TODO: tweak?
stat_args: Dict[str, Any] = stat_args if stat_args is not None else dict(max_df=0.8, min_df=3)
assert 'token_pattern' not in stat_args and 'tokenizer' not in stat_args
stat_args['token_pattern'] = None
elif stat_args is not None:
raise NotImplementedError(f'{pl.i("stat_args")} supported for {pl.i("tfidf")} only')
nlp = spacy.load('en_core_web_sm')
nlp.max_length *= 10 # for `multi_eurlex`
def _dnm2lemma_count(dnm_: str, split: str) -> Union[Counter, TfidfVectorizer]:
it, total = VisualizeOverlap.dnm2samples_n_total(dnm_, kind, split)
in_domain = dnm_ in VisualizeOverlap.in_dnms
domain_str = 'in-domain' if in_domain else 'out-of-domain'
split = 'train' if in_domain else 'test'
pbar_args = dict(desc=f'Lemmatizing {domain_str} {dnm_} {split}', unit='sample', total=total, miniters=64)
if stat == 'count':
c = Counter()
for s in tqdm(it, **pbar_args):
# TODO: 1) `&` isn't a stop word? 2) lowercase everything? 3) remove characters?
c.update(tok.lemma_ for tok in nlp(s) if not tok.is_stop)
return c
else: # tfidf
pbar = tqdm(**pbar_args)
stat_args['tokenizer'] = tokenize(pbar)
v = TfidfVectorizer(**stat_args)
v.fit(it)
pbar.close()
return v
dnm2lemma_count = dict()
in_dnms, out_dnms = VisualizeOverlap.in_dnms, VisualizeOverlap.out_dnms
for dnm in in_dnms:
dnm2lemma_count[dnm] = _dnm2lemma_count(dnm, 'train')
logger.info(f'Lemmatizing {pl.i("in-domain")} dataset {pl.i(dnm)}, {pl.i("train")} split')
for dnm in out_dnms:
dnm2lemma_count[dnm] = _dnm2lemma_count(dnm, 'test')
logger.info(f'Lemmatizing {pl.i("out-of-domain")} dataset {pl.i(dnm)}, {pl.i("test")} split')
lst_rows = []
# See below, weighted by #samples for each in-domain dataset; TODO: weight also by label support?
in_dnm2n_pr = {dnm: sconfig(f'UTCD.datasets.{dnm}.splits.train.n_pair') for dnm in in_dnms}
for dnm_out in out_dnms:
d_row = dict()
for dnm_in in in_dnms:
if stat == 'count':
c_in: Counter = dnm2lemma_count[dnm_in]
c_out: Counter = dnm2lemma_count[dnm_out]
inter = set(c_in) & set(c_out)
n_inter_in, n_in = sum(c_in[i] for i in inter), sum(c_in.values())
n_inter_out, n_out = sum(c_out[i] for i in inter), sum(c_out.values())
else: # tfidf
v_in: TfidfVectorizer = dnm2lemma_count[dnm_in]
v_out: TfidfVectorizer = dnm2lemma_count[dnm_out]
inter = set(v_in.get_feature_names_out()) & set(v_out.get_feature_names_out())
idxs_in, idxs_out = [v_in.vocabulary_[i] for i in inter], [v_out.vocabulary_[i] for i in inter]
n_inter_in, n_in = v_in.idf_[idxs_in].sum(), v_in.idf_.sum()
n_inter_out, n_out = v_out.idf_[idxs_out].sum(), v_out.idf_.sum()
# Considers the count for both datasets; also ensure in range [0, 1]
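# harmonic: 2ab / (a + b) of the two directional ratios a = n_inter_in / n_in, b = n_inter_out / n_out;
# absolute: pooled ratio (n_inter_in + n_inter_out) / (n_in + n_out)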
if metric == 'harmonic':
d_row[dnm_in] = harmonic_mean([n_inter_in / n_in, n_inter_out / n_out])
else:
assert metric == 'absolute'
d_row[dnm_in] = (n_inter_in + n_inter_out) / (n_in + n_out)
dnms, vals = zip(*d_row.items())
d_row['average'] = np.mean(vals)
if weighted_average:
d_row['weighted_average'] = np.average(vals, weights=[in_dnm2n_pr[dnm] for dnm in dnms])
d_row['dataset_name'] = dnm_out
lst_rows.append(d_row)
return pd.DataFrame(lst_rows).set_index('dataset_name')
@staticmethod
def plot_utcd_overlap(
kind: str = 'label', save: bool = False, title: str = None,
get_overlap_args: Dict = None, fig_args: Dict = None, cbar_ax: bool = True
) -> None:
d_dset = sconfig('UTCD.datasets')
def dnm2dnm_print(dnm: str) -> str:
if dnm in d_dset:
# words = dnm.split('_')
# return '\n'.join(w.capitalize() for w in words)
return sconfig(f'UTCD.datasets.{dnm}.name_compact')
else:
words = dnm.split('_')
return '\n'.join(rf'$\it{{{wd}}}$' for wd in words)
df = VisualizeOverlap.get_utcd_overlap(kind=kind, **(get_overlap_args or dict()))
df *= 100
df.rename(lambda s: dnm2dnm_print(s), axis=1, inplace=True)
df.rename(lambda s: dnm2dnm_print(s), axis=0, inplace=True)
_fig_args = dict(nrows=1, ncols=2 if cbar_ax else 1, figsize=(10 + 0.25, 8))
if fig_args:
_fig_args.update(fig_args)
if cbar_ax:
if 'gridspec_kw' not in _fig_args:
w, h = _fig_args['figsize']
_fig_args['gridspec_kw'] = dict(width_ratios=[w-0.25, 0.25])
fig, (ax, ax_cbar) = plt.subplots(**_fig_args)
else:
assert 'gridspec_kw' not in _fig_args
(fig, ax), ax_cbar = plt.subplots(**_fig_args), None
ax = sns.heatmap(df, annot=True, cmap='mako', fmt='.1f', square=True, ax=ax, cbar_ax=ax_cbar)
ax.xaxis.set_ticks_position('top')
ax.xaxis.set_label_position('top')
ax.tick_params(axis='y', labelrotation=0)
ax.tick_params(axis='x', top=False) # hide tick marks
title_ = f'Out-of-domain eval datasets {kind.capitalize()} overlap against In-domain training datasets'
if title == 'none':
title = title_ # for filename export
else:
title = title or title_
plt.suptitle(title)
ax.set_xlabel('In-domain dataset', fontsize=12, labelpad=10)
ax.set_ylabel('Out-of-domain dataset', fontsize=12)
if cbar_ax:
ax_cbar.set_ylabel('Overlap Score (%)')
if save:
# see `get_utcd_overlap`
mt_, st_ = get_overlap_args.get('metric', 'harmonic'), get_overlap_args.get('stat', 'tfidf')
mt_ = 'harm' if mt_ == 'harmonic' else 'abs'
st_ = 'ti' if st_ == 'tfidf' else 'ct'
save_fig(f'{title}, mt={mt_}, st={st_}')
else:
plt.show()
@staticmethod
def get_utcd_embeddings(
kind: str = 'label', aspect: str = None, batch_size: int = 16, cache: str = None
) -> Dict[str, np.ndarray]:
"""
Encode dataset samples (labels or texts) with Sentence-BERT, returning one embedding array per dataset,
for visualizing dataset cluster overlap (which hopefully lines up with performance)
"""
def _get():
return VisualizeOverlap._get_utcd_embeddings(kind=kind, aspect=aspect, batch_size=batch_size)
if cache:
fnm = f'{cache}.pkl'
path = os_join(BASE_PATH, PROJ_DIR, 'cache')
os.makedirs(path, exist_ok=True)
path = os_join(path, fnm)
if os.path.exists(path):
with open(path, 'rb') as f:
return pickle.load(f)
else:
d = _get()
with open(path, 'wb') as f:
pickle.dump(d, f)
return d
else:
return _get()
@staticmethod
def _get_utcd_embeddings(kind, aspect, batch_size):
# per SBert package, the one with the highest quality
model = SentenceTransformer('all-mpnet-base-v2', device='cuda' if torch.cuda.is_available() else 'cpu')
in_dnms, out_dnms = VisualizeOverlap.in_dnms, VisualizeOverlap.out_dnms
ret = dict()
dnms = in_dnms + out_dnms
if aspect is not None:
dnms = [dnm for dnm in dnms if sconfig(f'UTCD.datasets.{dnm}.aspect') == aspect]
for dnm in dnms:
split = 'train' if dnm in in_dnms else 'test'
it, total = VisualizeOverlap.dnm2samples_n_total(dnm, kind, split)
total = math.ceil(total/batch_size)
desc = f'Encoding {dnm:>21} {kind:>5} {split:>5}'
vects = np.empty(total, dtype=object)
for i, sents in enumerate(tqdm(group_n(it, batch_size), total=total, desc=desc, unit='ba')):
vects[i] = model.encode(sents, batch_size=batch_size)
ret[dnm] = np.concatenate(vects)
return ret
@staticmethod
def plot_utcd_embeddings(
kind: str = 'label', save=False, aspect: str = None, cs: List = None, mode: str = 'sklearn',
n_sample: int = None,
**kwargs
):
"""
:param kind: Encode either text or label
:param save: If true, plot is saved
:param aspect: If given, plot only one aspect
:param cs: A list of colors for each cluster
:param mode: t-SNE mode, one of ['sklearn', 'cuda']
:param n_sample: If given, plot a subset of each dataset randomly
"""
from adjustText import adjust_text
ca.check_mismatch('Sample Type', kind, ['label', 'text'])
ca.check_mismatch('t-SNE Mode', mode, ['sklearn', 'cuda'])
if aspect is not None:
ca.check_mismatch('Dataset Aspect', aspect, ['sentiment', 'intent', 'topic'])
d_log = dict(kind=kind, aspect=aspect, mode=mode)
logger.info(f'Plotting embeddings on {pl.i(d_log)}... ')
d_vect = VisualizeOverlap.get_utcd_embeddings(kind=kind, aspect=aspect, **kwargs)
if n_sample:
def _get_sample(dnm):
idxs = np.random.permutation(len(d_vect[dnm]))[:n_sample]
return d_vect[dnm][idxs]
d_vect = {dnm: _get_sample(dnm) for dnm in d_vect}
dnms = VisualizeOverlap.in_dnms + VisualizeOverlap.out_dnms
if aspect is not None:
dnms = [dnm for dnm in dnms if sconfig(f'UTCD.datasets.{dnm}.aspect') == aspect]
vect = np.concatenate([d_vect[dnm] for dnm in dnms])
# TODO or `random` init?
args = dict(
n_components=2, perplexity=50,
# learning_rate='auto', # TODO: causes numpy error???
learning_rate=1000,
random_state=sconfig('random-seed')
)
if mode == 'sklearn':
cls = TSNE
args['init'] = 'pca'
else:
cls = cuTSNE
args['init'] = 'random'
del args['random_state']
logger.info(f'Running t-SNE on {pl.i(len(vect))} vectors with args {pl.i(args)}... ')
mapped = cls(**args).fit_transform(vect)
logger.info('Plotting... ')
k_dnm = 'dataset_name'
df = pd.DataFrame(list(chain_its([dnm] * len(d_vect[dnm]) for dnm in dnms)), columns=[k_dnm])
df['x'] = mapped[:, 0]
df['y'] = mapped[:, 1]
aspect2domain2dset = defaultdict(lambda: defaultdict(list))
for dnm, d_dset in sconfig('UTCD.datasets').items():
aspect2domain2dset[d_dset['aspect']][d_dset['domain']].append(dnm)
if not cs:
n_gap = 6
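# generate extra unused hues (`n_gap`) between aspect groups so that datasets belonging to
# different aspects end up with visibly distinct color ranges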
n_aspect, n_dset_per_aspect = sconfig('UTCD.num_aspect'), sconfig('UTCD.num_dataset_per_aspect')
if aspect is not None:
n_aspect = 1
cs = sns.color_palette('husl', n_colors=n_aspect * (n_dset_per_aspect+n_gap))
cs = cs[:n_dset_per_aspect] + cs[n_dset_per_aspect+n_gap:n_dset_per_aspect*2+n_gap] + \
cs[n_dset_per_aspect*2+n_gap*2:-n_gap]
dnms = [] # update order for color-coding
for i_as, aspect_ in enumerate(aspect2domain2dset.keys()):
if aspect is not None and aspect_ != aspect:
continue
for i_dm, (domain, dnms_) in enumerate(aspect2domain2dset[aspect_].items()):
for i_dset, dnm in enumerate(dnms_):
dnms.append(dnm)
df_col2cat_col(df, k_dnm, categories=dnms) # enforce legend order
dnm2count = {k: len(v) for k, v in d_vect.items()}
n_sample = sum(dnm2count.values()) # now, all datasets combined
fig_w, fig_h = 10, 12
ms = max(min(fig_w * fig_h * 128/n_sample, 192), 16)
dnm2ms = {dnm: 1/math.log(c) * ms for dnm, c in dnm2count.items()}
fig = plt.figure(figsize=(fig_w, fig_h), constrained_layout=False)
ax = sns.scatterplot(data=df, x='x', y='y', hue=k_dnm, palette=cs, size=k_dnm, sizes=dnm2ms, alpha=0.3)
def confidence_ellipse(xs_, ys_, n_std=1., **kws):
"""
Modified from https://matplotlib.org/stable/gallery/statistics/confidence_ellipse.html
Create a plot of the covariance confidence ellipse of x and y
:param xs_: x values
:param ys_: y values
:param n_std: number of standard deviations determining the ellipse's radius
:return matplotlib.patches.Ellipse
"""
cov = np.cov(xs_, ys_)
pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
r_x, r_y = np.sqrt(1 + pearson), np.sqrt(1 - pearson)
_args = {**dict(fc='none'), **kws}
ellipse = Ellipse((0, 0), width=r_x*2, height=r_y*2, **_args)
scl_x, scl_y = np.sqrt(cov[0, 0]) * n_std, np.sqrt(cov[1, 1]) * n_std
mu_x, mu_y = np.mean(xs_), np.mean(ys_)
tsf = transforms.Affine2D().rotate_deg(45).scale(scl_x, scl_y).translate(mu_x, mu_y)
ellipse.set_transform(tsf + ax.transData)
return ax.add_patch(ellipse)
txt_locs, dnm2pa = [], dict()
for dnm, c in zip(dnms, cs):
xs, ys = df[df[k_dnm] == dnm]['x'].values, df[df[k_dnm] == dnm]['y'].values
dnm2pa[dnm] = confidence_ellipse(xs, ys, n_std=1, fc=to_rgba(c, 0.1), ec=to_rgba(c, 0.6))
inv_tsf = ax.transData.inverted()
txts = []
for dnm, c in zip(dnms, cs):
xs, ys = df[df[k_dnm] == dnm]['x'].values, df[df[k_dnm] == dnm]['y'].values
pa = dnm2pa[dnm]
verts = pa.get_transform().transform_path(pa.get_path()).vertices
verts = inv_tsf.transform(verts) # this is needed to get the vertices properly
def close_to_added(x_, y_, threshold=1):
for x__, y__ in txt_locs:
if np.sqrt((x_ - x__) ** 2 + (y_ - y__) ** 2) < threshold:
return True
return False
def in_other_ellipse(x_, y_):
other_dnms = [dnm_ for dnm_ in dnms if dnm_ != dnm]
for dnm_ in other_dnms:
pa_ = dnm2pa[dnm_]
path = pa_.get_transform().transform_path(pa_.get_path())
if inv_tsf.transform_path(path).contains_point((x_, y_)):
return True
return False
x, y, coord_found = None, None, False # find a working coordinate to add the text
verts = np.random.permutation(verts)
for x, y in verts:
if not close_to_added(x, y, threshold=3) and not in_other_ellipse(x, y):
coord_found = True
break
if not coord_found:
verts = np.random.permutation(verts)
for x, y in verts:
if not close_to_added(x, y):
coord_found = True
break
if not coord_found:
x, y = np.mean(xs), np.mean(ys)
txt_locs.append((x, y))
txts.append(plt.text(x=x, y=y, s=dnm.replace('_', ' '), c=c, ha='center', va='center'))
adjust_text(txts)
for txt in txts: # add border-color
txt.set_path_effects([withStroke(linewidth=1, foreground='w')])
def map_label(dnm: str) -> str:
_d_dset = sconfig(f'UTCD.datasets.{dnm}')
dm = _d_dset['domain']
asp = _d_dset['aspect']
dnm = dnm.replace('_', ' ')
dm = rf'$\it{{{dm}}}$'
asp = rf'$\it{{{asp}}}$'
return f'{asp}::{dm}::{dnm}'
ax.set_aspect('equal')
ax.set_xlabel(None)
ax.set_ylabel(None)
title = f'UTCD dataset Embedded {kind.capitalize()} t-SNE scatter plot'
if aspect:
title = f'{title} on {aspect.capitalize()}'
plt.suptitle(title)
lgd = ax.get_legend() # need to have the seaborn legend added first
lgd.remove()
lgd = fig.legend(title=k_dnm.replace('_', ' '), loc='lower center', bbox_transform=fig.transFigure, ncol=3)
for t in lgd.get_texts():
t.set_text(map_label(t.get_text()))
legend_v_ratio = 0.15
plt.subplots_adjust(bottom=legend_v_ratio)
plt.tight_layout(rect=[0, legend_v_ratio, 1, 1])
if save:
title = f'{title}, md={mode}'
if n_sample:
title = f'{title}, n={n_sample}'
save_fig(title)
else:
plt.show()
if __name__ == '__main__':
from datasets import load_from_disk
mic.output_width = 256
np.random.seed(sconfig('random-seed'))
def sanity_check(dsets_nm):
path = os_join(get_base_path(), PROJ_DIR, DSET_DIR, 'processed', dsets_nm)
mic(path)
dset = load_from_disk(path)
te, vl = dset['train'], dset['test']
mic(len(te), len(vl))
lbs = vl.features['labels'].feature
mic(lbs)
mic(vl[60])
mic(lbs.int2str(154))
# sanity_check('UTCD-in')
def get_utcd_in():
process_utcd_dataset(domain='in', join=False)
# sanity_check('UTCD-in')
get_utcd_in()
# get_utcd_from_gdrive(domain='out')
def get_utcd_out():
process_utcd_dataset(domain='out', join=False)
sanity_check('UTCD-out')
# get_utcd_out()
def output_utcd_info():
df = get_utcd_info()
mic(df)
df.to_csv(os_join(u.base_path, u.proj_dir, u.dset_dir, 'utcd-info.csv'), float_format='%.3f')
# output_utcd_info()
vs = VisualizeOverlap()
def plot_token_overlap():
# mic(get_utcd_overlap())
kd = 'label'
# kd = 'text'
# st = 'count'
st = 'tfidf'
args = dict()
if kd == 'text':
args = None
# sv = False
sv = True
vs.plot_utcd_overlap(
kind=kd, save=sv, title='none',
get_overlap_args=dict(stat=st, stat_args=args, weighted_average=False),
fig_args=dict(
figsize=(11, 7.5),
# gridspec_kw=dict(width_ratios=[9, 0.5])
),
cbar_ax=False
)
# vs.profile_runtime(lambda: get_utcd_overlap(kind=kd))
# plot_token_overlap()
def plot_encoded_overlap():
kd = 'text'
# kd = 'label'
# mic(vs.get_utcd_embeddings(kind=kd))
# sv = False
sv = True
cnm = f'{kd} embedding cache'
# cs = None
# cs = sns.color_palette('husl', n_colors=18)
# cs = sns.color_palette('hls', n_colors=18)
cs = sns.color_palette(n_colors=18)
md = 'cuda'
# TODO: running on all data & some # subset of data gives CUDA error???
# n = None
n = 3072 * 32
vs.plot_utcd_embeddings(kind=kd, cs=cs, save=sv, cache=cnm, batch_size=1024, mode=md, n_sample=n)
# plot_encoded_overlap()
def plot_encoded_overlap_aspect():
kd = 'label'
# sv = False
sv = True
cnm = f'{kd} embedding cache'
# aspect = None
# aspect = 'topic'
# aspect = 'intent'
# aspect = 'sentiment'
for aspect in sconfig('UTCD.aspects'):
vs.plot_utcd_embeddings(kind=kd, aspect=aspect, save=sv, cache=cnm)
# plot_encoded_overlap_aspect() | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/util/utcd.py | utcd.py |
import os
import csv
import json
import time
import gzip
import random
import itertools
from os import listdir
from os.path import isfile, join as os_join, basename
from collections import Counter, defaultdict
from typing import List, Tuple, Set, Dict, Union
import numpy as np
from numpy import argmax, argmin
import spacy
from sentence_transformers.readers import InputExample
from sentence_transformers import util
from tqdm import tqdm
from stefutil import *
from zeroshot_classifier.util import *
__all__ = [
'Dataset', 'SplitDataset',
'get_datasets', 'to_aspect_normalized_datasets',
'nli_template', 'get_nli_data', 'binary_cls_format', 'nli_cls_format', 'encoder_cls_format', 'seq_cls_format',
'binary_explicit_format'
]
logger = get_logger('Load Data')
ASPECT_NORM_DIRNM = 'aspect-normalized'
def _get_nlp():
if not hasattr(_get_nlp, 'nlp'):
_get_nlp.nlp = spacy.load("en_core_web_md")
_get_nlp.nlp.disable_pipes(['tagger', 'parser', 'attribute_ruler', 'lemmatizer', 'ner'])
return _get_nlp.nlp
category_map = {
"ag_news": "category",
"clinc_150": "intent",
"dbpedia": "category",
"emotion": "sentiment",
"sentiment_tweets_2020": "sentiment",
"go_emotion": "sentiment",
"sgd": "intent",
"slurp": "intent",
"yahoo": "category",
"amazon_polarity": "sentiment",
"multi_eurlex": "category",
"banking77": "intent",
"consumer_finance": "category",
"finance_sentiment": "sentiment",
"nlu_evaluation": "intent",
"patent": "category",
"snips": "intent",
"yelp": "sentiment",
}
Dataset = Dict[str, List[str]] # text => labels associated with that text
SplitDataset = Dict[str, Union[Dataset, List[str], str]] # train & test splits + metadata including labels & aspect
def get_datasets(
domain: str = 'in', n_sample: int = None, normalize_aspect: Union[bool, int] = False,
dataset_names: Union[str, List[str]] = None
) -> [str, SplitDataset]:
"""
:param n_sample: If given, a random sample of the entire dataset is selected
Intended for debugging
:param normalize_aspect: If true, # of training samples for each aspect is normalized
via subsampling datasets in the larger aspect
If int given, used as seed for sampling
:param domain: Which domain's datasets to load, `in` or `out`; also needed for aspect normalization
Loading `out` is intended for training directly on out-of-domain data, see `zeroshot_classifier/models/bert.py`
:param dataset_names: If given, only load the specified datasets
"""
domain_dir_nm = in_domain_dir_nm if domain == 'in' else out_of_domain_dir_nm
path = os_join(u.proj_path, u.dset_dir, domain_dir_nm)
if not os.path.exists(path):
download_data(domain=domain)
datasets = None
_keys = {'train', 'test', 'aspect', 'labels'}
if normalize_aspect:
if isinstance(normalize_aspect, int):
assert normalize_aspect == sconfig('random-seed')
path = os_join(path, ASPECT_NORM_DIRNM)
if not os.path.exists(path):
datasets = save_aspect_normalized_datasets(domain=domain)
_keys.add('eval')
if not datasets:
if dataset_names:
if isinstance(dataset_names, str):
dataset_names = [dataset_names]
dataset_names = [f'{dnm}.json' for dnm in dataset_names]
else:
dataset_names = listdir(path)
paths = [os_join(path, f) for f in dataset_names if isfile(os_join(path, f)) and f.endswith('.json')]
datasets = dict()
it = tqdm(paths, desc='Loading JSON datasets')
for path in it:
dataset_name = basename(path).split('.')[0]
it.set_postfix(dataset=pl.i(dataset_name))
dset = json.load(open(path))
dset: SplitDataset
assert set(dset.keys()) == _keys # sanity check
datasets[dataset_name] = dset
splits = ['train', 'eval', 'test'] if normalize_aspect else ['train', 'test']
if n_sample:
for dnm, dsets in datasets.items():
for sp in splits:
dset = dsets[sp]
n = len(dset) if normalize_aspect else sconfig(f'UTCD.datasets.{dnm}.splits.{sp}.n_text')
if n < n_sample:
break
if normalize_aspect:
txts = list(dset.keys())
txts = random.sample(txts, n_sample)
else: # TODO: support eval set
txts = np.empty(n, dtype=object)
for i, t in enumerate(dset.keys()):
txts[i] = t
txts = np.random.permutation(txts)[:n_sample]
dsets[sp] = {t: dset[t] for t in txts}
counts = {dnm: {sp: len(dsets[sp]) for sp in splits} for dnm, dsets in datasets.items()}
logger.info(f'Datasets loaded w/ {pl.i(counts)}')
return datasets
def subsample_dataset(dataset: Dataset = None, n_src: int = None, n_tgt: int = None, seed: int = None) -> Dataset:
"""
Sample texts from the text-labels pairs down to roughly `n_tgt` in total, while maintaining class distribution
"""
if n_src is None:
n_src = sum(len(lbs) for lbs in dataset.values())
assert n_tgt < n_src
ratio = n_tgt / n_src
d_log = {'#source': n_src, '#target': n_tgt, 'subsample-ratio': f'{round(ratio * 100, 3)}%'}
logger.info(f'Subsampling dataset w/ {pl.i(d_log)}... ')
cls2txt = defaultdict(set)
for txt, lbs in dataset.items():
for lb in lbs: # the same text may be added to multiple classes & hence sampled multiple times, see below
cls2txt[lb].add(txt)
# so that seed ensures reproducibility; TODO: too slow?
cls2txt = {cls: sorted(txts) for cls, txts in cls2txt.items()}
cls2count = {cls: len(txts) for cls, txts in cls2txt.items()}
# normalize by #pair instead of #text for keeping output #text close to `n_sample`
cls2count = {cls: round(c * ratio) for cls, c in cls2count.items()} # goal count for output
ret = dict()
if seed:
random.seed(seed)
for cls, c in cls2count.items():
to_sample = c
while to_sample > 0:
txts = random.sample(cls2txt[cls], to_sample)
for t in txts:
if t not in ret: # ensure no-duplication in # samples added, since multi-label
ret[t] = dataset[t]
to_sample -= 1
cls2txt[cls].remove(t)
return ret
def to_aspect_normalized_datasets(
data: Dict[str, SplitDataset], seed: int = None, domain: str = 'in'
) -> Dict[str, SplitDataset]:
"""
Subsample the `train` split of the given datasets so that each `aspect` ends up with roughly the same number of samples
Class distribution is maintained
"""
if seed:
random.seed(seed)
aspect2n_txt = defaultdict(int)
for dnm, d_dset in sconfig('UTCD.datasets').items():
if d_dset['domain'] == domain:
aspect2n_txt[d_dset['aspect']] += d_dset['splits']['train']['n_text']
logger.info(f'Aspect distribution: {pl.i(aspect2n_txt)}')
asp_min = min(aspect2n_txt, key=aspect2n_txt.get)
logger.info(f'Normalizing each aspect to ~{pl.i(aspect2n_txt[asp_min])} samples... ')
for dnm, d_dset in data.items():
asp = sconfig(f'UTCD.datasets.{dnm}.aspect')
if asp != asp_min:
n_normed = sconfig(f'UTCD.datasets.{dnm}.splits.train.n_text') * aspect2n_txt[asp_min] / aspect2n_txt[asp]
n_src = sconfig(f'UTCD.datasets.{dnm}.splits.train.n_pair')
d_dset['train'] = subsample_dataset(dataset=d_dset['train'], n_src=n_src, n_tgt=round(n_normed))
dnm2count = defaultdict(dict)
for dnm, d_dset in data.items():
dnm2count[sconfig(f'UTCD.datasets.{dnm}.aspect')][dnm] = len(d_dset['train'])
logger.info(f'Dataset counts after normalization: {pl.fmt(dnm2count)}')
return data
def dataset2train_eval_split(dataset: Dataset, eval_ratio: float = 0.1, seed: int = None) -> Dict[str, Dataset]:
"""
Split training set into train & eval set, try to maintain class distribution in both sets
"""
if seed:
random.seed(seed)
# just like in `to_aspect_normalized_datasets::normalize_single`; the same text may be added to multiple classes
cls2txt: Dict[str, Set[str]] = defaultdict(set)
for txt, lbs in dataset.items():
for lb in lbs:
cls2txt[lb].add(txt)
tr, vl = dict(), dict()
txts_added = set() # so that the same text is not added to both train & eval set
for cls, txts in cls2txt.items():
n_eval = max(round(len(txts) * eval_ratio), 1) # at least 1 sample in eval for each class
txts_vl = random.sample(txts, n_eval)
txts_tr = txts - set(txts_vl)
for t in txts_tr:
if t not in txts_added:
tr[t] = dataset[t]
txts_added.add(t)
for t in txts_vl:
if t not in txts_added:
vl[t] = dataset[t]
txts_added.add(t)
assert len(tr) + len(vl) == len(dataset) # sanity check
def dset2meta(dset: Dataset):
labels = set().union(*dataset.values())
return {'#text': len(dset), '#label': len(labels), 'labels': list(labels)}
logger.info(f'Training set after split: {pl.i(dset2meta(tr))}')
logger.info(f'Eval set after split: {pl.i(dset2meta(vl))}')
return dict(train=tr, eval=vl)
def save_aspect_normalized_datasets(domain: str = 'in'):
seed = sconfig('random-seed')
domain_dir_nm = in_domain_dir_nm if domain == 'in' else out_of_domain_dir_nm
path = os_join(u.proj_path, u.dset_dir, domain_dir_nm)
dsets = get_datasets(domain=domain)
dsets = to_aspect_normalized_datasets(dsets, seed=seed, domain=domain)
out_path = os.path.join(path, ASPECT_NORM_DIRNM)
os.makedirs(out_path, exist_ok=True)
ret = dict()
for dnm, dsets_ in dsets.items():
dsets__ = dataset2train_eval_split(dsets_.pop('train'), seed=seed)
dsets__.update(dsets_)
mic(dsets__.keys())
path_out = os.path.join(out_path, f'{dnm}.json')
logger.info(f'Saving normalized {pl.i(dnm)} dataset to {pl.i(path_out)}... ')
with open(path_out, 'w') as f:  # write to the per-dataset output file, not the domain directory
json.dump(dsets__, f)
ret[dnm] = dsets__
return ret
def get_nli_data():
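# Download (if not cached) and parse the AllNLI corpus into sentence-transformers `InputExample`s,
# split into train & dev samples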
nli_dataset_path = 'dataset/AllNLI.tsv.gz'
if not os.path.exists(nli_dataset_path):
util.http_get('https://sbert.net/datasets/AllNLI.tsv.gz', nli_dataset_path)
label2int = {"contradiction": 0, "entailment": 1, "neutral": 2}
train_samples = []
dev_samples = []
with gzip.open(nli_dataset_path, 'rt', encoding='utf8') as fIn:
reader = csv.DictReader(fIn, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
label_id = label2int[row['label']]
if row['split'] == 'train':
train_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=label_id))
else:
dev_samples.append(InputExample(texts=[row['sentence1'], row['sentence2']], label=label_id))
return train_samples, dev_samples
def binary_cls_format(
dataset: SplitDataset = None, sampling='rand', split: str = 'train', mode='vanilla'
):
ca.check_mismatch('Data Negative Sampling', sampling, ['rand', 'vect'])
examples = []
aspect = dataset['aspect']
if split in ['train', 'eval']:
aspect_token, sep_token = None, None
label_un_modified = mode != 'implicit'
ca(training_strategy=mode)
if mode in ['vanilla', 'implicit-on-text-encode-aspect', 'implicit-on-text-encode-sep', 'explicit']:
label_list = dataset['labels']
if mode == 'implicit-on-text-encode-aspect':
aspect_token = sconfig('training.implicit-on-text.encode-aspect.aspect2aspect-token')[aspect]
elif mode == 'implicit-on-text-encode-sep':
sep_token = sconfig('training.implicit-on-text.encode-sep.aspect-sep-token')
elif mode == 'implicit':
label_list = ['{} {}'.format(label, dataset['aspect']) for label in dataset['labels']]
else:
raise NotImplementedError(f'{pl.i(mode)} not supported yet')
example_list = [x for x in dataset[split].keys()]
vects, label_vectors = None, None
if sampling == 'vect':
nlp = _get_nlp()
label_vectors = {label: nlp(label) for label in label_list}
start = time.time()
vects = list(nlp.pipe(example_list, n_process=4, batch_size=128))
print('Time Elapsed {} ms'.format((time.time() - start) * 1000))
for i, (text, labels) in enumerate(dataset[split].items()):
if label_un_modified:
true_labels = labels
else:
true_labels = ['{} {}'.format(label, dataset['aspect']) for label in labels]
other_labels = [label for label in label_list if label not in true_labels]
if mode == 'implicit-on-text-encode-aspect':
text = f'{aspect_token} {text}'
elif mode == 'implicit-on-text-encode-sep':
text = f'{aspect} {sep_token} {text}'
# Generate label for true example
for label in true_labels:
examples.append(InputExample(texts=[text, label], label=1))
# Generate sample based on sampling strategy
if sampling == 'rand':
random.seed(i)
if len(other_labels) >= 2:
random_label = random.sample(other_labels, k=2)
# As expected by sentence-transformer::CrossEncoder
examples.append(InputExample(texts=[text, random_label[0]], label=float(0)))
examples.append(InputExample(texts=[text, random_label[1]], label=float(0)))
elif len(other_labels) > 0:
examples.append(InputExample(texts=[text, other_labels[0]], label=float(0)))
elif sampling == 'vect':
if len(other_labels) >= 2:
text_vector = vects[i]
other_label_vectors = [label_vectors[label] for label in other_labels]
scores = [text_vector.similarity(vector) for vector in other_label_vectors]
examples.append(InputExample(texts=[text, other_labels[argmax(scores)]], label=float(0)))
examples.append(InputExample(texts=[text, other_labels[argmin(scores)]], label=float(0)))
elif len(other_labels) > 0:
examples.append(InputExample(texts=[text, other_labels[0]], label=float(0)))
else: # test split
aspect_token = sconfig('training.implicit-on-text.encode-aspect.aspect2aspect-token')[aspect]
sep_token = sconfig('training.implicit-on-text.encode-sep.aspect-sep-token')
for text, labels in dataset['test'].items():
for label in labels:
if mode in ['vanilla', 'explicit']:
pass
elif mode == 'implicit':
label = '{} {}'.format(label, aspect)
elif mode == 'implicit-on-text-encode-aspect':
text = f'{aspect_token} {text}'
elif mode == 'implicit-on-text-encode-sep':
text = f'{aspect} {sep_token} {text}'
else:
raise NotImplementedError(f'{pl.i(mode)} not supported yet')
examples.append(InputExample(texts=[text, label], label=1))
return examples
def nli_template(label, category):
if category == 'topic':
return 'This text belongs to the topic of {}'.format(label)
elif category == 'intent':
return 'This text expresses the intent of {}'.format(label)
elif category == 'sentiment':
return 'This text expresses a {} sentiment'.format(label)
def nli_cls_format(data, name=None, sampling='rand', train=True):
examples = []
if train:
label_list = data['labels']
example_list = [x for x in data['train'].keys()]
vects, label_vectors = None, None
if sampling == 'vect':
nlp = _get_nlp()
label_vectors = {label: nlp(label) for label in label_list}
start = time.time()
vects = list(nlp.pipe(example_list, n_process=4, batch_size=128))
print('Time Elapsed {} ms'.format((time.time() - start) * 1000))
print('Generating {} examples'.format(name))
for i, (text, labels) in enumerate(tqdm(data['train'].items())):
true_labels = labels
other_labels = [label for label in label_list if label not in true_labels]
# Generate label for true example
for label in true_labels:
examples.append(InputExample(texts=[text, nli_template(label, data['aspect'])], label=1))
# Generate sample based on sampling strategy
if sampling == 'rand':
random.seed(i)
if len(other_labels) >= 2:
random_label = random.sample(other_labels, k=2)
examples.append(InputExample(texts=[text, nli_template(random_label[0], data['aspect'])], label=0))
examples.append(InputExample(texts=[text, nli_template(random_label[1], data['aspect'])], label=0))
elif len(other_labels) > 0:
examples.append(InputExample(texts=[text, nli_template(other_labels[0], data['aspect'])], label=0))
elif sampling == 'vect':
if len(other_labels) >= 2:
text_vector = vects[i]
other_label_vectors = [label_vectors[label] for label in other_labels]
scores = [text_vector.similarity(vector) for vector in other_label_vectors]
examples.append(InputExample(
texts=[text, nli_template(other_labels[argmax(scores)], data['aspect'])], label=0))
examples.append(InputExample(
texts=[text, nli_template(other_labels[argmin(scores)], data['aspect'])], label=0))
elif len(other_labels) > 0:
                    examples.append(InputExample(texts=[text, nli_template(other_labels[0], data['aspect'])], label=0))
else:
for text, labels in data['test'].items():
for label in labels:
examples.append(InputExample(texts=[text, nli_template(label, data['aspect'])], label=1))
return examples
WARN_NOT_ENOUGH_NEG_LABEL = 'Not Enough Negative Label'
def encoder_cls_format(
arr: List[Tuple[str, str]], name=None, sampling='rand', train=True,
neg_sample_for_multi=False, show_warnings=True
) -> List[InputExample]:
"""
:param arr: List of dataset (text, descriptive label) pairs
:param name: Dataset name
:param sampling: Sampling approach, one of [`rand`, `vect`]
:param train: If true, negative samples are generated
Intended for training
:param neg_sample_for_multi: If true, negative samples are added for each positive labels for a text
:param show_warnings: If true, warning for missing negative labels are logged
"""
examples = []
if train:
nlp = _get_nlp()
label_list = list(dict.fromkeys([example[1] for example in arr]))
label_vectors = {label: nlp(label) for label in label_list}
example_list = [x[0] for x in arr]
vects = None
if sampling == 'vect':
start = time.time()
vects = list(nlp.pipe(example_list, n_process=4, batch_size=128))
print('Time Elapsed {} ms'.format((time.time() - start) * 1000))
# count instances
count = Counter(example_list)
has_multi_label = any((c > 1) for c in count.values())
txt2lbs = None
if has_multi_label: # Potentially all valid labels for each text
arr_ = sorted(arr) # map from unique text to all possible labels
txt2lbs = {k: set(lb for txt, lb in v) for k, v in itertools.groupby(arr_, key=lambda pair: pair[0])}
logger.info(f'Generating examples for dataset {pl.i(name)}, with labels {pl.i(label_list)}... ')
print('Generating {} examples'.format(name))
for i, element in enumerate(tqdm(arr)):
true_label = element[1]
other_labels = [label for label in label_list if label != element[1]]
# Generate label for true example
examples.append(InputExample(texts=[true_label, element[0]], label=float(1)))
# Generate sample based on sampling strategy
if has_multi_label and neg_sample_for_multi:
assert sampling == 'rand' # TODO: vect not supported
random.seed(i)
txt = element[0]
neg_pool = set(label_list) - txt2lbs[txt]
def neg_sample2label(lb: str) -> InputExample:
return InputExample(texts=[lb, txt], label=float(0))
if len(neg_pool) < 2:
# Ensures 2 negative labels are sampled, intended to work with existing Jaseci training code
warn_name = WARN_NOT_ENOUGH_NEG_LABEL
if len(neg_pool) == 0:
warn_name = f'{warn_name}, severe'
neg_label = 'dummy negative label'
else:
neg_label = list(neg_pool)[0]
examples.extend([neg_sample2label(neg_label), neg_sample2label(neg_label)])
if show_warnings:
logger.warning(f'{pl.s(warn_name, c="y", bold=True)}: # negative labels for text less '
f'than {2}: {pl.i(text=txt, pos_labels=txt2lbs[txt], neg_labels=neg_pool)}')
else:
examples.extend([neg_sample2label(neg_label) for neg_label in random.sample(neg_pool, k=2)])
else:
if sampling == 'rand' and count[element[0]] < 2:
random.seed(i)
random_label = random.sample(other_labels, k=2)
examples.append(InputExample(texts=[random_label[0], element[0]], label=float(0)))
examples.append(InputExample(texts=[random_label[1], element[0]], label=float(0)))
elif sampling == 'vect' and count[element[0]] < 2:
text_vector = vects[i]
other_label_vectors = [label_vectors[label] for label in other_labels]
scores = [text_vector.similarity(vector) for vector in other_label_vectors]
examples.append(InputExample(texts=[other_labels[argmax(scores)], element[0]], label=float(0)))
examples.append(InputExample(texts=[other_labels[argmin(scores)], element[0]], label=float(0)))
else:
for element in arr:
examples.append(InputExample(texts=[element[1], element[0]], label=float(1)))
return examples
def seq_cls_format(data, all=False):
train = []
test = []
label_map = {}
if all:
for dataset, item in data.items():
for label in item['labels']:
if label not in label_map:
label_map[label] = len(label_map)
for k, v in item['train'].items():
# loop through each true label
for label in v:
train.append({'text': k, 'label': label_map[label], 'label_name': label})
for k, v in item['test'].items():
# loop through each true label
for label in v:
test.append({'text': k, 'label': label_map[label], 'label_name': label})
else:
label_map = {k: i for i, k in enumerate(data['labels'])}
for k, v in data['train'].items():
# loop through each true label
for label in v:
train.append({'text': k, 'label': label_map[label], 'label_name': label})
for k, v in data['test'].items():
# loop through each true label
for label in v:
test.append({'text': k, 'label': label_map[label], 'label_name': label})
return train, test, label_map
class ExplicitInputExample:
def __init__(self, texts, label, aspect) -> None:
self.texts = texts
self.label = label
self.aspect = aspect
def __str__(self):
return "<ExplicitInputExample> label: {}, text: {}".format(str(self.label), self.text)
def binary_explicit_format(dataset):
aspect_map = {"sentiment": 0, "intent": 1, "topic": 2}
train = []
for name, data in dataset.items():
aspect = data['aspect']
label_list = data['labels']
for i, (text, labels) in enumerate(tqdm(data['train'].items(), desc=name)):
true_labels = labels
other_labels = [label for label in label_list if label not in true_labels]
# Generate label for true example
for label in true_labels:
train.append(ExplicitInputExample(texts=[text, label], label=1, aspect=aspect_map[aspect]))
random.seed(i)
if len(other_labels) >= 2:
random_label = random.sample(other_labels, k=2)
train.append(
ExplicitInputExample(texts=[text, random_label[0]], label=0, aspect=aspect_map[aspect]))
train.append(
ExplicitInputExample(texts=[text, random_label[1]], label=0, aspect=aspect_map[aspect]))
elif len(other_labels) > 0:
train.append(
ExplicitInputExample(texts=[text, other_labels[0]], label=0, aspect=aspect_map[aspect]))
return train
if __name__ == '__main__':
mic.output_width = 512
random.seed(sconfig('random-seed'))
def check_sampling():
data = get_datasets(domain='in')
data = to_aspect_normalized_datasets(data)
for dnm, d_dset in data.items():
mic(dnm, len(d_dset['train']))
c = Counter()
for txt, lbs in d_dset['train'].items():
c.update(lbs)
mic(c, len(c))
# check_sampling()
save_aspect_normalized_datasets(domain='in')
save_aspect_normalized_datasets(domain='out') | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/util/load_data.py | load_data.py |
import os.path
import re
import math
import json
import itertools
from os.path import join as os_join
from typing import List, Tuple, Dict, Callable, Union, Optional, Any
from collections import Counter
import numpy as np
import pandas as pd
from scipy.stats import norm
from transformers import AutoTokenizer
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
from stefutil import *
from zeroshot_classifier.util.data_path import *
__all__ = ['ConfigDict']
tokenize_modes = ['re', 'bert', 'gpt2']
def _re_call() -> Callable[[str], int]:
if not hasattr(_re_call, 'token_pattern'):
# taken from sklearn.CountVectorizer
_re_call.token_pattern = re.compile(r'(?u)\b\w+\b')
return lambda x: len(_re_call.token_pattern.findall(x))
def _hf_call(model_name) -> Callable[[Union[str, List[str]]], Union[int, List[int]]]:
if not hasattr(_hf_call, 'd'):
_hf_call.d = {}
d = _hf_call.d
if model_name not in d:
d[model_name] = AutoTokenizer.from_pretrained(model_name)
def _call(x):
ids = d[model_name](x)['input_ids']
if isinstance(x, str):
return len(ids)
else:
return [len(i) for i in ids]
return _call
def get_tokenizer_len(s: Union[str, List[str]], mode: str = 're') -> Union[int, List[int]]:
assert mode in ['re', 'bert', 'gpt2']
if not hasattr(get_tokenizer_len, 'd_f'):
get_tokenizer_len.d_f = dict(
re=_re_call(),
bert=_hf_call('bert-base-cased'),
gpt2=_hf_call('gpt2')
)
return get_tokenizer_len.d_f[mode](s)
class ConfigDict:
d = {
'baselines': {
'gpt2-nvidia': {
'templates': [
'To which category does the following document belong? : {}',
'To which category does the following text belong? : {}',
'To which category does the text belong? : {}',
'To which category does the article belong? : {}',
'How would you describe the following document? : as {}',
'How would you describe the text? : as {}',
'How would you describe the following text? : as {}',
'Which best describes the text? : {}',
'Which best describes the document? : {}',
'Which best describes the following document? : {}',
'Which best describes the following text? : {}',
'The following document is _ ? : {}',
'The following text is _ ? : {}',
'The text is _ ? : {}',
'The document is _ ? : {}',
'How is the text best described? : {}',
'How is the document best described? : {}',
'How is the following text best described? : {}',
'How is the following document best described? : {}',
'Which of these choices best describes the text? : {}',
'Which of these options best describes the text? : {}',
'Which of these choices best describes the document? : {}',
'Which of these options best describes the document? : {}',
'Which of these categories best describes the following document? : {}',
'Which of these choices best describes the following document? : {}',
'Which of these options best describes the following text? : {}'
],
'label-descriptors': dict( # string label to natural language descriptor, as in paper
ag_news={
'World': 'World News',
'Sports': 'Sports',
'Business': 'Business',
'Sci/Tech': 'Science & Technology'
}
)
},
'bert-mnli': dict(
templates=dict(
sentiment='This text expresses a {} sentiment',
intent='This text expresses the intent of {}',
topic='This text belongs to the topic of {}'
)
),
},
'UTCD': dict(
datasets=dict(
# in-domain evaluation has the same labels as training
go_emotion=dict(
path='in-domain/go_emotion', aspect='sentiment', eval_labels_same=True, domain='in',
name='GoEmotions', name_compact='GoEmotions'
),
sentiment_tweets_2020=dict(
path='in-domain/sentiment_tweets_2020', aspect='sentiment', eval_labels_same=True, domain='in',
name='TweetEval', name_compact='TweetEval'
),
emotion=dict(
path='in-domain/emotion', aspect='sentiment', eval_labels_same=True, domain='in',
name='Emotion', name_compact='Emotion'
),
# not `eval_labels_same` := has some unique test labels
sgd=dict(
path='in-domain/sgd', aspect='intent', eval_labels_same=False, domain='in',
name='Schema-Guided Dialogue', name_compact='SGD'
),
clinc_150=dict(
path='in-domain/clinc_150', aspect='intent', eval_labels_same=True, domain='in',
name='Clinc-150', name_compact='Clinc-150'
),
slurp=dict(
path='in-domain/slurp', aspect='intent', eval_labels_same=False, domain='in',
name='SLURP', name_compact='SLURP'
),
ag_news=dict(
path='in-domain/ag_news', aspect='topic', eval_labels_same=True, domain='in',
name='AG News', name_compact='AG News'
),
dbpedia=dict(
path='in-domain/dbpedia', aspect='topic', eval_labels_same=True, domain='in',
name='DBpedia', name_compact='DBpedia'
),
yahoo=dict(
path='in-domain/yahoo', aspect='topic', eval_labels_same=True, domain='in',
name='Yahoo Answer Topics', name_compact='Yahoo'
),
# Out-of-domain datasets: only test split used & intended for evaluation
amazon_polarity=dict(
path='out-of-domain/amazon_polarity', aspect='sentiment', eval_labels_same=True, domain='out',
name='Amazon Review Polarity', name_compact='Amazon Polarity'
),
finance_sentiment=dict(
path='out-of-domain/finance_sentiment', aspect='sentiment', eval_labels_same=True,
domain='out',
name='Financial Phrase Bank', name_compact='Fin. Phrase Bank'
),
yelp=dict(
path='out-of-domain/yelp', aspect='sentiment', eval_labels_same=True, domain='out',
name='Yelp Review', name_compact='Yelp'
),
banking77=dict(
path='out-of-domain/banking77', aspect='intent', eval_labels_same=True, domain='out',
name='Banking77', name_compact='Banking77'
),
snips=dict(
path='out-of-domain/snips', aspect='intent', eval_labels_same=True, domain='out',
name='SNIPS', name_compact='SNIPS'
),
nlu_evaluation=dict(
path='out-of-domain/nlu_evaluation', aspect='intent', eval_labels_same=True, domain='out',
name='NLU Evaluation', name_compact='NLU Eval'
),
multi_eurlex=dict(
path='out-of-domain/multi_eurlex', aspect='topic', eval_labels_same=True, domain='out',
name='MultiEURLEX', name_compact='MultiEURLEX'
),
patent=dict(
path='out-of-domain/patent', aspect='topic', eval_labels_same=True, domain='out',
name='Big Patent', name_compact='Patent'
),
consumer_finance=dict(
path='out-of-domain/consumer_finance', aspect='topic', eval_labels_same=True, domain='out',
name='Consumer Finance Complaints', name_compact='Consumer Finance'
)
),
aspects=['sentiment', 'intent', 'topic'],
domains=['in', 'out'],
num_aspect=3,
num_dataset_per_aspect=6,
num_dataset_per_domain_per_aspect=3,
num_domain=2,
num_dataset_per_domain=9,
dataset_ext='json' # all in json
),
'training': {
'implicit-on-text': {
'encode-aspect': {
'aspect2aspect-token': dict(sentiment='<|sentiment|>', intent='<|intent|>', topic='<|topic|>')
},
'encode-sep': {'aspect-sep-token': '<|ASPECT-SEP|>'}
},
'strategies': [
'vanilla',
'implicit', # prepend aspect text before each label
'implicit-on-text-encode-aspect', # encode each of the 3 aspects as 3 special tokens, followed by text
'implicit-on-text-encode-sep', # encode aspects normally, but add special token between aspect and text
'explicit' # see `zeroshot_classifier.explicit.binary_bert.py` for explicit training
]
},
'random-seed': 77,
'check-arg': [
dict(
display_name='Model Name', attr_name='model_name',
accepted_values=[
'bert-seq-cls', # not a Zeroshot framework, a supervised learning upperbound
'binary-bert', 'bert-nli', 'bi-encoder', 'dual-bi-encoder', 'gpt2-nvidia'
]
),
dict(
display_name='Dataset Domain', attr_name='dataset_domain',
accepted_values=['in', 'out']
),
dict(
display_name='Sampling Strategy', attr_name='sampling_strategy',
accepted_values=['rand', 'vect', 'none', 'NA']
),
dict(
display_name='Training strategy', attr_name='training_strategy',
accepted_values=[
'vanilla',
'implicit',
'implicit-on-text-encode-aspect',
'implicit-on-text-encode-sep',
'explicit'
]
),
dict(
display_name='GPT2 Training Strategy', attr_name='gpt2_training_strategy',
accepted_values=['vanilla', 'implicit', 'explicit']
)
]
}
def __init__(self, fast: bool = False):
"""
:param fast: If true, token length statistics on all datasets will not be computed
Intended for fast loading
"""
self.fast = fast
self.d = ConfigDict.d # TODO: just use the same dict
self.d_n_toks = self.extract_utcd_meta() # Mutates config_dict
def _path2dataset_info(self, d: Dict) -> Optional[Tuple[Dict, Dict, Dict]]:
"""
:return: 3-tuple of (
dataset label information per split for `config`,
dataset token information per dataset for `config`,
number of tokens for plot
)
"""
path = os_join(BASE_PATH, PROJ_DIR, DSET_DIR, f'{d["path"]}.json')
assert os.path.exists(path) # datasets must be present to extract config metadata
with open(path) as fl:
dsets: Dict = json.load(fl)
def split2info(split, dset: Dict[str, List[str]], count_token_length: bool = True) -> Dict:
# Based on heuristics on how the `json` are stored
# creating a list of all the strings consume memory for prohibitively large datasets
n_text_, n_pair_ = len(dset.keys()), sum([len(lbs) for lbs in dset.values()])
lbs_uniq = set().union(*dset.values())
n_multi_label = sum([len(lbs_) > 1 for lbs_ in dset.values()])
txt_n_toks, lb_n_toks = None, None
count_len = not self.fast and count_token_length
if count_len:
txt_n_toks, lb_n_toks = dict(), dict()
for mode in tokenize_modes:
n, desc_t, desc_l = 16, f'{split}-{mode}-text', f'{split}-{mode}-label'
lb2tokenize_len = {lb: get_tokenizer_len(lb, mode) for lb in lbs_uniq}
counter_txt, counter_lb = Counter(), Counter()
if mode == 're':
for t in tqdm(dset.keys(), total=len(dset), desc=f'{desc_t:>{n}}'):
counter_txt[get_tokenizer_len(t, mode)] += 1
else:
batch_size = 2048*2
for grp in tqdm(
group_n(dset.keys(), batch_size),
total=math.ceil(len(dset) / batch_size), desc=f'{desc_t:>{n}}'
):
lens: List[int] = get_tokenizer_len(list(grp), mode)
counter_txt.update(lens)
for t in tqdm(dset.values(), desc=f'{desc_l:>{n}}'):
for lb in t:
counter_lb[lb2tokenize_len[lb]] += 1
txt_n_toks[mode], lb_n_toks[mode] = counter_txt, counter_lb
ret: Dict[str, Any] = dict(
labels=sorted(lbs_uniq),
n_label=len(lbs_uniq),
n_text=n_text_,
n_pair=n_pair_,
multi_label=n_text_ < n_pair_,
n_multi_label=n_multi_label
)
if count_len:
ret.update(dict(txt_n_toks=txt_n_toks, lb_n_toks=lb_n_toks))
return ret
labels, aspect = dsets.pop('labels'), dsets.pop('aspect')
assert aspect == d['aspect']
d_out = { # ignore out of domain train split for potentially too large
split: split2info(split, dset, count_token_length=not (split == 'train' and d['domain'] == 'out'))
for split, dset in dsets.items()
} # Labels for each split
assert all(split in ['train', 'test'] for split in d_out.keys())
assert set(labels) == set().union(*[set(d['labels']) for d in d_out.values()])
if not self.fast:
# sum over all splits of the dataset for token length computation
            txt_n_toks_all = [d_split.pop('txt_n_toks', None) for d_split in d_out.values()]
            lb_n_toks_all = [d_split.pop('lb_n_toks', None) for d_split in d_out.values()]
txt_n_toks_all = [e for e in txt_n_toks_all if e] # pop from the dict, then remove them for stats
lb_n_toks_all = [e for e in lb_n_toks_all if e]
txt_n_toks_all = {mode: sum([c[mode] for c in txt_n_toks_all], start=Counter()) for mode in tokenize_modes}
lb_n_toks_all = {mode: sum([c[mode] for c in lb_n_toks_all], start=Counter()) for mode in tokenize_modes}
def counter2mean(c: Counter) -> float:
lens, counts = zip(*c.items())
return np.average(lens, weights=counts)
avg_toks = {
**{f'{mode}-txt_avg_tokens': counter2mean(txt_n_toks_all[mode]) for mode in tokenize_modes},
**{f'{mode}-lb_avg_tokens': counter2mean(lb_n_toks_all[mode]) for mode in tokenize_modes}
}
d_n_tok = dict(text=txt_n_toks_all, label=lb_n_toks_all)
else:
avg_toks, d_n_tok = None, None
if d['eval_labels_same']:
assert d_out['train']['labels'] == d_out['test']['labels']
return d_out, avg_toks, d_n_tok
def extract_utcd_meta(self) -> Dict:
d_utcd = self.d['UTCD']
d_dsets: Dict = d_utcd['datasets']
logger = get_logger('Extracting UTCD metadata')
d_n_toks = dict()
it = d_dsets.items()
if self.fast:
it = tqdm(it, desc='Extracting UTCD metadata', total=len(d_dsets))
for dnm, d_dset in it:
if self.fast:
it.set_postfix(dnm=pl.i(dnm))
else:
logger.info(f'Processing {pl.i(dnm)}... ')
d_dset['splits'], d_avg_tok, d_n_toks[dnm] = self._path2dataset_info(d_dset)
if d_avg_tok is not None:
d_dset.update(d_avg_tok)
dnms = sorted(d_dsets) # All datasets, in- and out-of-domain, share the same dataset <=> id mapping
d_utcd['dataset_id2name'] = dnms
d_utcd['dataset_name2id'] = {dnm: i for i, dnm in enumerate(dnms)}
return d_n_toks
def plot_utcd_n_toks(d_n_toks: Dict, domain: str, save=True):
def weighted_quantile(values, quantiles, sample_weight=None, values_sorted=False, old_style=False):
# Credit: https://stackoverflow.com/a/29677616/10732321
""" Very close to numpy::percentile, but supports weights.
NOTE: quantiles should be in [0, 1]!
:param values: numpy array with data
:param quantiles: array-like with many quantiles needed
:param sample_weight: array-like of the same length as `array`
:param values_sorted: bool, if True, then will avoid sorting of
initial array
:param old_style: if True, will correct output to be consistent
with numpy.percentile.
:return: numpy.array with computed quantiles.
"""
values = np.array(values)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
assert np.all(quantiles >= 0) and np.all(quantiles <= 1), \
'quantiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be convenient with numpy.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
logger = get_logger('Token Lengths Distribution Plot')
d_df = dict()
text_types = ['text', 'label']
for text_type, mode in itertools.product(text_types, tokenize_modes):
logger.info(f'Processing {pl.i(text_type)} with {pl.i(mode)} tokenization')
def dnm2dset(dnm: str) -> List[Tuple[int, int, str]]:
counter = d_n_toks[dnm][text_type][mode]
lens, counts = zip(*counter.items())
return [(l, c, dnm) for l, c in zip(lens, counts)]
toks_unrolled = sum([dnm2dset(dnm) for dnm in d_n_toks.keys()], start=[])
# `count` is a pd.DataFrame specific attribute
d_df[(text_type, mode)] = pd.DataFrame(toks_unrolled, columns=['n_token', 'counts', 'dataset_name'])
fig, axes = plt.subplots(2, 3, figsize=(16, 9))
n_tt, n_tm = len(text_types), len(tokenize_modes)
for i_row, i_col in itertools.product(range(n_tt), range(n_tm)):
text_type, mode = text_types[i_row], tokenize_modes[i_col]
logger.info(f'Plotting {pl.i(text_type)} with {pl.i(mode)} tokenization')
ax = axes[i_row, i_col]
df = d_df[(text_type, mode)]
legend = i_row == 0 and i_col == 0
sns.histplot(
data=df, x='n_token', hue='dataset_name', weights='counts',
kde=text_type == 'text', kde_kws=dict(gridsize=2048), discrete=True, common_norm=False, stat='density',
palette='husl', legend=legend, ax=ax
)
ax.set(xlabel=None, ylabel=None)
ax.set_title(f'{text_type} with {mode} tokenization')
if text_type == 'text': # empirical, cos there are outliers for `text`s
p = norm().cdf(3) # quantile at 3std
mi = df.n_token.min()
ma = round(weighted_quantile(df.n_token, [p], sample_weight=df.counts)[0])
ax.set_xlim([mi, ma])
else:
xticks = ax.get_xticks() # enforce integer ticks
ax.set_xticks(list(range(math.floor(xticks.min()), math.ceil(xticks.max()) + 1)))
domain = 'in-domain' if domain == 'in' else 'out-of-domain eval'
title = f'Tokenization length distribution across {domain} datasets'
plt.suptitle(title)
fig.supxlabel('#token')
fig.supylabel('Density')
if save:
# this function is for standalone execution, importing here prevents circular import
from zeroshot_classifier.util.util import save_fig
save_fig(title)
else:
plt.show()
if __name__ == '__main__':
config_d = ConfigDict(fast=False) # If run standalone, compute #token stats
def plot_toks():
d_n_tok = config_d.d_n_toks
for dom in ['in']:
# plot only in-domain data as out-of-domain tokens lengths are too long,
# resulting in prohibitively large # of patches for bar-plot to terminate soon
d_n_tok_ = {dnm: v for dnm, v in d_n_tok.items() if get(config_d.d, f'UTCD.datasets.{dnm}.domain') == dom}
plot_utcd_n_toks(d_n_tok_, domain=dom, save=True)
# plot_toks()
def write_config():
mic(config_d.d)
with open(os_join(BASE_PATH, PROJ_DIR, PKG_NM, 'util', 'config.json'), 'w') as f:
json.dump(config_d.d, f, indent=4)
write_config() | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/util/config.py | config.py |
import math
import datetime
from os.path import join as os_join
from torch.utils.tensorboard import SummaryWriter
from transformers import Trainer, TrainingArguments, TrainerCallback
import datasets
from stefutil import *
logger = get_logger('Explicit Trainer')
class MyTrainer(Trainer):
"""
Override `compute_loss` for getting training stats
"""
def __init__(self, name: str = None, with_tqdm: bool = True, disable_train_metrics: bool = True, **kwargs):
super().__init__(**kwargs)
self.name = name
self.with_tqdm = with_tqdm
self.disable_train_metrics = disable_train_metrics
self._replace_callback()
self.acc = datasets.load_metric('accuracy')
d_log = dict(with_tqdm=with_tqdm, disable_train_metrics=disable_train_metrics)
self.logger = get_logger('Explicit Trainer')
self.logger.info(f'Trainer initialized w/ {pl.i(d_log)}')
def _replace_callback(self):
callbacks = self.callback_handler.callbacks
# Trainer adds a `PrinterCallback` or a `ProgressCallback`, replace all that with my own,
# see `MyProgressCallback`
rmv = [
"<class 'transformers.trainer_callback.ProgressCallback'>",
"<class 'transformers.trainer_callback.PrinterCallback'>"
]
self.callback_handler.callbacks = [c for c in callbacks if str(c.__class__) not in rmv]
if self.with_tqdm:
self.add_callback(MyProgressCallback())
self.add_callback(MyTrainStatsMonitorCallback(trainer=self, with_tqdm=self.with_tqdm))
def compute_loss(self, model, inputs, return_outputs=False):
"""
How the loss is computed by Trainer. By default, all models return the loss in the first element.
Subclass and override for custom behavior.
"""
if self.label_smoother is not None and "labels" in inputs:
labels = inputs.pop("labels")
else:
labels = None
outputs = model(**inputs)
# ========================== Begin of added ==========================
if model.training and not self.disable_train_metrics:
labels_, logits = inputs['labels'].detach(), outputs.logits.detach()
acc = self.acc.compute(predictions=logits.argmax(dim=-1), references=labels_)['accuracy']
self.log(dict(src='compute_loss', acc=acc))
# ========================== End of added ==========================
# Save past state if it exists
# TODO: this needs to be fixed and made cleaner later.
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
if labels is not None:
loss = self.label_smoother(outputs, labels)
else:
# We don't use .loss here since the model may return tuples instead of ModelOutput.
loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
return (loss, outputs) if return_outputs else loss
class MyTrainStatsMonitorCallback(TrainerCallback):
"""
Supports colored terminal output, logging file write, data sent to tensorboard for plotting
Evaluation during training **not supported**
"""
def __init__(self, trainer: MyTrainer, with_tqdm: bool = True):
self.mode = 'eval'
self.t_strt, self.t_end = None, None
self.trainer = trainer
self.name = self.trainer.name
self.logger, self.logger_fl, self.writer = None, None, None
self.ls = None
args = trainer.args
n_ep = args.num_train_epochs
bsz = args.per_device_train_batch_size * args.gradient_accumulation_steps
n_data = len(trainer.train_dataset)
n_step = max(math.ceil(n_data / bsz), 1) * n_ep
self.prettier = MlPrettier(ref=dict(step=n_step, epoch=n_ep))
self.out_dict = None
def on_train_begin(self, args: TrainingArguments, state, control, **kwargs):
self.mode = 'train'
self.logger = get_logger(self.name)
mdl_type = self.trainer.model.__class__.__qualname__
output_dir = self.trainer.args.output_dir
path_log = os_join(output_dir, f'{mdl_type} train.log')
self.logger_fl = get_logger(name=self.name, kind='file-write', file_path=path_log)
self.writer = SummaryWriter(os_join(output_dir, f'tb'))
self.ls = LogStep(
trainer=self.trainer, prettier=self.prettier,
logger=self.logger, file_logger=self.logger_fl, tb_writer=self.writer
)
conf = self.trainer.model.config.to_dict()
train_args = self.trainer.args.to_dict()
self.logger.info(f'Training launched on model {pl.i(mdl_type)}, {pl.fmt(conf)} '
f'with training args {pl.fmt(train_args)}... ')
self.logger_fl.info(f'Training launched on model {pl.i(mdl_type)}, {pl.id(conf)} '
f'with training args {pl.id(train_args)}... ')
self.t_strt = datetime.datetime.now()
def on_train_end(self, args: TrainingArguments, state, control, **kwargs):
self.t_end = datetime.datetime.now()
t = fmt_delta(self.t_end - self.t_strt)
self.logger.info(f'Training completed in {pl.i(t)} ')
self.logger_fl.info(f'Training completed in {t} ')
self.mode = 'eval'
def on_log(self, args, state, control, logs=None, **kwargs):
if state.is_local_process_zero:
step = state.global_step
in_train = self.trainer.model.training
if in_train and 'src' in logs and logs['src'] == 'compute_loss':
del logs['src']
self.out_dict = logs
elif in_train and all('runtime' not in k for k in logs):
d_log = dict(step=step, epoch=state.epoch, lr=logs['learning_rate'], loss=logs['loss'])
if not self.trainer.disable_train_metrics:
d_log['sp_cls_acc'] = self.out_dict['acc']
self.ls(d_log, training=in_train, to_console=not self.trainer.with_tqdm)
elif not in_train and 'eval_loss' in logs:
d_log = dict(step=step, epoch=int(state.epoch), loss=logs['eval_loss'], asp_cls_acc=logs['eval_acc'])
self.ls(d_log, training=in_train, to_console=not self.trainer.with_tqdm)
else:
self.logger.info(pl.i(logs))
self.logger_fl.info(pl.nc(logs)) | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/util/explicit_v2_pretrain.py | explicit_v2_pretrain.py |
import json
from os.path import join as os_join
from typing import List, Dict, Union, Any
from collections import defaultdict
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from stefutil import *
from zeroshot_classifier.util import *
def get_bad_samples(d_loss: Dict[str, np.array], k: int = 32, save: str = None) -> Dict[str, List[Dict[str, Any]]]:
"""
:param d_loss: The loss of each text sample in each dataset by a model, in iteration order
:param k: top #samples to keep
    :param save: If given, directory path under which to save the results as json
    :return: For each dataset, the `k` samples with the highest loss (i.e. worst model performance), sorted by descending loss
"""
d_out, split = dict(), 'test'
for dnm, loss in d_loss.items():
idxs_top = np.argpartition(loss, -k)[-k:]
s_idxs_top = set(idxs_top)
out = []
for i, (txt, lbs) in enumerate(utcd.get_dataset(dnm, split).items()):
if i in s_idxs_top:
out.append(dict(text=txt, labels=lbs, loss=float(loss[i])))
d_out[dnm] = sorted(out, key=lambda x: -x['loss'])
if save:
fnm = os_join(save, f'{now(for_path=True)}, bad_samples.json')
with open(fnm, 'w') as fl:
json.dump(d_out, fl, indent=4)
return d_out
class AttentionVisualizer:
def __init__(self, model_path):
self.model_path = model_path
self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
self.dataset_cache: Dict[str, Dict[str, List[str]]] = dict()
self.model_cache = defaultdict(lambda: defaultdict(dict)) # dataset name => text => label => visualization args
self.logger = get_logger('Binary BERT Attention Visualizer')
def visualize(self, dataset_name: str, text: str, label: str = None, aggregate_attention: bool = True, **kwargs):
"""
Visualize the attention weights of a text, label pair
Intended for binary bert
Previously computed attention weights are cached
Should be called in a notebook only per `bertviz`
"""
from bertviz import head_view
split = 'test'
if dataset_name not in self.dataset_cache:
self.dataset_cache[dataset_name] = utcd.get_dataset(dataset_name, split)
label_options = sconfig(f'UTCD.datasets.{dataset_name}.splits.{split}.labels')
self.logger.info(f'Visualizing dataset {pl.i(dataset_name)} with label options {pl.i(label_options)}... ')
if label is None: # just assume not run on this text before
label, args = self._get_pair(dataset_name, text, label_options)
elif label not in self.model_cache[dataset_name][text]:
args = self._get_pair(dataset_name, text, label)
else:
args = self.model_cache[dataset_name][text][label]
self.logger.info(f'Visualizing on {pl.i(text=text, label=label)} ... ')
if aggregate_attention:
attn = args['attention']
# snap batch dimension, stack by layer
attn = torch.stack([a.squeeze() for a in attn], dim=0) # #layer x #head x #seq_len x #seq_len
attn = attn.mean(dim=1) # average over all heads; L x T x T
attn += torch.eye(attn.size(1)) # reverse residual connections; TODO: why diagonals??
attn /= attn.sum(dim=-1, keepdim=True) # normalize all keys for each query
attn_res = torch.empty_like(attn) # get recursive contribution of each token for all layers
attn_res[0] = attn[0]
for i in range(1, attn.size(0)): # start from the bottom, multiply out the attentions on higher layers
attn_res[i] = attn[i] @ attn[i - 1]
attn_res[:, 0, 0] = 0 # ignore the score from cls to cls
# attn_res[:, :, 1:] = 0 # keep only scores queried from cls
args['attention'] = [a.unsqueeze(0).unsqueeze(0) for a in attn_res]
head_view(**args, **kwargs)
def _get_pair(self, dataset_name: str, text: str, label: Union[str, List[str]]):
batched = isinstance(label, list)
if batched:
text_in, label_in = [text] * len(label), label
else: # single label
text_in, label_in = [text], [label]
tok_args = dict(padding=True, truncation='longest_first', return_tensors='pt')
inputs = self.tokenizer(text_in, label_in, **tok_args)
input_ids, token_type_ids = inputs['input_ids'], inputs['token_type_ids']
with torch.no_grad():
outputs = self.model(**inputs, output_attentions=True)
attn = outputs.attentions
if batched:
for i, (lb, iids, tids) in enumerate(zip(label, input_ids, token_type_ids)):
toks = self.tokenizer.convert_ids_to_tokens(iids)
b_strt = tids.tolist().index(1)
a = tuple(a[None, i] for a in attn)
self.model_cache[dataset_name][text][lb] = dict(attention=a, tokens=toks, sentence_b_start=b_strt)
            scores = outputs.logits[:, 1]
lb = label[scores.argmax()] # pick the label with the highest score
return lb, self.model_cache[dataset_name][text][lb]
else:
b_strt = token_type_ids[0].tolist().index(1)
toks = self.tokenizer.convert_ids_to_tokens(input_ids[0]) # remove batch dimension
arg = dict(attention=attn, tokens=toks, sentence_b_start=b_strt)
self.model_cache[dataset_name][text][label] = arg # index into the 1-element list
return arg
if __name__ == '__main__':
import pickle
from stefutil import mic
mic.output_width = 512
model_dir_nm = os_join('binary-bert-rand-vanilla-old-shuffle-05.03.22', 'rand')
mdl_path = os_join(u.proj_path, u.model_dir, model_dir_nm)
def get_bad_eg():
# dir_nm = 'in-domain, 05.09.22'
dir_nm = 'out-of-domain, 05.10.22'
path_eval = os_join(mdl_path, 'eval', dir_nm)
with open(os_join(path_eval, 'eval_loss.pkl'), 'rb') as f:
d = pickle.load(f)
save_path = os_join(u.proj_path, 'eval', 'binary-bert', 'rand, vanilla', 'in-domain, 05.09.22')
get_bad_samples(d, save=save_path)
get_bad_eg()
def visualize():
av = AttentionVisualizer(mdl_path)
dnm = 'emotion'
txt = 'i feel like the writer wants me to think so and proclaiming he no longer liked pulsars is a petty and ' \
'hilarious bit of character '
lbl = 'anger'
lbl = None
av.visualize(dataset_name=dnm, text=txt, label=lbl)
# visualize() | zeroshot-classifier | /zeroshot-classifier-0.2.3.tar.gz/zeroshot-classifier-0.2.3/zeroshot_classifier/visualize/visualize_text_sample_loss.py | visualize_text_sample_loss.py |
zeroshot_topics
===============
.. contents:: **Table of Contents**
:backlinks: none
Installation
------------
zeroshot_topics is distributed on `PyPI <https://pypi.org>`_ as a universal
wheel and is available on Linux/macOS and Windows and supports
Python 3.7+ and PyPy.
.. code-block:: bash
$ pip install zeroshot_topics
License
-------
zeroshot_topics is distributed under the terms of
- `MIT License <https://choosealicense.com/licenses/mit>`_
- `Apache License, Version 2.0 <https://choosealicense.com/licenses/apache-2.0>`_
| zeroshot-topics | /zeroshot_topics-0.1.0.tar.gz/zeroshot_topics-0.1.0/README.rst | README.rst |
# Zeroshot (Python)
Image classification for the masses
## Installation
Install via pip: `pip install zeroshot`
## Usage
First, go to usezeroshot.com and create a classifier. See [here]() for more instructions.
Then, in Python (`image` should be an RGB numpy array with channels last):
```python
import zeroshot
# Create the classifier and preprocessing function.
classifier = zeroshot.Classifier("your model string or path")
preprocess_fn = zeroshot.create_preprocess_fn()
# Run the model!
prediction = classifier.predict(preprocess_fn(image))
print(f"The image is class {prediction}")
```
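If you need a quick way to obtain such an array, one option is Pillow (an illustration only — Pillow is not a dependency of this package, and `photo.jpg` is a placeholder file name):
```python
import numpy as np
from PIL import Image  # pip install pillow

# RGB, channels-last array of shape (height, width, 3)
image = np.array(Image.open("photo.jpg").convert("RGB"))
```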
## Read the docs
PUT DOCS HERE. | zeroshot | /zeroshot-0.1.3.tar.gz/zeroshot-0.1.3/README.md | README.md |
zerosms
==============
Send a text message via Way2SMS to your friends and family in India. Enter your India mobile number and sms text message as parameters. Your Free SMS sending to India will be delivered instantly
----
Description: zerosms
==============
It is a Python package to send a text message via "Way2SMS"
Send a text message to your friends and family in India. Enter your India mobile number and sms text message as parameters. Your Free SMS sending to India will be delivered instantly
Requirements
============================
BeautifulSoup4
requests
urllib3 1.22
NOTE
============================
Use your Way2SMS site credentials to send SMS and schedule future messages.
----
Example Code
------------
Open Python Interpreter::
>>> import zerosms
>>> zerosms.sms(phno=phonenum,passwd=password,message='helloworld!!',receivernum=receiver mobile number)
>>> zerosms.futuresms(phno=phno,passwd=password,set_time='17:47',set_date='15/12/2017',receivernum=receiver mobile num,message='helloworld!!')
| zerosms | /zerosms-0.0.1.tar.gz/zerosms-0.0.1/README.rst | README.rst |
#### downloadable items
- [X] `zrc datasets:{pull,import,rm}`: list, pull, import and delete datasets archives
- [X] `zrc checkpoints:{pull,import,rm}`: list, pull, import and delete checkpoint archives
- [X] `zrc samples:{pull,import,rm}`: list, pull, import and delete samples archives
#### benchmarks
- [X] `zrc benchmarks`: list existing benchmarks
- [X] `zrc benchmarks:info`: details on each benchmark
- [X] `zrc benchmarks:run <name> <submission_dir>`: run a benchmark on a submission
- [X] `zrc benchmarks:run <name> -t <task_name>`: run only some tasks of a benchmark on a submission
- [X] `zrc benchmarks:run <name> -s <set_name>`: run a benchmark only on specific subsets of a submission
### Index
- [X] auto-update: conditions: if the local file is older than a week, or if the remote file has been updated
- [X] manual update `zrc reset-index`
#### submissions
- [X] `zrc submission:init <benchmark_name> <submission_dir>`: create a submission directory
- [X] TODO: deduce benchmark from meta.yaml (remove it as argument to the command)
- [X] `zrc submission:params <submission_dir>`: show current parameters
- [X] `zrc submission:verify <submission_dir>`: validate a submission directory
# Submit
- [ ] `zrc submit <submission_dir> `: upload a submission from a directory
- [X] submission verification
- [X] leaderboard generation
- [X] zip_submission
- [X] upload zip (with resume function)
- [ ] TODO: connect to backend
#### user
- [X] `zrc user`
- [X] `zrc user:login`
- [X] `zrc user:clear`
#### Available Benchmark list
- [X] sLM21
- [X] abxLS
- [X] abx17
- [X] tde17
- [ ] tts019
- [ ] TODO: eval implementation
- [ ] Add dataset
- [ ] Add random submission
- [ ] TODO: Benchmark & task integration
- [ ] TODO: Submission
- [ ] TODO: Submission Validation
- [ ] TODO: Leaderboard & ScoreDir
#### potential extensions & plugins
- extension 0: vocolab_extension:
- [X] implement for leaderboard validation/management
- [ ] TODO: implement for submission validation
- extension 1 : extractors --> implement some basic extractor for the most used models
Extractor for CPC, Bert, LSTM, etc...
- extension 2 : infSim adaptor wrapper package
Wrapper module that allows to use this API to allow running benchmarks on infSim architecture
| zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/TODO.md | TODO.md |
# Zero Resource Challenge Benchmark Toolkit 
 [](https://badge.fury.io/py/zerospeech-benchmarks)  
This repository contains a toolbox assisting in running and handling all things related to the zerospeech benchmarks.
For more information on the [Zero Resource Challenge you can visit our website](https://zerospeech.com).
This toolbox allows you to download all resources linked with the benchmarks (datasets, model checkpoints, samples, etc.),
to run the various benchmarks on your own submissions, and to upload the results to our website for them to be included in our leaderboards.
The available resources can be also found directly on our repository [download.zerospeech.com](https://download.zerospeech.com)
## Installation
The zerospeech benchmark toolbox is a Python package, so you need a working Python installation on your system
before you can start.
You can use [miniconda](https://docs.conda.io/en/latest/miniconda.html) a
lightweight version of anaconda or any other way of installing python you prefer.
**Note that the package has been tested on python 3.8+, other versions are not recommended.**
Once python is installed you can install the package using :
```
pip install "zerospeech-benchmarks[all]"
```
If you are a conda user you can use our prepackaged environment :
```
conda env create coml/zrc-toolkit
```
To verify that the toolbox is installed correctly you can try `zrc version` which should print
the version information. If this is not the case you can open an issue describing the error directly on our
[github](https://github.com/zerospeech/benchmarks/issues)
## Toolbox Usage
### Downloads
Installation location can be specified using the environment variable `APP_DIR`, in linux & macOS this can
be done in a terminal:
`$ export APP_DIR=/location/to/data`
By default, all data is saved in `$HOME/zr-data`
All temporary files are saved in `/tmp`; this can be changed using the environment variable `TMP_DIR`
#### Download benchmark datasets
You can start by listing the available datasets using the command `zrc datasets` then you can download the
dataset you want using the command `zrc datasets:pull [dataset-name]`.
> When listing datasets the **Installed** column specifies whether the dataset has been downloaded.

Datasets are installed in the `$APP_DIR/datasets` folder.
To delete a dataset you can use the command `zrc datasets:rm [dataset-name]`
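The same download can also be scripted from Python; the example notebooks shipped with this repository do roughly the following (a sketch — class and dataset names are taken from those notebooks and may change between versions):
```python
import os
os.environ.setdefault("APP_DIR", "zr-data")  # set before importing the toolbox

from zerospeech.model.datasets import DatasetsDir
from zerospeech.benchmarks.datasets import SLM21Dataset

datasets_dir = DatasetsDir.load()
dataset = datasets_dir.get("sLM21-dataset", cls=SLM21Dataset)
if not dataset.installed:
    dataset.pull()
```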
#### Download model checkpoints
The command `zrc checkpoints` allows you to list available checkpoints.
You can then download each set by typing `zrc checkpoints:pull [name]`
Checkpoints are installed in the `$APP_DIR/checkpoints` folder.

To delete the checkpoints you can use the command : `zrc checkpoints:rm [name]`
#### Download samples
The command `zrc samples` allows you to list the available samples.
You can then download each sample by typing `zrc samples:pull [name]`.
Samples are installed in the `$APP_DIR/samples` folder.

To delete a sample from your system you can use `zrc samples:rm [name]`
### Benchmarks
You can list available benchmarks by typing the `zrc benchmarks` command.
To create a submission you have to follow the instructions on each of our task pages
[Task1](https://zerospeech.com/tasks/task_1/tasks_goals/),
[Task2](https://zerospeech.com/tasks/task_2/tasks_goals/),
[Task3](https://zerospeech.com/tasks/task_3/tasks_goals/),
[Task4](https://zerospeech.com/tasks/task_4/tasks_goals/)
**Some older benchmarks may not be available straight away, but they will be added as soon as possible.**
Once the submission has been created you can run the benchmark on it
with the following command :
- `zrc benchmarks:run [name] [/path/to/submission] [...args]`
Some benchmarks are split into sub-tasks; you can run only part of them by using the following syntax:
- `zrc benchmarks:run sLM21 [/path/to/submission] -t lexical syntactic`
With this syntax we run the sLM21 benchmark on our submission, but only for the lexical and syntactic tasks, omitting the semantic one.
In the same way we can also only run on the dev set (or the test) :
- `zrc benchmarks:run sLM21 [/path/to/submission] -s dev -t lexical syntactic`
We run the same tasks as previously but only on the dev set of the benchmark.
For information on each benchmark you can run the `zrc benchmarks:info [name]` command or visit the corresponding section on our website [zerospeech.com](https://zerospeech.com)
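A benchmark can also be run from Python, which is essentially what the example notebooks in this repository do (a sketch for abxLS — other benchmarks expose analogous submission and benchmark classes; the submission path is a placeholder):
```python
from pathlib import Path
from zerospeech.benchmarks import abx_LS

submission = abx_LS.AbxLSSubmission.load(Path("path/to/submission"))
if submission.valid:  # optional validation, as in the prosAudit example notebook
    abx_LS.AbxLSBenchmark().run(submission)
# results are written to the submission's 'scores' directory by default
```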
#### Submission Format
Each benchmark has a specific format that a submission has to follow. You can initialize a
submission directory by using the following syntax : `zrc submission:init [name] [/path/to/desired-location-for-submission]`, this will
create a set of folders in the structure corresponding to the selected benchmark.
For more detailed information on each benchmark you can see each Task page respectively.
Once all submission files have been created you can validate your submission to see if everything is working properly.
To do so, use the following syntax: `zrc submission:verify [name] [/path/to/submission]`; this will verify that all files
are set up correctly, or show informative errors if not.
**Note:** During benchmark evaluation the default behavior is to run validation on your submission, you can deactivate this by adding
the option `--skip-verification`.
### Submit 
The submit functionality allows uploading scores to our platform,
which helps us keep track of new models and publications that use the benchmarks; we also compile all the
scores into our leaderboards to be able to compare them.
> The submit functionality is a work in progress; it will be available soon.
| zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/README.md | README.md |
# ABX-LS submission
The format of a submission is briefly explained here.
For a more detailed explanation go to our website [zerospeech.com](https://zerospeech.com)
## meta.yaml
## params.yaml
## /dev-{clean, other}, /test-{clean, other}
The submission must contain the following subdirectories: `dev-clean`, `dev-other`, `test-clean` and `test-other`.
Each `.wav` file in the dataset must have its corresponding `.npy` file in the submission under the same directory structure.
For example the dataset file /path/to/dataset/phonetic/dev-clean/1272-128104-0000.wav must have its submitted
file /path/to/submission/phonetic/dev-clean/1272-128104-0000.npy.
> In the past, `.txt` files were used, but binary `.npy` files reduce the size of the submission; please prefer the latter.
Each .npy file encodes a single 2D numpy array of floats, each line encoding one features frame. For example:
```
42.286527175400906 -107.68503050450957 59.79000088588511 -113.85831030071697
0.7872647311548775 45.33505222077471 -8.468742865224545 0
328.05422046327067 -4.495454384937348 241.186547397405 40.16161685378687
```
- The number of columns (the features dimension) must be constant across the files.
The number of lines depends on the speech sample duration.
- The frame shift (the shift between two successive frames) must be given in `params.yaml` along with the
metric used for evaluation of those features.
- Each array must contain at least 2 frames (i.e. each file must have at least 2 lines).
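As an illustration, such files can be produced with `numpy` by mirroring the dataset layout (a sketch — `extract_features` and the two directory paths are placeholders, not part of the toolkit):
```python
from pathlib import Path
import numpy as np

dataset_dir = Path("path/to/dataset")        # placeholder
submission_dir = Path("path/to/submission")  # placeholder

for wav in dataset_dir.rglob("*.wav"):
    features = extract_features(wav)  # placeholder: any 2D float array of shape (n_frames >= 2, dim)
    out = submission_dir / wav.relative_to(dataset_dir).with_suffix(".npy")
    out.parent.mkdir(parents=True, exist_ok=True)
    np.save(out, features)
```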
| zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/docs/abxLS.md | abxLS.md |
# sLM21 submission
The format of a submission is briefly explained here.
For a more detailed explanation go to our website [zerospeech.com](https://zerospeech.com)
## meta.yaml
A yaml file specifying various information about the submission.
## params.yaml
A yaml file specifying various runtime parameters :
- quiet: a boolean specifying if the console needs to not print information and loading bars
- semantic/librispeech: a boolean specifying if the use of semantic subset librispeech is to be used
- semantic/synthetic: a boolean specifying if the use of semantic subset synthetic is to be used
- semantic/metric: a string specifying which metric function to use (any metric supported by scipy.spatial.distance.cdist is supported)
- semantic/pooling: pooling method used (must be 'min','max', 'mean', 'sum', 'last' or 'lastlast')
- n_jobs: how many processes to use to speed-up certain parallelized parts of evaluation
## /lexical and /syntactic
The /lexical and /syntactic folders of the submission must contain the two files dev.txt and test.txt. Each *.wav file in the dataset must have a corresponding line in either dev.txt or test.txt with its pseudo-probability (order does not matter). For example, if the dev dataset contains:
```
/path/to/dataset/lexical/dev
├── aAAfmkmQpVz.wav
├── AaaggUZsvkR.wav
├── aAakhKfuvQI.wav
├── aAaOswLeeBL.wav
├── AaasVuoMJnS.wav
```
The submitted file dev.txt must contain entries like:
```
aAAfmkmQpVz -313.37445068359375
AaaggUZsvkR -447.8950500488281
aAakhKfuvQI -383.8902587890625
aAaOswLeeBL -430.2048645019531
AaasVuoMJnS -356.9426574707031
```
## /semantic
The semantic folder of the submission must contain the following subdirectories: dev/synthetic, dev/librispeech, test/synthetic and test/librispeech.
Each .wav file in the dataset must have its corresponding .txt file in the submission under the same directory structure. For example the dataset file /path/to/dataset/semantic/dev/synthetic/aAbcsWWKCz.wav must have its submitted file /path/to/submission/semantic/dev/synthetic/aAbcsWWKCz.txt.
Each .txt file encodes a single 2D numpy array of floats, each line encoding one features frame. For example:
```
42.286527175400906 -107.68503050450957 59.79000088588511 -113.85831030071697
0.7872647311548775 45.33505222077471 -8.468742865224545 0
328.05422046327067 -4.495454384937348 241.186547397405 40.16161685378687
```
The number of columns (the features dimension) must be constant across the files. The number of lines depends on the speech sample duration.
The metric and pooling method used for evaluation must be specified in params.yaml | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/docs/prosAudit.md | prosAudit.md |
# sLM21 submission
The format of a submission is briefly explained here.
For a more detailed explanation go to our website [zerospeech.com](https://zerospeech.com)
## meta.yaml
A yaml file specifying various information about the submission.
## params.yaml
A yaml file specifying various runtime parameters :
- quiet: a boolean specifying if the console needs to not print information and loading bars
- semantic/librispeech: a boolean specifying if the use of semantic subset librispeech is to be used
- semantic/synthetic: a boolean specifying if the use of semantic subset synthetic is to be used
- semantic/metric: a string specifying which metric function to use (any metric supported by scipy.spatial.distance.cdist is supported)
- semantic/pooling: pooling method used (must be 'min','max', 'mean', 'sum', 'last' or 'lastlast')
- n_jobs: how many processes to use to speed-up certain parallelized parts of evaluation
## /lexical and /syntactic
The /lexical and /syntactic folders of the submission must contain the two files dev.txt and test.txt. Each *.wav file in the dataset must have a corresponding line in either dev.txt or test.txt with its pseudo-probability (order does not matter). For example, if the dev dataset contains:
```
/path/to/dataset/lexical/dev
├── aAAfmkmQpVz.wav
├── AaaggUZsvkR.wav
├── aAakhKfuvQI.wav
├── aAaOswLeeBL.wav
├── AaasVuoMJnS.wav
```
The submitted file dev.txt must contain entries like:
```
aAAfmkmQpVz -313.37445068359375
AaaggUZsvkR -447.8950500488281
aAakhKfuvQI -383.8902587890625
aAaOswLeeBL -430.2048645019531
AaasVuoMJnS -356.9426574707031
```
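For instance, if the pseudo-probabilities are collected in a Python dictionary keyed by file id, the file can be written as follows (a sketch — `scores` and the output path are placeholders):
```python
from pathlib import Path

scores = {"aAAfmkmQpVz": -313.37445068359375, "AaaggUZsvkR": -447.8950500488281}  # file id -> pseudo log-probability
out_file = Path("path/to/submission/lexical/dev.txt")
with out_file.open("w") as f:
    for file_id, score in scores.items():
        f.write(f"{file_id} {score}\n")
```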
## /semantic
The semantic folder of the submission must contain the following subdirectories: dev/synthetic, dev/librispeech, test/synthetic and test/librispeech.
Each .wav file in the dataset must have its corresponding .txt file in the submission under the same directory structure. For example the dataset file /path/to/dataset/semantic/dev/synthetic/aAbcsWWKCz.wav must have its submitted file /path/to/submission/semantic/dev/synthetic/aAbcsWWKCz.txt.
Each .txt file encodes a single 2D numpy array of floats, each line encoding one features frame. For example:
```
42.286527175400906 -107.68503050450957 59.79000088588511 -113.85831030071697
0.7872647311548775 45.33505222077471 -8.468742865224545 0
328.05422046327067 -4.495454384937348 241.186547397405 40.16161685378687
```
The number of columns (the feature dimension) must be constant across files. The number of lines depends on the duration of the speech sample.
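For reference, one such feature file can be written from a numpy array as sketched below (the array shape and output path are illustrative; np.savetxt produces the whitespace-separated float rows shown above, and only the number of columns has to stay the same across files):
```
from pathlib import Path
import numpy as np

out = Path("submission/semantic/dev/synthetic/aAbcsWWKCz.txt")
out.parent.mkdir(parents=True, exist_ok=True)

# hypothetical (n_frames, feature_dim) array produced by your model
features = np.random.rand(137, 4)
np.savetxt(out, features)
```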
The metric and pooling method used for evaluation must be specified in params.yaml.
```
""" Download the benchmark dataset """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.model.datasets import DatasetsDir
from zerospeech.benchmarks.datasets import AbxLSDataset
# Load and download the abxLS dataset
datasets_dir = DatasetsDir.load()
dataset = datasets_dir.get("abxLS-dataset", cls=AbxLSDataset)
if not dataset.installed:
dataset.pull()
""" Download a sample submission (Optional) """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.model.samples import SamplesDir
# Load sample dir
sample_dir = SamplesDir.load()
# Download abxLS randomly generated submission
random_sub = sample_dir.get("abxLS-random-submission")
if not random_sub.installed:
random_sub.pull()
# Download the bert baseline
bert_baseline_sub = sample_dir.get("abxLS-baseline-bert-submission")
if not bert_baseline_sub.installed:
bert_baseline_sub.pull()
# Download the lstm baseline
lstm_baseline_sub = sample_dir.get("abxLS-baseline-lstm-submission")
if not lstm_baseline_sub.installed:
lstm_baseline_sub.pull()
""" Evaluate a submission """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.benchmarks import abx_LS
from pathlib import Path
# Path to your submission
sub_location: Path = random_sub.location
# load submission from disk
submission_obj = abx_LS.AbxLSSubmission.load(sub_location)
# load benchmark
benchmark = abx_LS.AbxLSBenchmark()
# run benchmark
benchmark.run(submission_obj)
# Results are in sub_location / 'scores' dir by default
```
| zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/examples/benchmark_examples/abxLS.ipynb | abxLS.ipynb |
```
""" Download the benchmark dataset """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.model.datasets import DatasetsDir
from zerospeech.benchmarks.datasets import ProsAuditLMDataset
# Load and download the ProsAudit Dataset
dataset_dir = DatasetsDir.load()
dataset = dataset_dir.get('prosaudit-dataset', cls=ProsAuditLMDataset)
if not dataset.installed:
dataset.pull()
""" Evaluate a submission """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.benchmarks import pros_audit
from pathlib import Path
# Path to your submission
sub_location: Path = ...
# load submission
submission_obj = pros_audit.ProsodySubmission.load(sub_location)
# Optional validation
if not submission_obj.valid:
raise ValueError('submission is not valid')
# load benchmark
benchmark = pros_audit.SLMProsodyBenchmark()
# run benchmark
benchmark.run(submission_obj)
# Results are in sub_location / 'scores' dir by default
```
| zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/examples/benchmark_examples/prosAudit.ipynb | prosAudit.ipynb |
```
""" Download the benchmark dataset """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.model.datasets import DatasetsDir
from zerospeech.benchmarks.datasets import SLM21Dataset
# Load and download the sLM21 dataset
datasets_dir = DatasetsDir.load()
dataset = datasets_dir.get("sLM21-dataset", cls=SLM21Dataset)
if not dataset.installed:
dataset.pull()
""" Download a sample submission (Optional) """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.model.samples import SamplesDir
# Load sample dir
sample_dir = SamplesDir.load()
# Download randomly generated submission
random_sub = sample_dir.get("sLM-random-submission")
if not random_sub.installed:
random_sub.pull()
# Download the bert baseline
bert_baseline_sub = sample_dir.get("sLM-baseline-bert-submission")
if not bert_baseline_sub.installed:
bert_baseline_sub.pull()
# Download the lstm baseline
lstm_baseline_sub = sample_dir.get("sLM-baseline-lstm-submission")
if not lstm_baseline_sub.installed:
lstm_baseline_sub.pull()
""" Evaluate a submission """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.benchmarks import sLM_21
from pathlib import Path
# Path to your submission
sub_location: Path = random_sub.location
# load submission from disk
submission_obj = sLM_21.SLM21Submission.load(sub_location)
# load benchmark
benchmark = sLM_21.SLM21Benchmark()
# run benchmark
benchmark.run(submission_obj)
# Results are in sub_location / 'scores' dir by default
```
| zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/examples/benchmark_examples/sLM21.ipynb | sLM21.ipynb |
```
""" Download the benchmark dataset """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.model.datasets import DatasetsDir
from zerospeech.benchmarks.datasets import ZRC2017Dataset
# Load and download the ZRC2017 test dataset
datasets_dir = DatasetsDir.load()
dataset = datasets_dir.get("zrc2017-test-dataset", cls=ZRC2017Dataset)
if not dataset.installed:
dataset.pull()
""" Download a sample submission (Optional) """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
# from zerospeech.model.samples import SamplesDir
# Load sample dir
# sample_dir = SamplesDir.load()
# todo: create sample submissions for abx17
# sample_itm = sample_dir.get("....")
# if not sample_itm.installed:
# sample_itm.pull()
""" Evaluate a submission """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.benchmarks import abx_17
from pathlib import Path
# Path to your submission
sub_location: Path = ...
# load submission from disk
submission_obj = abx_17.ABX17Submission.load(sub_location)
# load benchmark
benchmark = abx_17.ABX17Benchmark()
# run benchmark
benchmark.run(submission_obj)
# Results are in sub_location / 'scores' dir by default
```
| zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/examples/benchmark_examples/abx17.ipynb | abx17.ipynb |
```
""" Download the benchmark dataset """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.model.datasets import DatasetsDir
from zerospeech.benchmarks.datasets import ZRC2017Dataset
# Load and download the ZRC2017 test dataset
datasets_dir = DatasetsDir.load()
dataset = datasets_dir.get("zrc2017-test-dataset", cls=ZRC2017Dataset)
if not dataset.installed:
dataset.pull()
""" Download a sample submission (Optional) """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
# from zerospeech.model.samples import SamplesDir
# Load sample dir
# sample_dir = SamplesDir.load()
# todo: create sample submissions for tde17
# sample_itm = sample_dir.get("....")
# if not sample_itm.installed:
# sample_itm.pull()
""" Evaluate a submission """
%env APP_DIR=zr-data
%env TMP_DIR=/tmp
from zerospeech.benchmarks import tde_17
from pathlib import Path
# Path to your submission
sub_location: Path = ...
# load submission from disk
submission_obj = tde_17.TDE17Submission.load(sub_location)
# load benchmark
benchmark = tde_17.TDE17Benchmark()
# run benchmark
benchmark.run(submission_obj)
# Results are in sub_location / 'scores' dir by default
```
| zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/examples/benchmark_examples/tde17.ipynb | tde17.ipynb |
import contextlib
from types import TracebackType
from typing import IO, Type, AnyStr, Iterator, Iterable, Union, Optional, List, Generator
from rich.console import Console
from rich.progress import (
Progress, TextColumn, BarColumn, TaskProgressColumn, TimeElapsedColumn,
FileSizeColumn, TotalFileSizeColumn, SpinnerColumn
)
from rich.table import Column
class DevNull(IO[str]):
""" Class emulating /dev/null functionality """
def close(self) -> None:
""" /dev/null no interaction needed """
pass
def fileno(self) -> int:
""" /dev/null no interaction needed """
pass
def flush(self) -> None:
""" /dev/null no interaction needed """
pass
def isatty(self) -> bool:
""" /dev/null no interaction needed """
pass
def read(self, __n: int = ...) -> AnyStr:
""" /dev/null no interaction needed """
pass
def readable(self) -> bool:
""" /dev/null no interaction needed """
pass
def readline(self, __limit: int = ...) -> AnyStr:
""" /dev/null no interaction needed """
pass
def readlines(self, __hint: int = ...) -> List[AnyStr]:
""" /dev/null no interaction needed """
pass
def seek(self, __offset: int, __whence: int = ...) -> int:
""" /dev/null no interaction needed """
pass
def seekable(self) -> bool:
""" /dev/null no interaction needed """
pass
def tell(self) -> int:
""" /dev/null no interaction needed """
pass
def truncate(self, __size: Union[int, None] = ...) -> int:
""" /dev/null no interaction needed """
pass
def writable(self) -> bool:
""" /dev/null no interaction needed """
pass
def writelines(self, __lines: Iterable[AnyStr]) -> None:
""" /dev/null no interaction needed """
pass
def __next__(self) -> AnyStr:
""" /dev/null no interaction needed """
pass
def __iter__(self) -> Iterator[AnyStr]:
""" /dev/null no interaction needed """
pass
def __enter__(self) -> IO[AnyStr]:
""" /dev/null no interaction needed """
pass
def __exit__(self, __t: Optional[Type[BaseException]], __value: Optional[BaseException],
__traceback: Optional[TracebackType]) -> Optional[bool]:
""" /dev/null no interaction needed """
pass
def write(self, *_):
""" /dev/null no interaction needed """
pass
console = Console(log_time_format="[info]")
warning_console = Console(stderr=True, style="bold yellow", log_time_format="[warning]")
error_console = Console(stderr=True, style="bold red", log_time_format="[error]")
void_console = Console(file=DevNull())
@contextlib.contextmanager
def with_progress(show: bool = True, file_transfer: bool = False, spinner: bool = False) -> Generator[
        Progress, None, None]:
    """ Build a rich Progress bar as a context manager (muted when show is False) """
if show:
con = console
else:
con = void_console
bar_items = [
TextColumn("{task.description}", table_column=Column(ratio=1)),
BarColumn(bar_width=None, table_column=Column(ratio=2))
]
if file_transfer:
bar_items.append(FileSizeColumn())
bar_items.append(TotalFileSizeColumn())
else:
bar_items.append(TaskProgressColumn())
if spinner:
bar_items.append(SpinnerColumn())
bar_items.append(TimeElapsedColumn())
progress = Progress(*bar_items, console=con, expand=True, transient=True)
with progress:
yield progress | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/out.py | out.py |
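

if __name__ == "__main__":  # pragma: no cover
    # Minimal usage sketch of the progress helper defined above (illustrative
    # only -- the task description and total are placeholder values).
    with with_progress(show=True, spinner=True) as progress:
        task = progress.add_task("processing...", total=100)
        for _ in range(100):
            progress.update(task, advance=1)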
import atexit
import functools
import os
import shutil
import tempfile
from datetime import datetime, timedelta
from functools import lru_cache
from pathlib import Path
from tempfile import gettempdir
from typing import Dict, Tuple, Any, Optional, Union, Callable
from pydantic import (
BaseSettings,
AnyHttpUrl,
parse_obj_as,
validator,
DirectoryPath,
Field,
EmailStr, BaseModel,
)
StrOrCallable = Union[str, Callable[..., str]]
API_URL = os.environ.get('_DEV_API_URL', 'https://api.cognitive-ml.fr')
class Token(BaseModel):
""" Dataclass defining a session token"""
username: str
access_token: str
token_type: str
expiry: datetime = Field(default_factory=lambda: datetime.now() + timedelta(days=5))
def is_expired(self) -> bool:
return datetime.now() > self.expiry
class ZerospeechAPI(BaseModel):
client_id: str = "zrc-commandline-benchmark-tool"
client_secret: str = "wIBhXvNDTZ2xtDh3k0MJGWx+dAFohlKkGfFwV101CWo="
API_URL: AnyHttpUrl = parse_obj_as(AnyHttpUrl, API_URL)
API_ROUTES = {
"user_login": '/auth/login',
"benchmark_info": functools.partial(
lambda benchmark_id: f'/benchmarks/{benchmark_id}/info'
),
"user_info": functools.partial(lambda username: f'/users/{username}/profile'),
"new_model": functools.partial(
lambda username, author_name: f'/users/{username}/models/create?author_name={author_name}'),
"new_submission": functools.partial(lambda username: f'/users/{username}/submissions/create'),
"submission_content_add": functools.partial(
lambda submission_id, part_name: f'/submissions/{submission_id}/content/add?part_name={part_name}'),
"submission_status": functools.partial(
lambda submission_id: f"/submissions/{submission_id}/content/status"
)
}
@staticmethod
def build_api_headers(token: Optional[Token]):
""" Build correct headers for connecting with the zerospeech API"""
if token is None:
return dict()
if token.is_expired():
raise ValueError('token is expired, please create a new session')
headers = {}
if token.token_type == 'bearer':
headers['Authorization'] = f"Bearer {token.access_token}"
return headers
def request_params(self, route_name: str, token: Optional[Token] = None, **kwargs) -> Tuple[StrOrCallable, Dict[str, Any]]:
""" Build params for sending request to api """
sub_route = self.API_ROUTES.get(route_name, None)
if sub_route is None:
raise ValueError(f'route {route_name} does not exist')
if callable(sub_route):
sub_route = sub_route(**kwargs)
route_url = f"{self.API_URL}{sub_route}"
return route_url, self.build_api_headers(token)
class ZerospeechBenchmarkSettings(BaseSettings):
APP_DIR: Path = Path.home() / "zr-data"
TMP_DIR: DirectoryPath = Path(gettempdir())
repo_origin: AnyHttpUrl = parse_obj_as(
AnyHttpUrl, "https://download.zerospeech.com/repo.json"
)
admin_email: EmailStr = parse_obj_as(EmailStr, "[email protected]")
api: ZerospeechAPI = ZerospeechAPI()
@validator("repo_origin", pre=True)
def cast_url(cls, v):
""" Cast strings to AnyHttpUrl """
if isinstance(v, str):
return parse_obj_as(AnyHttpUrl, v)
return v
@property
def dataset_path(self) -> Path:
""" Path to dataset storage folder """
return self.APP_DIR / "datasets"
@property
def samples_path(self) -> Path:
""" Path to samples storage folder """
return self.APP_DIR / "samples"
@property
def checkpoint_path(self) -> Path:
""" Path to checkpoint folder """
return self.APP_DIR / "checkpoints"
@property
def repository_index(self) -> Path:
""" Path to local repository index """
return self.APP_DIR / "repo.json"
@property
def user_credentials(self):
return self.APP_DIR / "creds.json"
@property
def submit_available_url(self) -> AnyHttpUrl:
""" URL to check if submit is available """
return parse_obj_as(
AnyHttpUrl, f"{str(self.api.API_URL)}/_private/submit-available"
)
def mkdtemp(self, auto_clean: bool = True) -> Path:
tmp_loc = Path(tempfile.mkdtemp(prefix="zeroC", dir=self.TMP_DIR))
def clean_tmp(d):
shutil.rmtree(d)
if auto_clean:
# create an auto-clean action
atexit.register(clean_tmp, d=tmp_loc)
return tmp_loc
@lru_cache()
def get_settings() -> ZerospeechBenchmarkSettings:
""" Build & return global settings """
env_file = os.environ.get("ZR_ENV", None)
if env_file:
return ZerospeechBenchmarkSettings(
_env_file=env_file, _env_file_encoding="utf-8"
)
return ZerospeechBenchmarkSettings() | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/settings.py | settings.py |
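

if __name__ == "__main__":  # pragma: no cover
    # Illustrative usage of the cached settings accessor defined above:
    # print where the toolkit stores its data and which repository index it uses.
    settings = get_settings()
    print(f"APP_DIR: {settings.APP_DIR}")
    print(f"datasets: {settings.dataset_path}")
    print(f"repository index origin: {settings.repo_origin}")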
import _thread as thread
import contextlib
import io
import json
import re
import sys
import tarfile
import threading
import urllib.parse
from pathlib import Path
from typing import Dict, List, Union, Optional, Protocol
from zipfile import ZipFile
import requests
from Crypto.Hash import MD5 # noqa: the package name is not the same
# from datasize import DataSize  # todo: check why this import was removed
from pydantic import ByteSize, BaseModel
try:
import yaml
except ImportError:
yaml = None
try:
import tomli # noqa: is not a strict requirement
except ImportError:
tomli = None
from .out import with_progress, void_console, console
from .settings import get_settings
st = get_settings()
def exit_after(s):
""" Decorator that kills function after s number of seconds
Usage:
@exit_after(10)
def f():
# complex computation
try:
f()
except KeyboardInterrupt:
print('Function f could not finish in 10 seconds and was interrupted')
"""
def process_quit():
""" Raise a Keyboard interrupt"""
thread.interrupt_main() # raises KeyboardInterrupt
def outer(fn):
def inner(*args, **kwargs):
"""
Uses a timer from threading module to raise a KeyboardInterrupt after s seconds.
"""
timer = threading.Timer(s, process_quit)
timer.start()
try:
result = fn(*args, **kwargs)
finally:
""" Cancel timer if function finished processing """
timer.cancel()
return result
return inner
return outer
class ContextualItem(Protocol):
""" Item providing context to exceptions """
def print(self, allow_warnings: bool = False):
""" protocol function allowing to print context """
pass
class ContextualException(Exception):
""" Custom exception providing a context """
def __init__(self, msg: str, ctx: Optional[ContextualItem] = None):
self._context: ContextualItem = ctx
super().__init__(msg)
def print_context(self, allow_warnings: bool = False):
""" Prints the current context """
if self._context:
self._context.print(allow_warnings)
class ScoresNotFound(ContextualException):
pass
class MetaYamlNotValid(ContextualException):
pass
class InvalidSubmissionError(ContextualException):
pass
class SizeUnit(BaseModel):
__root__: ByteSize
@property
def human_readable(self):
return self.__root__.human_readable(decimal=True)
@property
def as_bytes(self):
return self.__root__
def load_obj(location: Path) -> Union[Dict, List]:
""" Loads an object from standard formats (.json, yaml, ...) to a standard structure (Dict, List)"""
with location.open() as fp, location.open('rb') as bfp:
if location.suffix == '.json':
return json.load(fp)
elif yaml and location.suffix in ('.yaml', '.yml'):
return yaml.load(fp, Loader=yaml.FullLoader)
elif tomli and location.suffix in ('.toml', '.tml'):
return tomli.load(bfp)
elif location.suffix in ('.txt', '.list'):
return fp.readlines()
else:
raise ValueError('File of unknown format !!')
def md5sum(file_path: Path, chunk_size: int = 8192):
""" Return a md5 hash of a files content """
h = MD5.new()
with file_path.open('rb') as f:
while True:
chunk = f.read(chunk_size)
if len(chunk):
h.update(chunk)
else:
break
return h.hexdigest()
def unzip(archive: Path, output: Path):
""" Unzips contents of a zip archive into the output directory """
# create folder if it does not exist
output.mkdir(exist_ok=True, parents=True)
# open & extract
with ZipFile(archive, 'r') as zipObj:
zipObj.extractall(output)
def untar(archive: Path, output: Path):
""" Extract a tar archive (supports gzipped format) into the output directory"""
# create folder if it does not exist
output.mkdir(exist_ok=True, parents=True)
# Open & extract
with tarfile.open(archive, 'r') as tar:
tar.extractall(path=output)
def extract(archive: Path, output: Path):
""" Extract an archive into the output directory """
if archive.suffix in ('.zip',):
unzip(archive, output)
elif archive.suffix in ('.tar', '.gz', '.tgz', '.bz2', '.tbz2', '.xz', '.txz'):
untar(archive, output)
else:
raise ValueError(f'{archive.suffix}: Unsupported archive format')
def zip_folder(archive_file: Path, location: Path):
""" Create a zip archive from a folder """
with ZipFile(archive_file, 'w') as zip_obj:
for file in filter(lambda x: x.is_file(), location.rglob("*")):
zip_obj.write(file, str(file.relative_to(location)))
def get_request_filename(response: requests.Response) -> str:
""" Get filename from response """
if "Content-Disposition" in response.headers.keys():
return re.findall("filename=(.+)", response.headers["Content-Disposition"])[0]
else:
return Path(urllib.parse.unquote(response.url)).name
def download_extract_archive(
archive_url: str, target_location: Path, size_in_bytes: int, *, filename: str = "",
md5sum_hash: str = "", quiet: bool = False, show_progress: bool = True,
):
tmp_dir = st.mkdtemp()
response = requests.get(archive_url, stream=True)
tmp_filename = tmp_dir / get_request_filename(response)
if quiet:
_console = void_console
show_progress = False
else:
_console = console
with with_progress(show=show_progress, file_transfer=True) as progress:
total = int(size_in_bytes)
task1 = progress.add_task(f"[red]Downloading {filename}...", total=total)
with tmp_filename.open("wb") as stream:
for chunk in response.iter_content(chunk_size=1024):
stream.write(chunk)
progress.update(task1, advance=1024)
progress.update(task1, completed=total, visible=False)
_console.print("[green]Download completed Successfully!")
if md5sum_hash != "":
with _console.status("[red]Verifying md5sum from repository..."):
h = md5sum(tmp_filename)
if h == md5sum_hash:
_console.print("[green]MD5 sum verified!")
else:
_console.print("[green]MD5sum Failed, Check with repository administrator.\nExiting...")
sys.exit(1)
with _console.status("[red]Extracting archive..."):
extract(tmp_filename, target_location)
def symlink_dir_contents(source: Path, dest: Path):
""" create symlinks of all content in a directory into another """
dest.mkdir(exist_ok=True, parents=True)
for item in source.iterdir():
(dest / item.name).symlink_to(item, target_is_directory=item.is_dir())
def download_file(url: str, dest: Path):
""" Download a file from a given URL """
response = requests.get(url, allow_redirects=True)
with dest.open('wb') as fb:
fb.write(response.content)
@contextlib.contextmanager
def nostdout():
""" Redirect stdout to /dev/null """
save_stdout = sys.stdout
    sys.stdout = io.StringIO()
yield
sys.stdout = save_stdout | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/misc.py | misc.py |
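

if __name__ == "__main__":  # pragma: no cover
    # Illustrative usage of the archive helpers defined above: hash, then
    # extract a local archive. The archive path is a hypothetical example.
    archive_file = Path("example.zip")
    if archive_file.is_file():
        print(f"md5sum: {md5sum(archive_file)}")
        extract(archive_file, Path("extracted"))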
import argparse
import sys
from pathlib import Path
from rich.padding import Padding
from rich.table import Table
from zerospeech.generics import samples
from zerospeech.misc import md5sum, extract
from zerospeech.networkio import check_update_repo_index, update_repo_index
from zerospeech.out import console, error_console, void_console, warning_console
from zerospeech.settings import get_settings
from .cli_lib import CMD
st = get_settings()
class SamplesCMD(CMD):
""" Manipulate Samples """
COMMAND = "samples"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("--local", action="store_true", help="List local checkpoint only")
def run(self, argv: argparse.Namespace):
samples_dir = samples.SamplesDir.load()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("Name")
table.add_column("Origin")
table.add_column("Size")
table.add_column("Installed")
if argv.local:
dt_list = samples_dir.items
else:
dt_list = samples_dir.available_items
for d in dt_list:
dts = samples_dir.get(d)
if dts.origin.type == 'internal':
host = st.repo_origin.host
else:
host = "external"
table.add_row(
dts.name, host, dts.origin.size_label, f"{dts.installed}"
)
console.print(Padding(f"==> RootDir: {samples_dir.root_dir}", (1, 0, 1, 0), style="bold grey70", expand=False))
console.print(table)
class PullSampleCMD(CMD):
""" Download a sample """
COMMAND = "pull"
NAMESPACE = "samples"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument('name')
parser.add_argument('-u', '--skip-verification', action='store_true', help="Skip archive verification")
parser.add_argument('-q', '--quiet', action='store_true', help='Suppress download info output')
def run(self, argv: argparse.Namespace):
# update repo index if necessary
if check_update_repo_index():
update_repo_index()
sample_dir = samples.SamplesDir.load()
sample_itm = sample_dir.get(argv.name, cls=samples.SampleItem)
sample_itm.pull(quiet=argv.quiet, show_progress=True, verify=not argv.skip_verification)
class ImportSamples(CMD):
""" Import a sample from a zip archive """
COMMAND = "import"
NAMESPACE = "samples"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("zip_file")
parser.add_argument('-u', '--skip-verification', action='store_true',
help='Do not check hash in repo index.')
parser.add_argument('-q', '--quiet', action='store_true',
help='Suppress download info output')
def run(self, argv: argparse.Namespace):
# update repo index if necessary
if check_update_repo_index():
update_repo_index()
sample_dir = samples.SamplesDir.load()
archive = Path(argv.zip_file)
std_out = console
if argv.quiet:
std_out = void_console
        if not archive.is_file() or archive.suffix != '.zip':
            error_console.print(f'Given archive ({archive}) does not exist or is not a valid zip archive !!!')
sys.exit(1)
if not argv.skip_verification:
with std_out.status(f'Hashing {archive.name}'):
md5hash = md5sum(archive)
item = sample_dir.find_by_hash(md5hash)
if item is None:
error_console.print(f'Archive {archive.name} does not correspond to a registered sample')
sys.exit(1)
name = item.name
std_out.print(f"[green]Sample {name} detected")
else:
name = archive.stem
warning_console.print(f"Importing {name} without checking, could be naming/file mismatch")
# unzip sample
with std_out.status(f"Unzipping {name}..."):
extract(archive, sample_dir.root_dir / name)
std_out.print(f"[green]Sample {name} installed successfully !!")
class RemoveSampleCMD(CMD):
""" Remove a sample item """
COMMAND = "rm"
NAMESPACE = "samples"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument('name')
def run(self, argv: argparse.Namespace):
sample_dir = samples.SamplesDir.load()
smp = sample_dir.get(argv.name)
if smp:
smp.uninstall()
console.log("[green] Sample uninstalled successfully !")
else:
error_console.log(f"Failed to find sample named :{argv.name}") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/samples.py | samples.py |
import argparse
import sys
from pathlib import Path
from rich.markdown import Markdown
from zerospeech.benchmarks import BenchmarkList
from zerospeech.out import error_console, warning_console
from zerospeech.submissions import show_errors
from .cli_lib import CMD
class BenchmarksCMD(CMD):
""" List available benchmarks """
COMMAND = "benchmarks"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
""" No extra arguments"""
pass
# noinspection PyUnresolvedReferences
def run(self, argv: argparse.Namespace):
markdown_text = """#### List of Benchmarks\n\n"""
for nb, bench in enumerate(BenchmarkList):
markdown_text += f"{nb + 1}) **{bench.value}**\n\n"
markdown_text += f"\t{len(bench.value) * '='} documentation ===> [{bench.doc_url}]({bench.doc_url})\n"
self.console.print(Markdown(markdown_text))
class BenchmarkRunCMD(CMD):
""" Run a benchmark """
COMMAND = "run"
NAMESPACE = "benchmarks"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("name")
parser.add_argument("submission_dir")
parser.add_argument('--skip-validation', action="store_true", help="Skip the validation of submission")
parser.add_argument("-s", "--sets", nargs='*', action='store', default=('all',),
help="Limit the sets the benchmark is run on")
parser.add_argument("-t", "--tasks", nargs='*', action='store', default=('all',),
help="Limit the tasks the benchmark runs")
parser.add_argument('-q', '--quiet', action='store_true', default=False,
help="Do not print information to stdout")
def run(self, argv: argparse.Namespace):
try:
benchmark_type = BenchmarkList(argv.name)
except ValueError:
error_console.log(f"Specified benchmark ({argv.name}) does not exist !!!!")
warning_console.log(f"Use one of the following : {','.join(b for b in BenchmarkList)}")
sys.exit(1)
# Load benchmark
benchmark = benchmark_type.benchmark(quiet=argv.quiet)
spinner = self.console.status("Loading submission !")
spinner.start()
sub_dir = Path(argv.submission_dir)
if not sub_dir.is_dir():
error_console.log("Submission directory given does not exist !!!")
sys.exit(1)
load_args = {}
if 'all' not in argv.sets and len(argv.sets) > 0:
load_args['sets'] = argv.sets
        if 'all' not in argv.tasks and len(argv.tasks) > 0:
load_args['tasks'] = argv.tasks
submission = benchmark.load_submission(location=sub_dir, **load_args)
spinner.stop()
self.console.print(":heavy_check_mark: Submission loaded successfully", style="bold green")
if not argv.skip_validation:
with self.console.status("Validating submission... ", spinner="aesthetic"):
if not submission.valid:
error_console.print(f"Found Errors in submission: {submission.location}")
show_errors(submission.validation_output)
sys.exit(1)
self.console.print(":heavy_check_mark: Submission Valid", style="bold green")
# load saved parameters
self.console.print(f"Loaded parameters from :arrow_right: {submission.params_file}")
submission.params_obj = submission.load_parameters()
# update values from args
submission.params.quiet = argv.quiet
# run benchmark
benchmark.run(submission)
class BenchmarksInfoCMD(CMD):
""" List information on a benchmark """
COMMAND = "info"
NAMESPACE = "benchmarks"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("name")
def run(self, argv: argparse.Namespace):
try:
bench = BenchmarkList(argv.name)
except ValueError:
error_console.log(f"Specified benchmark ({argv.name}) does not exist !!!!")
warning_console.log(f"Use one of the following : {','.join(b for b in BenchmarkList)}")
sys.exit(1)
# print benchmark documentation
self.console.print(bench.benchmark.docs()) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/benchmarks.py | benchmarks.py |
import argparse
import sys
from pathlib import Path
from zerospeech.out import error_console, console as std_console
from zerospeech.settings import get_settings
from zerospeech.upload import SubmissionUploader, APIHTTPException, BenchmarkClosedError
from .cli_lib import CMD
st = get_settings()
class SubmitOnline(CMD):
""" Submit your results to zerospeech.com """
COMMAND = "submit"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument('-r', '--resume', action='store_true',
help='Try resuming submission from given directory')
parser.add_argument('-q', '--quiet', action='store_true',
help="Do not print status information")
parser.add_argument('-m', '--multipart', action='store_true',
help='Upload archive in multiple parts (better for large submissions)')
parser.add_argument('submission_dir', type=Path,
help="The directory containing the submission")
def run(self, argv: argparse.Namespace):
std_console.print("Feature Not Yet Available !!!", style="red bold")
def _run(self, argv: argparse.Namespace):
try:
if argv.resume:
uploader = SubmissionUploader.resume(Path(argv.submission_dir), quiet=argv.quiet)
else:
uploader = SubmissionUploader.from_submission(
submission=Path(argv.submission_dir),
quiet=argv.quiet,
multipart=argv.multipart
)
if not uploader.ready:
error_console.print("Oups :: Submission failed to prepare for upload, please try again !!!")
sys.exit(1)
# Upload
uploader.upload()
# clean-up
uploader.clean()
except APIHTTPException as e:
error_console.print(e)
sys.exit(1)
except BenchmarkClosedError as e:
error_console.print(e)
sys.exit(1) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/submit.py | submit.py |
import abc
import argparse
import sys
import uuid
from collections import namedtuple
from typing import Optional, Type
from treelib import Tree, Node
from zerospeech.out import console, void_console
NAMESPACE_SEP = ":"
LIST_OF_COMMANDS = []
class CMD(abc.ABC):
COMMAND = "<cmd_name>"
NAMESPACE = "<cmd-path>"
quiet: bool = False
def __init__(self, root):
self._unique_id = f"{uuid.uuid4()}"
self.__check_presets__()
prog = f"{root} {self.NAMESPACE}{NAMESPACE_SEP}{self.COMMAND}"
if self.NAMESPACE == '':
prog = f"{root} {self.COMMAND}"
self.parser = argparse.ArgumentParser(
prog=prog,
usage=f"{prog}[{NAMESPACE_SEP}subcommand] [<args>]",
formatter_class=argparse.RawTextHelpFormatter
)
# load description
if self.long_description:
self.parser.description = self.long_description
else:
self.parser.description = self.short_description
@classmethod
def __init_subclass__(cls, /, **kwargs):
super().__init_subclass__(**kwargs)
# append to global list for autodiscover
LIST_OF_COMMANDS.append(cls)
def __check_presets__(self):
""" Verify that subclass sets default parameters """
assert self.COMMAND != "<cmd_name>", "Command not set in class"
assert self.NAMESPACE != "<cmd-path>", "Command path not set in class"
@property
def short_description(self):
return self.__doc__
@property
def console(self):
if self.quiet:
return void_console
return console
@property
def long_description(self):
return self.run.__doc__
@property
def label(self):
return f"{self.COMMAND}:\t{self.short_description}"
def add_epilog(self, child_info):
# todo check if this works
self.parser.epilog = child_info
@property
def name(self) -> str:
return self.COMMAND
@property
def id(self) -> str:
return self._unique_id
def run_cmd(self, argv=None):
""" """
self.init_parser(self.parser)
# todo add argument completion
# argcomplete.autocomplete(parser)
argv = argv if argv is not None else sys.argv[1:]
args = self.parser.parse_args(argv)
# if quiet mode is set propagate it
self.quiet = getattr(args, 'quiet', False)
# run command
self.run(args)
@abc.abstractmethod
def init_parser(self, parser: argparse.ArgumentParser):
pass
@abc.abstractmethod
def run(self, argv: argparse.Namespace):
pass
class CommandTree:
__RootNodeLabel = namedtuple('__RootNodeLabel', 'label')
help_commands = ['help', 'list', 'commands', '--help', '-h']
autocomplete = '__all_cmd__'
autocomplete_fn = '__auto_fn__'
path_separator = NAMESPACE_SEP
def is_help_cmd(self, cmd):
""" Check if command is in help list """
return cmd in self.help_commands
def is_auto_fn(self, cmd):
""" Check if command is autocomplete_fn """
return cmd == self.autocomplete_fn
def is_autocomplete(self, cmd):
"""Check if command is autocomplete """
return cmd == self.autocomplete
def __init__(self, root_cmd: str, auto_discover: bool = True):
self.root_cmd = root_cmd
self.__cmd_tree = Tree()
self.__cmd_tree.create_node('.', 0, data=self.__RootNodeLabel(label='.'))
if auto_discover:
self.add_cmds(*LIST_OF_COMMANDS)
def find_cmd(self, path: str) -> Optional[Node]:
""" Find a cmd in the tree """
current_node = 0
for tag in path.split(self.path_separator):
if current_node is None:
# todo allow empty nodes ?
return None
current_node = next((x.identifier for x in self.__cmd_tree.children(current_node) if x.tag == tag), None)
return self.__cmd_tree.get_node(current_node)
def add_cmd(self, cmd_class: Type[CMD]):
""" Add a CMD to the current tree """
cmd = cmd_class(self.root_cmd)
father_node = self.find_cmd(cmd.NAMESPACE)
if father_node is None:
father_node = self.__cmd_tree.get_node(self.__cmd_tree.root)
self.__cmd_tree.create_node(
tag=f"{cmd.name}",
identifier=cmd.id,
data=cmd,
parent=father_node.identifier
)
def add_cmds(self, *cmd_items):
for cmd in cmd_items:
self.add_cmd(cmd)
def has_children(self, _id):
return self.__cmd_tree.children(_id)
def show(self, root=None) -> str:
if root:
return self.__cmd_tree.subtree(root).show(data_property="label", stdout=False)
else:
return self.__cmd_tree.show(data_property="label", stdout=False)
def build_epilogs(self):
""" Iterate over all nodes and append epilog to help message"""
for node in self.__cmd_tree.all_nodes():
if node.identifier == 0:
continue
if not self.has_children(node.identifier):
continue
epilog = "---\n" \
"list of available sub-commands : \n\n" \
f"{self.show(root=node.identifier)}"
node.data.add_epilog(epilog)
def get_all_paths(self):
paths_as_list = []
paths_as_str = []
tree = self.__cmd_tree
for leaf in tree.all_nodes():
paths_as_list.append([tree.get_node(nid).tag for nid in tree.rsearch(leaf.identifier)][::-1])
for item in paths_as_list:
if '.' in item:
item.remove('.')
paths_as_str.append(f"{self.path_separator}".join(item))
if '' in paths_as_str:
paths_as_str.remove('')
paths_as_str.extend(self.help_commands)
paths_as_str.append(self.autocomplete)
paths_as_str.append(self.autocomplete_fn)
return paths_as_str
class CLI:
""" The Command Line Interface Builder Class """
path_separator = NAMESPACE_SEP
def __init__(self, cmd_tree: CommandTree, *, description: str = "", usage: str = ""):
"""
:param cmd_tree:
:param description:
:param usage:
"""
self.cmd_tree = cmd_tree
# Build the tree epilogs
self.cmd_tree.build_epilogs()
# Help epilog
epilog = "---\n" \
"list of available commands : \n\n" \
f"{cmd_tree.show()}"
self.parser = argparse.ArgumentParser(
description=description,
usage=usage,
epilog=epilog,
formatter_class=argparse.RawTextHelpFormatter
)
self.parser.add_argument('command', help='Subcommand to run')
def run(self):
""" Run the Command Line Interface """
args = self.parser.parse_args(sys.argv[1:2])
# check if help is asked
if self.cmd_tree.is_help_cmd(args.command):
self.parser.print_help()
sys.exit(0)
# check if requesting cmd list for autocomplete
if self.cmd_tree.is_autocomplete(args.command):
print(" ".join(self.cmd_tree.get_all_paths()))
sys.exit(0)
# check if requesting auto complete bash function
if self.cmd_tree.is_auto_fn(args.command):
# print(BASH_AUTOCOMPLETE_FN)
sys.exit(0)
cmd_node = self.cmd_tree.find_cmd(args.command)
if cmd_node is None or cmd_node.identifier == 0:
print(f'Unrecognized command {args.command}\n', file=sys.stderr)
self.parser.print_help()
sys.exit(1)
cmd = cmd_node.data
if not isinstance(cmd, CMD):
print(f'Unrecognized command {args.command}\n', file=sys.stderr)
self.parser.print_help()
sys.exit(2)
# call sub-command
cmd.run_cmd(argv=sys.argv[2:]) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/cli_lib.py | cli_lib.py |
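

if __name__ == "__main__":  # pragma: no cover
    # Illustrative wiring of the classes above: build a command tree from the
    # auto-registered CMD subclasses (commands register themselves when their
    # modules are imported) and run the CLI. The root command name "zrc" is a
    # placeholder, not necessarily the real entry point.
    tree = CommandTree(root_cmd="zrc", auto_discover=True)
    cli = CLI(tree, description="zerospeech benchmark toolkit", usage="zrc <command> [<args>]")
    cli.run()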
import argparse
import sys
from rich.prompt import Confirm
from rich.table import Table
from .cli_lib import CMD
from ..out import console as std_console, error_console, warning_console
from zerospeech.upload import CurrentUser
class User(CMD):
""" User management command """
COMMAND = "user"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
""" """
pass
def run(self, argv: argparse.Namespace):
std_console.print("Feature Not Yet Available !!!", style="red bold")
def _run(self, argv: argparse.Namespace):
current = CurrentUser.load()
if current is None:
error_console.print("No current user session, please use login to create a session !")
sys.exit(1)
table = Table(show_header=False)
table.add_column("****")
table.add_column("****")
table.add_row("Username", current.username)
table.add_row("Email", current.email)
table.add_row("Affiliation", current.affiliation)
table.add_row("Session Expiry", current.token.expiry.strftime("%m/%d/%Y, %H:%M:%S"))
std_console.print(table)
class UserLogin(CMD):
""" User loging to the zerospeech.com platform """
COMMAND = "login"
NAMESPACE = "user"
def init_parser(self, parser: argparse.ArgumentParser):
""" """
pass
def run(self, argv: argparse.Namespace):
std_console.print("Feature Not Yet Available !!!", style="red bold")
def _run(self, argv: argparse.Namespace):
if CurrentUser.session_file.is_file():
CurrentUser.clear()
try:
usr = CurrentUser.login()
std_console.print(f"User {usr.username} was logged in successfully !", style="bold green")
except ValueError:
error_console.print("User authentication failed, bad credentials")
class UserClear(CMD):
""" Clear the saved login """
COMMAND = "clear"
NAMESPACE = "user"
def init_parser(self, parser: argparse.ArgumentParser):
""" """
pass
def run(self, argv: argparse.Namespace):
if Confirm("Are you sure you want to clear the current session ?", console=warning_console):
CurrentUser.clear() | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/user.py | user.py |
import argparse
import platform
import sys
import urllib.parse
import webbrowser
from importlib.metadata import version, PackageNotFoundError
from typing import Optional
import distro
from rich.table import Table
from .cli_lib import CMD
from zerospeech.settings import get_settings
from zerospeech.out import error_console, console as std_console
st = get_settings()
class ResetIndex(CMD):
""" Reset remote index """
COMMAND = "reset-index"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
""" """
pass
def run(self, argv: argparse.Namespace):
st.repository_index.unlink(missing_ok=True)
std_console.print("Index has been reset successfully !!", style="bold green")
class Version(CMD):
""" Print the current version used """
COMMAND = "version"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
""" """
pass
@staticmethod
def get_package_version(pkg_name: str) -> Optional[str]:
try:
return version(pkg_name)
except PackageNotFoundError:
# package is not installed
return None
def run(self, argv: argparse.Namespace):
zr_bench = self.get_package_version("zerospeech-benchmarks")
# benchmark versions
abx = self.get_package_version("zerospeech-libriabx")
abx2 = self.get_package_version("zerospeech-libriabx2")
tde = self.get_package_version("zerospeech-tde")
        torch = self.get_package_version('torch')
        numpy = self.get_package_version('numpy')
        torchaudio = self.get_package_version('torchaudio')
table = Table(show_header=False, header_style="bold magenta")
table.add_column("Package")
table.add_column("Version")
if zr_bench is None:
error_console.print("ERROR: module zerospeech-benchmark not installed locally")
sys.exit(1)
table.add_row("zerospeech-benchmarks", zr_bench, end_section=True)
if abx:
table.add_row("zerospeech-libriabx", abx)
if abx2:
table.add_row("zerospeech-libriabx2", abx2)
if tde:
table.add_row("zerospeech-tde", tde)
if numpy:
table.add_row("numpy", numpy)
if torch:
table.add_row("torch", torch)
if torchaudio:
table.add_row("torchaudio", torchaudio)
table.add_row(end_section=True)
table.add_row("python", sys.version, end_section=True)
_ = platform.platform(aliased=True) # os_alias
if 'linux' in platform.system().lower():
table.add_row("Operating System", f"{distro.name(pretty=True)}\n{platform.platform(aliased=True)}")
else:
table.add_row("Operating System", f"{platform.platform(aliased=True)}")
std_console.print(table)
class HelpCMD(CMD):
    """ Support & help resources """
COMMAND = "support"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
""" """
pass
def run(self, argv: argparse.Namespace):
""" """
pass
class AskHelpCMD(CMD):
""" Send an email to ask for help """
COMMAND = "email"
NAMESPACE = "support"
def init_parser(self, parser: argparse.ArgumentParser):
pass
def run(self, argv: argparse.Namespace):
os_info = urllib.parse.quote(f"{platform.system()}-{platform.release()}-{platform.version()}")
py_info = urllib.parse.quote(f"{sys.version}".replace('\n', ''))
# todo add installed packages from pip / conda
tech_info = f'%0D%0A%0D%0A%5BINFO%5D%3A%0D%0AOS%3A%20{os_info}%0D%0APYTHON%3A%20{py_info}'
url = f'mailto:{st.admin_email}?subject=%5BZR-BENCHMARK%5D%5BSUPPORT%5D&body={tech_info}'
webbrowser.open(url, new=1)
class DocumentationCMD(CMD):
""" Opens our documentation """
COMMAND = "docs"
NAMESPACE = "support"
def init_parser(self, parser: argparse.ArgumentParser):
""" """
pass
def run(self, argv: argparse.Namespace):
webbrowser.open('https://zerospeech.com/', new=1) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/generic.py | generic.py |
import argparse
import sys
from pathlib import Path
from rich.padding import Padding
from rich.table import Table
from zerospeech.generics import checkpoints
from zerospeech.misc import md5sum, extract
from zerospeech.networkio import check_update_repo_index, update_repo_index
from zerospeech.out import console, error_console, void_console, warning_console
from zerospeech.settings import get_settings
from .cli_lib import CMD
st = get_settings()
class CheckpointsCMD(CMD):
"""Manipulate Checkpoints """
COMMAND = "checkpoints"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("--local", action="store_true", help="List local checkpoint only")
def run(self, argv: argparse.Namespace):
checkpoints_dir = checkpoints.CheckpointDir.load()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("Name")
table.add_column("Origin")
table.add_column("Size")
table.add_column("Installed")
if argv.local:
dt_list = checkpoints_dir.items
else:
dt_list = checkpoints_dir.available_items
for d in dt_list:
dts = checkpoints_dir.get(d)
if dts.origin.type == 'internal':
host = st.repo_origin.host
else:
host = "external"
table.add_row(
dts.name, host, dts.origin.size_label, f"{dts.installed}"
)
console.print(
Padding(f"==> RootDir: {checkpoints_dir.root_dir}", (1, 0, 1, 0), style="bold grey70", expand=False))
console.print(table)
class PullCheckpointCMD(CMD):
""" Download a checkpoint item """
COMMAND = "pull"
NAMESPACE = "checkpoints"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument('name')
parser.add_argument('-u', '--skip-verification', action='store_true', help="Skip archive verification")
parser.add_argument('-q', '--quiet', action='store_true', help='Suppress download info output')
def run(self, argv: argparse.Namespace):
# update repo index if necessary
if check_update_repo_index():
update_repo_index()
chkpt_dir = checkpoints.CheckpointDir.load()
chkpt = chkpt_dir.get(argv.name, cls=checkpoints.CheckPointItem)
chkpt.pull(quiet=argv.quiet, show_progress=True, verify=not argv.skip_verification)
class ImportCheckpointCMD(CMD):
""" Import checkpoints from a zip archive """
COMMAND = "import"
NAMESPACE = "checkpoints"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("zip_file")
parser.add_argument('-u', '--skip-verification', action='store_true',
help='Do not check hash in repo index.')
parser.add_argument('-q', '--quiet', action='store_true',
help='Suppress download info output')
def run(self, argv: argparse.Namespace):
# update repo index if necessary
if check_update_repo_index():
update_repo_index()
chkpt_dir = checkpoints.CheckpointDir.load()
archive = Path(argv.zip_file)
std_out = console
if argv.quiet:
std_out = void_console
        if not archive.is_file() or archive.suffix != '.zip':
            error_console.print(f'Given archive ({archive}) does not exist or is not a valid zip archive !!!')
sys.exit(1)
if not argv.skip_verification:
with std_out.status(f'Hashing {archive.name}'):
md5hash = md5sum(archive)
item = chkpt_dir.find_by_hash(md5hash)
if item is None:
error_console.print(f'Archive {archive.name} does not correspond to a registered checkpoint archive')
sys.exit(1)
name = item.name
std_out.print(f"[green]Checkpoint {name} detected")
else:
name = archive.stem
warning_console.print(f"Importing {name} without checking, could be naming/file mismatch")
with std_out.status(f"Unzipping {name}..."):
extract(archive, chkpt_dir.root_dir / name)
std_out.print(f"[green]Checkpoint {name} installed successfully !!")
class RemoveCheckpointCMD(CMD):
""" Remove a checkpoint item """
COMMAND = "rm"
NAMESPACE = "checkpoints"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument('name')
def run(self, argv: argparse.Namespace):
checkpoints_dir = checkpoints.CheckpointDir.load()
cpt = checkpoints_dir.get(argv.name)
if cpt:
cpt.uninstall()
console.log("[green] Checkpoint uninstalled successfully !")
else:
error_console.log(f"Failed to find checkpoint named :{argv.name}") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/checkpoints.py | checkpoints.py |
import argparse
import sys
from pathlib import Path
from zerospeech.benchmarks import BenchmarkList
from zerospeech.out import error_console, warning_console, console as std_console
from zerospeech.submissions import MetaFile, show_errors
from zerospeech.tasks import BenchmarkParameters
from .cli_lib import CMD
class Submission(CMD):
""" Submission manager subcommand """
COMMAND = "submission"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
parser.print_help()
def run(self, argv: argparse.Namespace):
""" """
pass
class SubmissionInit(CMD):
""" Initialise a directory for a specific benchmark """
COMMAND = "init"
NAMESPACE = "submission"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("name")
parser.add_argument("location")
def run(self, argv: argparse.Namespace):
try:
benchmark_type = BenchmarkList(argv.name)
except ValueError:
error_console.log(f"Specified benchmark ({argv.name}) does not exist !!!!")
warning_console.log(f"Use one of the following : {','.join(b for b in BenchmarkList)}")
sys.exit(1)
# Load benchmark
benchmark = benchmark_type.benchmark()
location = Path(argv.location)
if location.is_dir():
error_console.log("Location specified already exists !!!")
sys.exit(2)
with std_console.status("Initialising submission dir"):
benchmark.init_submission_dir(location)
std_console.print(f"Submission directory created @ {location}", style="green bold")
class BenchmarkParamsCMD(CMD):
""" Create template params.yaml """
COMMAND = "params"
NAMESPACE = "submission"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("submission_dir")
parser.add_argument('-r', '--reset', action="store_true", help="Reset params.yaml to default values")
def run(self, argv: argparse.Namespace):
location = Path(argv.submission_dir)
if not location.is_dir():
error_console("Location specified does not exist !!!")
sys.exit(2)
benchmark_name = None
try:
benchmark_name = MetaFile.benchmark_from_submission(location)
if benchmark_name is None:
raise TypeError("benchmark not found")
benchmark_type = BenchmarkList(benchmark_name)
except TypeError:
error_console.log(f"Specified submission does not have a valid {MetaFile.file_stem}"
f"\nCannot find benchmark type")
sys.exit(1)
except ValueError:
error_console.log(f"Specified benchmark ({benchmark_name}) does not exist !!!!")
warning_console.log(f"Use one of the following : {','.join(b for b in BenchmarkList)}")
sys.exit(1)
# Load benchmark
        benchmark = benchmark_type.benchmark(quiet=self.quiet)
if argv.reset:
# remove old params file if exists
(location / BenchmarkParameters.file_stem).unlink(missing_ok=True)
submission = benchmark.load_submission(location)
if argv.reset:
self.console.log(f"Params file created/reset at @ {submission.params_file}")
self.console.print(submission.params)
class SubmissionVerify(CMD):
""" Verify the validity of a submission """
COMMAND = "verify"
NAMESPACE = "submission"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("location")
def run(self, argv: argparse.Namespace):
location = Path(argv.location)
if not location.is_dir():
error_console("Location specified does not exist !!!")
sys.exit(2)
benchmark_name = None
try:
benchmark_name = MetaFile.benchmark_from_submission(location)
if benchmark_name is None:
raise TypeError("benchmark not found")
benchmark_type = BenchmarkList(benchmark_name)
except TypeError:
error_console.log(f"Specified submission does not have a valid {MetaFile.file_stem}"
f"\nCannot find benchmark type")
sys.exit(1)
except ValueError:
error_console.log(f"Specified benchmark ({benchmark_name}) does not exist !!!!")
warning_console.log(f"Use one of the following : {','.join(b for b in BenchmarkList)}")
sys.exit(1)
# Load benchmark
        benchmark = benchmark_type.benchmark(quiet=self.quiet)
submission = benchmark.load_submission(location)
with std_console.status(f"Validating submission @ {location}"):
_ = submission.valid
if submission.valid:
std_console.print(f"Submission @ {location} is a valid submission for {bench} :heavy_check_mark:",
style='bold green')
else:
show_errors(submission.validation_output) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/submission.py | submission.py |
import argparse
import sys
from pathlib import Path
from rich.padding import Padding
from rich.table import Table
from zerospeech.datasets import DatasetsDir, Dataset
from zerospeech.misc import md5sum, extract
from zerospeech.networkio import check_update_repo_index, update_repo_index
from zerospeech.out import console, error_console, warning_console, void_console
from zerospeech.settings import get_settings
from .cli_lib import CMD
st = get_settings()
class DatasetCMD(CMD):
""" Manipulate Datasets """
COMMAND = "datasets"
NAMESPACE = ""
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument("--local", action="store_true", help="List local datasets only")
def run(self, argv: argparse.Namespace):
datasets_dir = DatasetsDir.load()
table = Table(show_header=True, header_style="bold magenta")
table.add_column("Name")
table.add_column("Origin")
table.add_column("Size")
table.add_column("Installed")
if argv.local:
dt_list = datasets_dir.items
else:
dt_list = datasets_dir.available_items
for d in dt_list:
dts = datasets_dir.get(d)
if dts.origin.type == 'internal':
host = st.repo_origin.host
else:
host = "external"
table.add_row(
dts.origin.name, host, dts.origin.size_label, f"{dts.installed}"
)
console.print(Padding(f"==> RootDir: {datasets_dir.root_dir}", (1, 0, 1, 0), style="bold grey70", expand=False))
console.print(table)
class PullDatasetCMD(CMD):
""" Download a dataset """
COMMAND = "pull"
NAMESPACE = "datasets"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument('name')
parser.add_argument('-u', '--skip-verification', action='store_true', help="Skip archive verification")
parser.add_argument('-q', '--quiet', action='store_true', help='Suppress download info output')
def run(self, argv: argparse.Namespace):
# update repo index if necessary
if check_update_repo_index():
update_repo_index()
datasets_dir = DatasetsDir.load()
dataset = datasets_dir.get(argv.name, cls=Dataset)
dataset.pull(quiet=argv.quiet, show_progress=True, verify=not argv.skip_verification)
class ImportDatasetCMD(CMD):
""" Import a dataset from a zip file """
COMMAND = "import"
NAMESPACE = "datasets"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument('zip_file')
parser.add_argument('-u', '--skip-verification', action='store_true',
help='Do not check hash in repo index.')
parser.add_argument('-q', '--quiet', action='store_true',
help='Suppress download info output')
def run(self, argv: argparse.Namespace):
datasets_dir = DatasetsDir.load()
archive = Path(argv.zip_file)
std_out = console
if argv.quiet:
std_out = void_console
        if not archive.is_file() or archive.suffix != '.zip':
            error_console.print(f'Given archive ({archive}) does not exist or is not a valid zip archive !!!')
sys.exit(1)
if not argv.skip_verification:
with std_out.status(f'Hashing {archive.name}'):
md5hash = md5sum(archive)
item = datasets_dir.find_by_hash(md5hash)
if item is None:
error_console.print(f'Archive {archive.name} does not correspond to a registered dataset')
sys.exit(1)
name = item.name
std_out.print(f"[green]Dataset {name} detected")
else:
name = archive.stem
warning_console.print(f"Importing {name} without checking, could be naming/file mismatch")
# unzip dataset
with std_out.status(f"Unzipping {name}..."):
extract(archive, datasets_dir.root_dir / name)
std_out.print(f"[green]Dataset {name} installed successfully !!")
class RemoveDatasetCMD(CMD):
""" Remove a dataset item """
COMMAND = "rm"
NAMESPACE = "datasets"
def init_parser(self, parser: argparse.ArgumentParser):
parser.add_argument('name')
def run(self, argv: argparse.Namespace):
dataset_dir = DatasetsDir.load()
dts = dataset_dir.get(argv.name)
if dts:
dts.uninstall()
console.log("[green] Dataset uninstalled successfully !")
else:
error_console.log(f"Failed to find dataset named :{argv.name}") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/cmd/datasets.py | datasets.py |
import abc
import functools
import json
import shutil
from datetime import datetime
from pathlib import Path
from typing import List, Optional, Type, ClassVar, Literal, Union, Tuple, Dict, Any
from pydantic import BaseModel, AnyHttpUrl, validator, parse_obj_as, DirectoryPath, ByteSize, root_validator
from ..settings import get_settings
st = get_settings()
RepositoryItemType = Literal[
'datasets', 'samples', 'checkpoints', 'origin_item', 'downloadable_item', 'importable_item']
ItemType = Literal['internal', 'external']
InstallAction = Literal['download', 'symlink', 'download_extract']
class RepositoryItem(BaseModel):
""" An item represents a dataset inside the repository that can be pulled locally """
name: str
type: ItemType
zip_url: Optional[AnyHttpUrl]
zip_parts: Optional[List[AnyHttpUrl]]
install_config: Optional[AnyHttpUrl]
md5sum: str
total_size: ByteSize
details_url: Optional[AnyHttpUrl]
description: Optional[str]
@property
def origin_host(self) -> str:
return self.zip_url.host
@property
def size_label(self) -> str:
return self.total_size.human_readable(decimal=True)
@validator('zip_url', pre=True)
def cast_url1(cls, v):
""" Cast strings to AnyHttpUrl """
if isinstance(v, str):
return parse_obj_as(AnyHttpUrl, v)
return v
@validator('zip_parts', pre=True)
def cast_url2(cls, v):
""" Cast strings to AnyHttpUrl """
if isinstance(v, str):
return parse_obj_as(AnyHttpUrl, v)
return v
@root_validator(pre=True)
def validate_type(cls, values):
valid_types = ('internal', 'external')
current_type = values.get('type', 'internal')
values['type'] = current_type
assert current_type in valid_types, f'Type should be one of the following {valid_types}'
if current_type == 'internal':
assert values.get('zip_url') is not None \
or values.get('zip_parts') is not None, \
"Internal Items must have either a zip_url or a zip_parts value."
elif values.get('type') == 'external':
assert values.get('install_config') is not None, "External items must have an install_config field"
return values
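# Illustrative sketch (hypothetical values, not executed on import): the kind of index
# entry RepositoryItem is designed to parse; real entries come from the repository
# index file on disk.
def _example_repository_item_sketch() -> RepositoryItem:
    """ Parse a hand-written entry, for illustration only """
    entry = {
        "name": "example-dataset",
        "type": "internal",  # internal items must provide zip_url or zip_parts
        "zip_url": "https://example.org/example-dataset.zip",
        "md5sum": "d41d8cd98f00b204e9800998ecf8427e",
        "total_size": "1.5GB",  # ByteSize accepts human-readable strings
        "description": "A hypothetical dataset entry",
    }
    return RepositoryItem.parse_obj(entry)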
class RepositoryIndex(BaseModel):
""" Item Indexing all datasets available online various repositories."""
last_modified: datetime
datasets: List[RepositoryItem]
checkpoints: List[RepositoryItem]
samples: List[RepositoryItem]
@classmethod
@functools.lru_cache
def load(cls):
""" Load index from disk """
with st.repository_index.open() as fp:
data = json.load(fp)
return cls(**data)
class InstallRule(BaseModel):
action: InstallAction
source: Optional[AnyHttpUrl]
target: Optional[str]
source_target: Optional[List[Tuple[str, str]]]
source_size: Optional[ByteSize]
@root_validator(pre=True)
def validate_actions(cls, values):
actions = ('download', 'symlink', 'download_extract')
if values['action'] == 'download':
assert values.get('source') is not None, "Download action requires a source"
assert values.get('target') is not None, "Download action requires a target"
elif values['action'] == 'symlink':
assert values.get('source_target') is not None, "Symlink action requires a source_target"
elif values['action'] == 'download_extract':
assert values.get('source') is not None, "Download_Extract action requires a source"
assert values.get('target') is not None, "Download_Extract action requires a target"
assert values.get('source_size') is not None, "Download_Extract action requires a source_size"
        else:
            assert values['action'] in actions, \
                f"Action needs to be one of the following {actions}"
        # root validators must return the (possibly updated) values dict
        return values
class InstallConfig(BaseModel):
rules: Dict[str, InstallRule]
index_obj: Dict[str, Any]
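# Illustrative sketch (hypothetical values): how install rules for an 'external' item
# could be declared. Each rule is checked by InstallRule.validate_actions above: a
# 'download' rule needs source and target, 'symlink' needs (source, target) pairs and
# 'download_extract' additionally needs a source_size.
def _example_install_config_sketch() -> InstallConfig:
    """ Build a hand-written InstallConfig, for illustration only """
    return InstallConfig(
        rules={
            "fetch_audio": InstallRule(
                action="download_extract",
                source="https://example.org/audio.zip",
                target="audio",
                source_size="500MB",
            ),
            "link_meta": InstallRule(
                action="symlink",
                source_target=[("shared/meta.json", "meta.json")],
            ),
        },
        index_obj={"name": "example-external-item"},
    )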
class OriginItem(BaseModel, abc.ABC):
""" An item that has an external origin derived from the repository """
key_name: ClassVar[RepositoryItemType] = "origin_item"
location: Path
origin: RepositoryItem
@property
def installed(self) -> bool:
""" Check if item is installed locally"""
return self.location.is_dir()
def uninstall(self):
""" Uninstall item from local storage """
shutil.rmtree(self.location)
@property
def name(self) -> str:
return self.origin.name
class DownloadableItem(OriginItem, abc.ABC):
""" Abstract class to define an item that can be downloaded from the repository """
@abc.abstractmethod
def pull(self, *, verify: bool = True, quiet: bool = False, show_progress: bool = False):
""" Pull item from origin """
pass
class ImportableItem(OriginItem, abc.ABC):
"""Abstract class to define item that can be imported from a local resource """
@abc.abstractmethod
def import_zip(self, *, archive: Path):
""" Import dataset from an archive """
pass
class RepoItemDir(BaseModel, abc.ABC):
""" Abstract class defining a directory manager for repository items of a specific type """
root_dir: DirectoryPath
item_type: ClassVar[Union[Type[DownloadableItem], Type[ImportableItem]]]
@classmethod
@abc.abstractmethod
def load(cls):
pass
@property
def items(self) -> List[str]:
""" Returns a list of installed items """
return [d.name for d in self.root_dir.iterdir() if d.is_dir()]
@property
def available_items(self) -> List[str]:
index = RepositoryIndex.load()
item_list: List[RepositoryItem] = getattr(index, self.item_type.key_name, [])
return [d.name for d in item_list]
def find_by_hash(self, hash_code: str) -> Optional[RepositoryItem]:
index = RepositoryIndex.load()
item_list: List[RepositoryItem] = getattr(index, self.item_type.key_name, [])
for d in item_list:
if d.md5sum == hash_code:
return d
return None
def find_by_name(self, name: str) -> Optional[RepositoryItem]:
"""Find all relevant items with the same type in repository """
index = RepositoryIndex.load()
item_list: List[RepositoryItem] = getattr(index, self.item_type.key_name, [])
for d in item_list:
if d.name == name:
return d
return None
def get(self, name, cls: Union[Type[DownloadableItem], Type[ImportableItem]] = None):
loc = self.root_dir / name
repo = self.find_by_name(name)
if repo is None:
return None
if cls is None:
return self.item_type(location=loc, origin=repo)
return cls(location=loc, origin=repo) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/generics/repository.py | repository.py |
from pathlib import Path
from typing import Optional, Dict, Generic, Any, TypeVar, Tuple
from pydantic import BaseModel, validator, Field
from pydantic.generics import GenericModel
from .data_items import (
Item, ItemType, FileListItem, FileItem
)
T = TypeVar("T")
class Namespace(GenericModel, Generic[T]):
""" Simple object for storing attributes. """
store: Dict[str, T] = Field(default_factory=dict)
@property
def names(self) -> Tuple[str, ...]:
return tuple(self.store.keys())
@property
def as_dict(self) -> Dict[str, T]:
""" Get Store as dict """
return self.store
def get(self, name: str, default: Any = None):
""" Access items by name """
return self.store.get(name, default)
def __getattr__(self, name) -> Optional[T]:
""" Reimplementation of getattr """
a: T = self.store.get(name, None)
return a
def __iter__(self):
""" Allow to iterate over store """
return iter(self.store.items())
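# Illustrative sketch: Namespace is a thin typed wrapper around a dict that also allows
# attribute-style access; the stored values here are plain strings, whereas the codebase
# usually stores Item subclasses.
def _example_namespace_sketch() -> None:
    """ Demonstrate dict-style and attribute-style access, for illustration only """
    ns = Namespace[str](store={"gold": "gold.csv", "pairs": "pairs.csv"})
    assert ns.names == ("gold", "pairs")  # keys of the underlying store
    assert ns.get("gold") == "gold.csv"   # dict-style access with optional default
    assert ns.pairs == "pairs.csv"        # attribute-style access via __getattr__
    assert ns.missing is None             # unknown names resolve to None
    for name, value in ns:                # iteration yields (key, value) pairs
        assert isinstance(name, str) and isinstance(value, str)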
class Subset(BaseModel):
""" A subset of a dataset containing various items."""
items: Namespace[Item]
@property
def names(self) -> Tuple[str, ...]:
return self.items.names
@validator("items", pre=True)
def items_parse(cls, values):
""" Allow items to be cast to the correct subclass """
casted_items = dict()
        for k, v in values.items():
item_type = ItemType(v.get('item_type', "base_item"))
if item_type == ItemType.filelist_item:
casted_items[k] = FileListItem.parse_obj(v)
elif item_type == ItemType.file_item:
casted_items[k] = FileItem.parse_obj(v)
else:
v["item_type"] = item_type
casted_items[k] = Item(**v)
return Namespace[Item](store=casted_items)
def make_relative(self, relative_to: Path):
""" Convert all the items to relative paths """
for _, item in self.items:
item.relative_to(relative_to)
def make_absolute(self, root_dir: Path):
""" Convert all items to absolute paths """
for _, item in self.items:
item.absolute_to(root_dir) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/generics/named_storage.py | named_storage.py |
from enum import Enum
from pathlib import Path
from typing import List, Callable, Iterator
from pydantic import BaseModel, validator
class FileTypes(str, Enum):
txt = "txt"
npy = "npy"
csv = "csv"
wav = "wav"
flac = "flac"
item = "item" # abx task file
tsv = "tsv"
json = "json"
yaml = "yaml"
phn = "phn" # phone alignment file
wrd = "wrd" # words alignment file
vad = "vad.csv" # vad segmentation file in csv format
@property
def ext(self) -> str:
return f".{self.value}"
@classmethod
def dataframe_types(cls):
return {
cls.csv, cls.txt, cls.tsv, cls.item, cls.vad, cls.wrd, cls.phn
}
@classmethod
def audio_types(cls):
return {
cls.wav, cls.flac
}
@classmethod
def numpy_types(cls):
return {
cls.npy, cls.txt
}
class ItemType(str, Enum):
base_item = "base_item"
filelist_item = "filelist_item"
file_item = "file_item"
class Item(BaseModel):
""" The Atom of the dataset an item can be a file list, a file or a datastructure """
item_type: ItemType = ItemType.base_item
file_type: FileTypes
relative_path: bool
@validator('file_type', pre=True)
def fix_file_extension(cls, v):
if isinstance(v, str):
return v.lstrip('.')
return v
    def relative_to(self, path: Path):
        """ Mark the item as relative; the base Item holds no paths to convert """
        self.relative_path = True
    def absolute_to(self, path: Path):
        """ Mark the item as absolute; the base Item holds no paths to convert """
        self.relative_path = False
class Config:
extra = "allow"
class FileListItem(Item):
item_type: ItemType = ItemType.filelist_item
files_list: List[Path]
@classmethod
def from_dir(cls, path: Path, f_type: FileTypes):
""" Build a FileListItem from a directory"""
temp = path
rgexp = f"*{f_type.ext}"
thing = temp.rglob(rgexp)
file_list = list(thing)
return cls(
file_type=f_type,
files_list=file_list,
relative_path=False
)
def __iter__(self) -> Iterator[Path]:
return iter(self.files_list)
def relative_to(self, path: Path):
""" Convert all paths to relative if they are absolute """
if not self.relative_path:
for i in range(len(self.files_list)):
self.files_list[i] = self.files_list[i].relative_to(path)
# call method in super class
super(FileListItem, self).relative_to(path)
def absolute_to(self, path: Path):
""" Convert all paths to absolute if they are relative """
if self.relative_path:
for i in range(len(self.files_list)):
self.files_list[i] = path / self.files_list[i]
# call method in super class
super(FileListItem, self).absolute_to(path)
class Config:
extra = "ignore"
FileValidator = Callable[["FileItem"], bool]
class FileItem(Item):
item_type: ItemType = ItemType.file_item
file: Path
@classmethod
def from_file(cls, path: Path, relative: bool = False):
""" Build a FileItem from a path """
suffix = path.suffix.replace('.', '')
return cls(
file=path,
file_type=FileTypes(suffix),
relative_path=relative
)
def valid(self, validate: FileValidator) -> bool:
# todo rethink a bit this validation pattern
return self.file.resolve().is_file() and validate(self)
def relative_to(self, path: Path):
""" Convert all paths to relative if they are absolute """
if not self.relative_path:
self.file = self.file.relative_to(path)
# call method in super class
super(FileItem, self).relative_to(path)
def absolute_to(self, path: Path):
""" Convert all paths to absolute if they are relative """
if self.relative_path:
self.file = path / self.file
# call method in super class
super(FileItem, self).absolute_to(path)
class Config:
extra = "ignore" | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/generics/data_items.py | data_items.py |
import abc
import json
import os
import sys
import warnings
from pathlib import Path
from typing import Tuple, Set, TYPE_CHECKING, NamedTuple
import joblib
try:
from tde.measures.boundary import Boundary
from tde.measures.coverage import Coverage
from tde.measures.grouping import Grouping
from tde.measures.ned import Ned
from tde.measures.token_type import TokenType
from tde.readers.disc_reader import Disc
from tde.readers.gold_reader import Gold
except ImportError:
Boundary, Coverage, Grouping, Ned = ..., ..., ..., ...
TokenType, Disc, Gold = ..., ..., ...
warnings.warn('tde module was not installed')
from zerospeech.misc import exit_after
from zerospeech.generics import FileItem
from zerospeech.tasks import Task
if TYPE_CHECKING:
from zerospeech.datasets import Dataset
from zerospeech.submissions import Submission
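# Illustrative sketch (not the real implementation): `exit_after`, imported above from
# zerospeech.misc, caps the grouping computation below. A timeout guard of that kind can
# be built with a timer thread that interrupts the main thread, which is why the
# grouping code catches KeyboardInterrupt.
def _example_exit_after_sketch(seconds: int):
    """ Hypothetical timeout decorator, for illustration only """
    import threading
    import _thread
    import functools
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            timer = threading.Timer(seconds, _thread.interrupt_main)
            timer.start()
            try:
                return fn(*args, **kwargs)
            finally:
                timer.cancel()
        return wrapper
    return decorator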
class TDEItems(NamedTuple):
wrd_path: Path
phn_path: Path
input_classes: Path
class TDETask(Task, abc.ABC):
""" TDE Task """
_name = "tde-task"
tasks: Tuple
metrics: Set = {'grouping', 'matching', 'boundary', 'token_type', 'nlp'}
njobs: int = 1
result_filename: str = "scores.json"
grouping_max_time: int = 7200
@staticmethod
def read_discovered(item: Path, gold: Gold):
""" Load discovered Intervals """
# Disc class prints a bunch of nonsense, so we force it to be quiet
sys.stdout = open(os.devnull, 'w')
try:
return Disc(str(item), gold)
finally:
sys.stdout = sys.__stdout__
def gather_metrics(self, gold: Gold, discovered: Disc, lang: str):
scores = dict(
matching=dict(), boundary=dict(), token=dict(), type=dict(), nlp=dict(), grouping=dict()
)
# Boundary
if 'boundary' in self.metrics:
with self.console.status(f"Computing {lang} boundary"):
boundary = Boundary(gold, discovered)
boundary.compute_boundary()
scores['boundary'].update(dict(
precision=boundary.precision,
recall=boundary.recall,
fscore=boundary.fscore
))
self.console.print(f"Boundary computed for {lang} :heavy_check_mark:", style="bold green")
# Token & Type
if 'token_type' in self.metrics:
with self.console.status(f"Computing {lang} token & type"):
token_type = TokenType(gold, discovered)
token_type.compute_token_type()
scores['token'] = dict()
scores['type'] = dict()
scores['token']['precision'], scores['type']['precision'] = token_type.precision
scores['token']['recall'], scores['type']['recall'] = token_type.recall
scores['token']['fscore'], scores['type']['fscore'] = token_type.fscore
                scores['nlp']['nwords'] = len(token_type.type_seen)
self.console.print(f"Token & Type computed for {lang} :heavy_check_mark:", style="bold green")
# NLP
if 'nlp' in self.metrics:
with self.console.status(f"Computing {lang} NLP"):
coverage = Coverage(gold, discovered)
coverage.compute_coverage()
ned = Ned(discovered)
ned.compute_ned()
scores["nlp"].update(dict(
ned=ned.ned,
coverage=coverage.coverage,
npairs=ned.n_pairs
))
self.console.print(f"NLP computed for {lang} :heavy_check_mark:", style="bold green")
# Grouping
if 'grouping' in self.metrics:
@exit_after(self.grouping_max_time)
def compute_grouping():
""" Compute grouping within allocated time """
with self.console.status(f"Computing {lang} Grouping"):
grouping = Grouping(discovered)
grouping.compute_grouping()
return dict(
precision=grouping.precision,
recall=grouping.recall,
fscore=grouping.fscore
)
try:
grouping_score = compute_grouping()
scores['grouping'].update(grouping_score)
self.console.print(f"Grouping computed for {lang} :heavy_check_mark:", style="bold green")
except KeyboardInterrupt:
scores['grouping'].update(dict(
precision=None,
recall=None,
fscore=None
))
self.console.print(f"Grouping computing for {lang} was aborted due to timeout !!", style="bold red")
def score_or_none(data):
if len(data):
return data
return None
return {m: score_or_none(score) for m, score in scores.items()}
@staticmethod
def load_gold(wrd: Path, phn: Path) -> Gold:
""" Load gold object for current language set """
# load gold files
return Gold(
wrd_path=str(wrd),
phn_path=str(phn)
)
def _eval_lang(self, lang: str, items: TDEItems):
""" Evaluate tde for specific language """
self.console.print(f"Loading gold for {lang}...")
gold = self.load_gold(wrd=items.wrd_path, phn=items.phn_path)
# load discovered intervals
self.console.print(f"Loading class discovery for {lang}...")
discovered = self.read_discovered(
items.input_classes, gold
)
self.console.print(f"Gathering metrics for {lang} ...")
return lang, self.gather_metrics(gold, discovered, lang)
@abc.abstractmethod
def gather_items(self, lang: str, submission: "Submission", dataset: "Dataset") -> TDEItems:
pass
def eval(self, submission: "Submission", dataset: "Dataset"):
""" Evaluate the submission """
print(f"Running with {self.njobs} cores!!")
# Run evaluation with multiprocess if specified
eval_items = {
lang: self.gather_items(lang=lang, submission=submission, dataset=dataset)
for lang in self.tasks
}
res = joblib.Parallel(n_jobs=self.njobs)(
joblib.delayed(self._eval_lang)(lang, items) for lang, items in eval_items.items()
)
scores = dict(res)
self.console.print(f":pencil: writing scores {self.result_filename}", style="underline yellow4")
with (submission.score_dir / self.result_filename).open('w') as fp:
json.dump(scores, fp) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/tde.py | tde.py |
from typing import Tuple, TYPE_CHECKING
import pandas as pd
from .params import LexicalParams
from zerospeech.data_loaders import load_dataframe
from zerospeech.generics import FileItem
from zerospeech.tasks import Task
if TYPE_CHECKING:
from zerospeech.submissions.sLM21 import SLM21Submission
from zerospeech.datasets import SLM21Dataset
default_params = LexicalParams()
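# Illustrative sketch: the pairwise scoring rule used in eval_by_pair below gives 1 when
# the real word receives the higher score, 0.5 on a tie and 0 otherwise; the values in
# this toy array are made up.
def _example_lexical_pair_score_sketch() -> None:
    """ For illustration only """
    import numpy as np
    score = np.array([
        [-1.2, -3.4],  # word clearly preferred -> 1.0
        [-2.0, -2.0],  # tie                    -> 0.5
        [-5.0, -0.5],  # non word preferred     -> 0.0
    ])
    pair_scores = 0.5 * (score[:, 0] == score[:, 1]) + (score[:, 0] > score[:, 1])
    assert pair_scores.tolist() == [1.0, 0.5, 0.0]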
class LexicalTask(Task):
_name = "lexical"
by_pair: bool = default_params.by_pair
by_length: bool = default_params.by_length
by_frequency: bool = default_params.by_frequency
result_filenames = default_params.result_filenames
sets: Tuple = ('dev', 'test')
@staticmethod
def load_and_format(lexical_item: FileItem, gold_item: FileItem):
""" Loads & formats submission data and gold data """
gold_values = load_dataframe(gold_item, header=0, index_col='filename').astype(
{'frequency': pd.Int64Dtype()})
lexical_values = load_dataframe(lexical_item, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# merge the gold and score using filenames, then remove the columns
# 'phones' and 'filename' as we don't use them for evaluation
data = pd.merge(gold_values, lexical_values, on='filename', how='inner')
data.reset_index(inplace=True)
# if all non-words have their textual version set to NaN, we take their phonemic version instead.
if data[data.correct == 0]['word'].isnull().sum() == len(data[data.correct == 0]):
data['word'] = data['phones']
data.drop(columns=['phones', 'filename'], inplace=True)
# going from a word per line to a pair (word, non word) per line
words = data.loc[data['correct'] == 1].reset_index().rename(lambda x: 'w_' + x, axis=1)
non_words = data.loc[data['correct'] == 0].reset_index().rename(lambda x: 'nw_' + x, axis=1)
data = pd.merge(words, non_words, left_on=['w_voice', 'w_id'], right_on=['nw_voice', 'nw_id'])
data.drop(
['w_index', 'nw_index', 'nw_voice', 'nw_frequency',
'w_correct', 'nw_correct', 'nw_id', 'nw_length'],
axis=1, inplace=True)
data.rename(
{'w_id': 'id', 'w_voice': 'voice', 'w_frequency': 'frequency',
'w_word': 'word', 'nw_word': 'non word', 'w_length': 'length',
'w_score': 'score word', 'nw_score': 'score non word'},
axis=1, inplace=True)
return data
@staticmethod
def eval_by_pair(data: pd.DataFrame) -> pd.DataFrame:
"""Returns a data frame with the computed scores by (word, non word) pair
Parameters
----------
data : pandas.DataFrame
The result of `load_data`
Returns
-------
by_pair : pandas.DataFrame
The evaluated (word, non word) pairs, the data frame has the columns:
'word', 'non word' 'frequency', 'length' and 'score'.
"""
# compute the score for each pair in an additional 'score' column, then
# delete the 'score word' and 'score non word' columns that become useless
score = data.loc[:, ['score word', 'score non word']].to_numpy()
data['score'] = (
0.5 * (score[:, 0] == score[:, 1])
+ (score[:, 0] > score[:, 1]))
data.drop(columns=['score word', 'score non word'], inplace=True)
# finally get the mean score across voices for all pairs
score = data.groupby('id').apply(lambda x: (
x.iat[0, 3], # word
x.iat[0, 5], # non word
x.iat[0, 2], # frequency
x.iat[0, 4], # length
x['score'].mean()))
return pd.DataFrame(
score.to_list(),
columns=['word', 'non word', 'frequency', 'length', 'score'])
@staticmethod
def eval_by_frequency(data: pd.DataFrame) -> pd.DataFrame:
"""Returns a data frame with mean scores by frequency bands
The frequency is defined as the number of occurrences of the word in the
LibriSpeech dataset. The following frequency bands are considered : oov,
1-5, 6-20, 21-100 and >100.
Parameters
----------
data: pandas.DataFrame
The output of `evaluate_by_pair`
Returns
-------
by_frequency : pandas.DataFrame
The score collapsed on frequency bands, the data frame has the
following columns: 'frequency', 'score'.
"""
bands = pd.cut(
data.frequency,
[0, 1, 5, 20, 100, float('inf')],
labels=['oov', '1-5', '6-20', '21-100', '>100'],
right=False)
return data.score.groupby(bands).agg(
n='count', score='mean', std='std').reset_index()
@staticmethod
def eval_by_length(data: pd.DataFrame) -> pd.DataFrame:
"""Returns a data frame with mean scores by word length
Parameters
----------
data: pandas.DataFrame
The output of `evaluate_by_pair`
Returns
-------
by_length : pandas.DataFrame
The score collapsed on word length, the data frame has the
following columns: 'length', 'score'.
"""
return data.score.groupby(data.length).agg(
n='count', score='mean', std='std').reset_index()
def run_lexical_eval(self, lexical_item: FileItem, gold_item: FileItem):
data = self.load_and_format(lexical_item, gold_item)
by_pair, by_frequency, by_length = None, None, None
by_pair = self.eval_by_pair(data)
if self.by_frequency:
by_frequency = self.eval_by_frequency(by_pair)
if self.by_length:
by_length = self.eval_by_length(by_pair)
if self.by_pair:
by_pair.drop(['frequency', 'length'], axis=1, inplace=True)
else:
by_pair = None
return by_pair, by_frequency, by_length
def eval(self, submission: "SLM21Submission", dataset: "SLM21Dataset"):
""" Run the selected lexical evaluations & write results """
output_dir = submission.score_dir
self.sets = submission.sets
if 'dev' in self.sets:
sub = submission.items.lexical_dev
gold = dataset.index.subsets.lexical_dev.items.gold
with self.console.status('Running lexical_dev evaluation....', spinner="aesthetic"):
by_pair, by_frequency, by_length = self.run_lexical_eval(sub, gold)
if by_pair is not None:
filename = output_dir / f"{self.result_filenames['dev']['by_pair']}"
self.console.print(f":pencil: writing {self.result_filenames['dev']['by_pair']}",
style="underline yellow4")
by_pair.to_csv(filename, index=False, float_format='%.4f')
if by_frequency is not None:
filename = output_dir / f"{self.result_filenames['dev']['by_frequency']}"
self.console.print(f":pencil: writing {self.result_filenames['dev']['by_frequency']}",
style="underline yellow4")
by_frequency.to_csv(filename, index=False, float_format='%.4f')
if by_length is not None:
filename = output_dir / f"{self.result_filenames['dev']['by_length']}"
self.console.print(f":pencil: writing {self.result_filenames['dev']['by_length']}",
style="underline yellow4")
by_length.to_csv(filename, index=False, float_format='%.4f')
if 'test' in self.sets:
sub = submission.items.lexical_test
gold = dataset.index.subsets.lexical_test.items.gold
            with self.console.status('Running lexical_test evaluation....', spinner="aesthetic"):
by_pair, by_frequency, by_length = self.run_lexical_eval(sub, gold)
if by_pair is not None:
filename = output_dir / f"{self.result_filenames['test']['by_pair']}"
self.console.print(f":pencil: writing {self.result_filenames['test']['by_pair']}",
style="underline yellow4")
by_pair.to_csv(filename, index=False, float_format='%.4f')
if by_frequency is not None:
filename = output_dir / f"{self.result_filenames['test']['by_frequency']}"
self.console.print(f":pencil: writing {self.result_filenames['test']['by_frequency']}",
style="underline yellow4")
by_frequency.to_csv(filename, index=False, float_format='%.4f')
if by_length is not None:
filename = output_dir / f"{self.result_filenames['test']['by_length']}"
self.console.print(f":pencil: writing {self.result_filenames['test']['by_length']}",
style="underline yellow4")
by_length.to_csv(filename, index=False, float_format='%.4f') | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/lm/lexical.py | lexical.py |
from typing import TYPE_CHECKING
import pandas as pd
from zerospeech.data_loaders import load_dataframe
from zerospeech.generics import FileItem
from zerospeech.tasks import Task
from .params import SyntacticParams
if TYPE_CHECKING:
from zerospeech.submissions.sLM21 import SLM21Submission
from ...datasets import SLM21Dataset
default_params = SyntacticParams()
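# Illustrative sketch (toy data): run_syntactic_comparison below pairs each correct
# sentence with its matching incorrect one by splitting the gold on the `correct` flag,
# prefixing the two halves ('s_' / 'ns_') and concatenating them side by side.
def _example_sentence_pairing_sketch() -> None:
    """ For illustration only """
    data = pd.DataFrame({
        "id": [1, 1],
        "correct": [1, 0],
        "transcription": ["the cat sleeps", "the cat sleep"],
        "score": [-1.0, -2.5],
    })
    paired = pd.concat([
        data.loc[data["correct"] == 1].reset_index().rename(lambda x: "s_" + x, axis=1),
        data.loc[data["correct"] == 0].reset_index().rename(lambda x: "ns_" + x, axis=1),
    ], axis=1)
    assert len(paired) == 1  # one row per (sentence, non sentence) pair
    assert paired.loc[0, "s_score"] > paired.loc[0, "ns_score"]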
class SyntacticTask(Task):
"""Allows the computation of the score by sentences pair and by syntax type."""
_name = "syntactic"
sets = ('dev', 'test')
result_filenames = default_params.result_filenames
@staticmethod
def syntactic_by_pair(data: pd.DataFrame) -> pd.DataFrame:
"""Returns a data frame with the scores by (sentence, non sentence) pair"""
# compute the score for each pair in an additional 'score' column, then
# delete the 'score word' and 'score non word' columns that become useless
score = data.loc[:, ['score sentence', 'score non sentence']].to_numpy()
data['score'] = (
0.5 * (score[:, 0] == score[:, 1])
+ (score[:, 0] > score[:, 1]))
data.drop(columns=['score sentence', 'score non sentence'], inplace=True)
# finally get the mean score across voices for all pairs
score = data.groupby(['type', 'subtype', 'id']).apply(lambda x: (
x.iat[0, 2], # type
x.iat[0, 3], # subtype
x.iat[0, 4], # sentence
x.iat[0, 5], # non sentence
x['score'].mean()))
return pd.DataFrame(
score.to_list(),
columns=['type', 'subtype', 'sentence', 'non sentence', 'score'])
@staticmethod
def syntactic_by_type(data: pd.DataFrame) -> pd.DataFrame:
"""Returns a data frame with mean scores by syntax error type"""
return data.score.groupby([data['type']]).agg(
n='count', score='mean', std='std').reset_index()
def run_syntactic_comparison(self, gold: FileItem, sub_file: FileItem):
""" This function creates a syntactic comparison based on inputs
data_formatting:
Each line of the data frame contains a pair of (correct,
incorrect) sentences and has the following columns: 'id', 'voice', 'type',
'sentence', 'score sentence', 'non sentence', 'score non sentence'.
"""
gold_df = load_dataframe(gold, header=0, index_col='filename')
sub_df = load_dataframe(sub_file, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# merge the gold and score using filenames, then remove the columns
# 'phones' and 'filename' as we don't use them for evaluation
data = pd.concat([gold_df, sub_df], axis=1)
data.reset_index(drop=True, inplace=True)
# going from a word per line to a pair (word, non word) per line
data = pd.concat([
data.loc[data['correct'] == 1].reset_index().rename(
lambda x: 's_' + x, axis=1),
data.loc[data['correct'] == 0].reset_index().rename(
lambda x: 'ns_' + x, axis=1)], axis=1)
data.drop(
['s_index', 'ns_index', 'ns_voice', 'ns_type', 'ns_subtype',
's_correct', 'ns_correct', 'ns_id'],
axis=1, inplace=True)
data.rename(
{'s_id': 'id',
's_voice': 'voice',
's_type': 'type',
's_subtype': 'subtype',
's_transcription': 'sentence',
'ns_transcription': 'non sentence',
's_score': 'score sentence',
'ns_score': 'score non sentence'},
axis=1, inplace=True)
by_pair = self.syntactic_by_pair(data)
by_type = self.syntactic_by_type(by_pair)
# remove (type, subtype) from by_pair data since by_type is complete
by_pair.drop(['type', 'subtype'], axis=1, inplace=True)
return by_pair, by_type
def eval(self, submission: "SLM21Submission", dataset: "SLM21Dataset"):
""" Executes syntactic comparison on required sets and writes results to score_dir """
output_dir = submission.score_dir
self.sets = submission.sets
if 'dev' in self.sets:
gold_file = dataset.index.subsets.syntactic_dev.items.gold
sub_file = submission.items.syntactic_dev
with self.console.status('Running syntactic_dev evaluation....', spinner="aesthetic"):
by_pair, by_type = self.run_syntactic_comparison(gold_file, sub_file)
filename = output_dir / f"{self.result_filenames['dev']['by_pair']}"
self.console.print(f":pencil: writing {self.result_filenames['dev']['by_pair']}",
style="underline yellow4")
by_pair.to_csv(filename, index=False, float_format='%.4f')
filename = output_dir / f"{self.result_filenames['dev']['by_type']}"
self.console.print(f":pencil: writing {self.result_filenames['dev']['by_type']}",
style="underline yellow4")
by_type.to_csv(filename, index=False, float_format='%.4f')
if 'test' in self.sets:
gold_file = dataset.index.subsets.syntactic_test.items.gold
sub_file = submission.items.syntactic_test
with self.console.status('Running syntactic_test evaluation....', spinner="aesthetic"):
by_pair, by_type = self.run_syntactic_comparison(gold_file, sub_file)
filename = output_dir / f"{self.result_filenames['test']['by_pair']}"
self.console.print(f":pencil: writing {self.result_filenames['test']['by_pair']}",
style="underline yellow4")
by_pair.to_csv(filename, index=False, float_format='%.4f')
filename = output_dir / f"{self.result_filenames['test']['by_type']}"
self.console.print(f":pencil: writing {self.result_filenames['test']['by_type']}",
style="underline yellow4")
by_type.to_csv(filename, index=False, float_format='%.4f') | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/lm/syntactic.py | syntactic.py |
import enum
import functools
import json
from pathlib import Path
from typing import Any, Dict, Literal
import numpy as np
import yaml
from pydantic import BaseModel
from zerospeech.tasks import BenchmarkParameters
FileNameType = Dict[str, Dict[str, str]]
# using metrics from scipy.spatial.distance.cdist
_SciPyMetrics = ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice',
'euclidean', 'hamming', 'jaccard', 'jensenshannon', 'kulczynski1', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
# Enumeration of metrics used for semantics benchmark
SemanticMetrics = enum.Enum('SemanticMetrics', {f"{k}": k for k in _SciPyMetrics})
class SemanticPooling(str, enum.Enum):
min = 'min'
max = 'max'
mean = 'mean'
sum = 'sum'
last = 'last'
lastlast = 'lastlast'
off = 'off'
@property
def fn(self):
if self == self.max:
return functools.partial(np.max, axis=0)
elif self == self.min:
return functools.partial(np.min, axis=0)
elif self == self.mean:
return functools.partial(np.mean, axis=0)
elif self == self.sum:
return functools.partial(np.sum, axis=0)
elif self == self.last:
return lambda x: x[-1]
elif self == self.lastlast:
return lambda x: x[-2]
elif self == self.off:
return lambda x: x
else:
            raise ValueError(
                f'pooling method must be one of {",".join(f.value for f in type(self))}'
            )
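# Illustrative sketch: SemanticPooling.fn maps a pooling name to a callable that
# collapses a (n_frames, dim) feature matrix into a single vector, except for 'off'
# which returns the matrix unchanged; the array below is made up.
def _example_pooling_sketch() -> None:
    """ For illustration only """
    frames = np.array([[0.0, 1.0],
                       [2.0, 3.0],
                       [4.0, 5.0]])
    assert SemanticPooling.mean.fn(frames).tolist() == [2.0, 3.0]
    assert SemanticPooling.max.fn(frames).tolist() == [4.0, 5.0]
    assert SemanticPooling.last.fn(frames).tolist() == [4.0, 5.0]
    assert SemanticPooling.off.fn(frames) is frames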
class SemanticParams(BaseModel):
metric: SemanticMetrics = SemanticMetrics('euclidean')
pooling: SemanticPooling = SemanticPooling.mean
synthetic: bool = True
librispeech: bool = True
correlations: bool = True
n_jobs: int = 1
result_filenames: FileNameType = dict(
dev=dict(
pairs='score_semantic_dev_pairs.csv',
correlations='score_semantic_dev_correlation.csv'
),
test=dict(
pairs='score_semantic_test_pairs.csv',
correlations='score_semantic_test_correlation.csv'
)
)
class Config:
json_encoders = {
SemanticMetrics: lambda x: str(x.value),
}
class LexicalParams(BaseModel):
by_pair: bool = True
by_length: bool = True
by_frequency: bool = True
result_filenames: FileNameType = dict(
dev=dict(
by_pair='score_lexical_dev_by_pair.csv',
by_frequency='score_lexical_dev_by_frequency.csv',
by_length='score_lexical_dev_by_length.csv'
),
test=dict(
by_pair='score_lexical_test_by_pair.csv',
by_frequency='score_lexical_test_by_frequency.csv',
by_length='score_lexical_test_by_length.csv'
)
)
FileTypes = Literal['.npy', '.txt']
class SyntacticParams(BaseModel):
score_files_type: FileTypes = '.npy'
result_filenames: FileNameType = dict(
dev=dict(
by_pair='score_syntactic_dev_by_pair.csv',
by_type='score_syntactic_dev_by_type.csv'
),
test=dict(
by_pair='score_syntactic_test_by_pair.csv',
by_type='score_syntactic_test_by_type.csv'
)
)
class SLM21BenchmarkParameters(BenchmarkParameters):
lexical: LexicalParams = LexicalParams()
syntactic: SyntacticParams = SyntacticParams()
semantic: SemanticParams = SemanticParams()
def get_lexical(self) -> Dict[str, Any]:
return {
"quiet": self.quiet,
**self.lexical.dict()
}
def get_semantic(self) -> Dict[str, Any]:
return {
"quiet": self.quiet,
**self.semantic.dict()
}
def get_syntactic(self) -> Dict[str, Any]:
return {
"quiet": self.quiet,
**self.syntactic.dict()
}
def to_meta(self) -> Dict[str, Any]:
""" Convert into leaderboard meta entry """
# filtering non-interfaced param values
excluded = {
'lexical': True,
'syntactic': True,
'semantic': {'result_filenames', 'correlations'}
}
return dict(self._iter(to_dict=True, exclude=excluded))
def export(self, file: Path):
# filtering non-interfaced param values
excluded = {
'lexical': True,
'syntactic': True,
'semantic': {'result_filenames', 'correlations'}
}
# conversion order self -> json -> pydict -> yaml
# json is added in before pydict to leverage the pydantic serializer for
# more complex types as Enum, datetimes, etc. as a simpler chain of
# self -> pydict -> yaml leaves those unserialised and the yaml serializer fails.
# see https://pydantic-docs.helpmanual.io/usage/types/#standard-library-types
as_obj = json.loads(self.json(exclude=excluded))
with file.open('w') as fp:
yaml.dump(as_obj, fp)
class ProsodyLMParameters(BenchmarkParameters):
""" Parameters for the prosodic benchmark """
results_filename: str = "score_prosodic_{0}_{1}_{2}.csv"
def to_meta(self) -> Dict[str, Any]:
""" Convert into leaderboard meta entry """
# filtering non-interfaced param values
excluded = {'results_filename'}
return dict(self._iter(to_dict=True, exclude=excluded))
def export(self, file: Path):
# filtering non-interfaced param values
        excluded = {'results_filename'}
# conversion order self -> json -> pydict -> yaml
# json is added in before pydict to leverage the pydantic serializer for
# more complex types as Enum, datetimes, etc. as a simpler chain of
# self -> pydict -> yaml leaves those unserialised and the yaml serializer fails.
# see https://pydantic-docs.helpmanual.io/usage/types/#standard-library-types
as_obj = json.loads(self.json(exclude=excluded))
with file.open('w') as fp:
yaml.dump(as_obj, fp) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/lm/params.py | params.py |
from pathlib import Path
from typing import Dict, Optional, TYPE_CHECKING
import joblib
import numpy as np
import pandas as pd
import scipy.spatial
import scipy.stats
from zerospeech.data_loaders import load_dataframe, load_numpy_array
from zerospeech.generics import FileItem, FileListItem
from zerospeech.tasks import Task
from .params import SemanticParams, SemanticMetrics, SemanticPooling
if TYPE_CHECKING:
from zerospeech.submissions.sLM21 import SLM21Submission
from ...datasets import SLM21Dataset
default_params = SemanticParams()
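# Illustrative sketch (made-up vectors): compute_distance below measures the distance
# between two words with scipy's cdist over the pooled representations of their tokens,
# using the metric configured in SemanticParams ('euclidean' by default).
def _example_semantic_distance_sketch() -> float:
    """ For illustration only """
    tokens_word_1 = np.asarray([[0.0, 0.0], [0.0, 1.0]])  # two pooled tokens of word 1
    tokens_word_2 = np.asarray([[3.0, 4.0]])              # one pooled token of word 2
    metric = str(default_params.metric.value)             # 'euclidean' by default
    return scipy.spatial.distance.cdist(tokens_word_1, tokens_word_2, metric=metric).mean()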
class SemanticTask(Task):
_name = "semantic"
metric: SemanticMetrics = default_params.metric
pooling: SemanticPooling = default_params.pooling
synthetic: bool = default_params.synthetic
librispeech: bool = default_params.librispeech
correlations: bool = default_params.correlations
n_jobs: int = default_params.n_jobs
result_filenames = default_params.result_filenames
sets = ('dev', 'test')
def compute_correlation(self, pairs: pd.DataFrame) -> Optional[pd.DataFrame]:
""""Returns the Spearman's correlation between human and machine scores"""
if not self.correlations:
return None
def _correlation(df):
# choose 'similarity' or 'relatedness' column (the one with no NaN)
human = df.similarity if df.relatedness.hasnans else df.relatedness
# return spearman correlation. Humans score are similarity (high when
# close) so we take the opposite to have a quantity close to a distance
# (low when close)
return 100 * scipy.stats.spearmanr( # noqa: bad __init__ in scipy ?
- human.to_numpy(), df.score.to_numpy())[0]
# for each (type/dataset) combination, compute spearman correlation
series = pairs.groupby([pairs['type'], pairs['dataset']]).apply(_correlation)
# transform raw result in a usable dataframe
return series.to_frame().rename(columns={0: 'correlation'}).reset_index()
def compute_distance(self, pairs_row: pd.Series, gold_df: pd.DataFrame, pool: pd.DataFrame):
# keep only current type in gold
gold_df = gold_df[gold_df['type'] == pairs_row['type']]
if pairs_row['type'] == 'librispeech':
# get the list of tokens corresponding to the given pair of words
tokens_1 = gold_df['filename'][gold_df['word'] == pairs_row['word_1']]
tokens_2 = gold_df['filename'][gold_df['word'] == pairs_row['word_2']]
assert 0 < len(tokens_1) <= 10 and 0 < len(tokens_2) <= 10
x = np.asarray(pool[pool['filename'].isin(tokens_1)]['pooling'].tolist())
y = np.asarray(pool[pool['filename'].isin(tokens_2)]['pooling'].tolist())
# compute the mean distance across all pairs of tokens after pooling
return scipy.spatial.distance.cdist( # noqa: bad __init__ for scipy.spatial ??
x, y, metric=str(self.metric.value)).mean()
elif pairs_row['type'] == 'synthetic':
# get the list of tokens corresponding to the given pair of words
tokens_1 = gold_df[['filename', 'voice']][gold_df['word'] == pairs_row['word_1']]
tokens_2 = gold_df[['filename', 'voice']][gold_df['word'] == pairs_row['word_2']]
tokens = tokens_1.merge(tokens_2, on='voice').drop(['voice'], axis=1)
# compute the mean of distances within a given voice
dist = 0
for _, (filename_x, filename_y) in tokens.iterrows():
x = pool[pool['filename'] == filename_x]['pooling'].item()
y = pool[pool['filename'] == filename_y]['pooling'].item()
dist += scipy.spatial.distance.cdist( # noqa: bad __init__ for scipy.spatial ??
np.atleast_2d(x), np.atleast_2d(y), metric=str(self.metric.value))[0][0]
return dist / len(tokens)
def build_file_index(
self, synthetic: FileListItem, librispeech: FileListItem
) -> Dict[str, Dict[str, Path]]:
file_index = {}
if self.librispeech:
file_index['librispeech'] = {
f"{p.stem}": p
for p in librispeech
}
if self.synthetic:
file_index['synthetic'] = {
f"{p.stem}": p
for p in synthetic
}
return file_index
def semantic_eval(self, file_index: Dict[str, Dict[str, Path]],
gold: FileItem, pairs: FileItem):
""" Semantically evaluate a subset """
pairs_df = load_dataframe(pairs, header=0)
gold_df = load_dataframe(gold, header=0)
if not self.synthetic:
gold_df = gold_df.drop(gold_df[gold_df['type'] == 'synthetic'].index)
pairs_df = pairs_df.drop(pairs_df[pairs_df['type'] == 'synthetic'].index)
if not self.librispeech:
gold_df = gold_df.drop(gold_df[gold_df['type'] == 'librispeech'].index)
pairs_df = pairs_df.drop(pairs_df[pairs_df['type'] == 'librispeech'].index)
def compute(_row: pd.Series):
""" Compute pooling from submission array """
fname = file_index.get(_row[0], {}).get(_row[1], None)
if fname is None:
return _row[1], _row[0], None
data = load_numpy_array(fname)
# values
return _row[1], _row[0], self.pooling.fn(data)
        # compute pooling of the submitted features for every file listed in the gold
res = joblib.Parallel(n_jobs=self.n_jobs)(joblib.delayed(compute)(x) for _, x in gold_df.iterrows())
pool = pd.DataFrame(res, columns=['filename', 'type', 'pooling'])
pairs_df['score'] = [
self.compute_distance(pairs_row, gold_df, pool)
for _, pairs_row in pairs_df.iterrows()
]
correlation = self.compute_correlation(pairs_df)
return pairs_df, correlation
def eval(self, submission: "SLM21Submission", dataset: "SLM21Dataset"):
""" Run the selected semantic evaluations & write results """
outputs_dir = submission.score_dir
self.sets = submission.sets
if 'dev' in self.sets:
gold = dataset.index.subsets.semantic_dev.items.gold
pairs = dataset.index.subsets.semantic_dev.items.pairs
file_index = self.build_file_index(
synthetic=submission.items.semantic_dev_synthetic,
librispeech=submission.items.semantic_dev_librispeech
)
with self.console.status('Running semantic_dev evaluation....', spinner="aesthetic"):
res_pairs, correlation = self.semantic_eval(file_index, gold, pairs)
filename = outputs_dir / self.result_filenames['dev']['pairs']
self.console.print(f":pencil: writing {self.result_filenames['dev']['pairs']}",
style="underline yellow4")
res_pairs.to_csv(filename, index=False, float_format='%.4f')
if self.correlations and correlation is not None:
filename = outputs_dir / self.result_filenames['dev']['correlations']
self.console.print(f":pencil: writing {self.result_filenames['dev']['correlations']}",
style="underline yellow4")
correlation.to_csv(filename, index=False, float_format='%.4f')
if 'test' in self.sets:
gold = dataset.index.subsets.semantic_test.items.gold
pairs = dataset.index.subsets.semantic_test.items.pairs
file_index = self.build_file_index(
synthetic=submission.items.semantic_test_synthetic,
librispeech=submission.items.semantic_test_librispeech
)
with self.console.status('Running semantic_test evaluation....', spinner="aesthetic"):
res_pairs, correlation = self.semantic_eval(file_index, gold, pairs)
filename = outputs_dir / self.result_filenames['test']['pairs']
self.console.print(f":pencil: writing {self.result_filenames['test']['pairs']}",
style="underline yellow4")
res_pairs.to_csv(filename, index=False, float_format='%.4f')
if self.correlations and correlation is not None:
filename = outputs_dir / self.result_filenames['test']['correlations']
self.console.print(f":pencil: writing {self.result_filenames['test']['correlations']}",
style="underline yellow4")
correlation.to_csv(filename, index=False, float_format='%.4f') | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/lm/semantic.py | semantic.py |
from typing import TYPE_CHECKING
import pandas as pd
from zerospeech.data_loaders import load_dataframe
from zerospeech.generics import FileItem
from zerospeech.tasks import Task
from .params import ProsodyLMParameters
if TYPE_CHECKING:
from zerospeech.submissions.prosAudit import ProsodySubmission
from zerospeech.datasets import ProsAuditLMDataset
default_params = ProsodyLMParameters()
class ProsodicTask(Task):
""" Allows computation of the score for the prosodic task (todo: write better desc)"""
_name = "prosaudit"
sets = ('dev', 'test')
tasks = ('english',)
result_filename: str = default_params.results_filename
@staticmethod
def prosodic_by_pair(data: pd.DataFrame) -> pd.DataFrame:
""" Returns a dataframe with the scores by (something non something) pair"""
# compute the score for each pair in an additional 'score' column, then
# delete the 'score word' and 'score non word' columns that become useless
score = data.loc[:, ['score sentence', 'score non sentence']].to_numpy()
data['score'] = (
0.5 * (score[:, 0] == score[:, 1])
+ (score[:, 0] > score[:, 1]))
data.drop(columns=['score sentence', 'score non sentence'], inplace=True)
# finally get the mean score across voices for all pairs
score = data.groupby(['type', 'id']).apply(lambda x: (
x.iat[0, 0], # id
# x.iat[0, 1], # voice
x.iat[0, 2], # type
# x.iat[0, 3], # subtype
# x.iat[0, 4], # sentence
# x.iat[0, 5], # non sentence
x['score'].mean()))
return pd.DataFrame(
score.to_list(),
columns=['id', 'type', 'score'])
@staticmethod
def prosodic_by_type(data: pd.DataFrame) -> pd.DataFrame:
"""Returns a data frame with mean scores by syntax error type"""
return data.score.groupby([data['type']]).agg(
n='count', score='mean', std='std').reset_index()
def run_prosodic_comparison(self, gold: FileItem, sub_file: FileItem):
""" This function create a prosodic comparison based on inputs
data_formatting:
Each line of the data frame contains a pair of (correct, incorrect) sentences
and has the following column: 'id', 'filename', 'type', 'correct'
"""
gold_df = load_dataframe(gold, header=0, index_col='filename')
sub_df = load_dataframe(sub_file, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# merge the gold and score using filenames, then remove the columns
# 'phones' and 'filename' as we don't use them for evaluation
data = pd.concat([gold_df, sub_df], axis=1)
data.reset_index(drop=True, inplace=True)
# going from a word per line to a pair (word, non word) per line
data = pd.concat([
data.loc[data['correct'] == 1].reset_index().rename(
lambda x: 's_' + x, axis=1),
data.loc[data['correct'] == 0].reset_index().rename(
lambda x: 'ns_' + x, axis=1)], axis=1)
data.drop(
['s_index', 'ns_index', 'ns_voice', 'ns_type', 'ns_subtype',
's_correct', 'ns_correct', 'ns_id'],
axis=1, inplace=True)
data.rename(
{'s_id': 'id',
's_voice': 'voice',
's_type': 'type',
's_subtype': 'subtype',
's_transcription': 'sentence',
'ns_transcription': 'non sentence',
's_score': 'score sentence',
'ns_score': 'score non sentence'},
axis=1, inplace=True)
by_pair = self.prosodic_by_pair(data)
by_type = self.prosodic_by_type(by_pair)
# remove (type, subtype) from by_pair data since by_type is complete
# by_pair.drop(['type', 'subtype'], axis=1, inplace=True)
return by_pair, by_type
def eval(self, submission: "ProsodySubmission", dataset: "ProsAuditLMDataset"):
""" Evaluate prosody for the given submission """
output_dir = submission.score_dir
self.sets = submission.sets
self.tasks = submission.tasks
if 'dev' in self.sets:
if 'english' in self.tasks:
gold_file = dataset.index.subsets.english_dev.items.gold
sub_file = submission.items.english_dev
with self.console.status('Running prosodic english_dev evaluation', spinner="aesthetic"):
by_pair, by_type = self.run_prosodic_comparison(gold_file, sub_file)
filename = output_dir / f"{self.result_filename.format('english', 'dev', 'by_pair')}"
self.console.print(f":pencil: writing {filename.name}",
style="underline yellow4")
by_pair.to_csv(filename, index=False, float_format='%.4f')
filename = output_dir / f"{self.result_filename.format('english', 'dev', 'by_type')}"
self.console.print(f":pencil: writing {filename.name}",
style="underline yellow4")
by_type.to_csv(filename, index=False, float_format='%.4f')
if 'test' in self.sets:
if 'english' in self.tasks:
gold_file = dataset.index.subsets.english_test.items.gold
sub_file = submission.items.english_test
with self.console.status('Running prosodic english_test evaluation', spinner="aesthetic"):
by_pair, by_type = self.run_prosodic_comparison(gold_file, sub_file)
filename = output_dir / f"{self.result_filename.format('english', 'test', 'by_pair')}"
self.console.print(f":pencil: writing {filename.name}",
style="underline yellow4")
by_pair.to_csv(filename, index=False, float_format='%.4f')
filename = output_dir / f"{self.result_filename.format('english', 'test', 'by_type')}"
self.console.print(f":pencil: writing {filename.name}",
style="underline yellow4")
by_type.to_csv(filename, index=False, float_format='%.4f') | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/lm/prosody.py | prosody.py |
import json
from enum import Enum
from pathlib import Path
from typing import Optional, Dict, Any, Literal
import yaml
from zerospeech.tasks import BenchmarkParameters
class ABXMode(str, Enum):
""" ABX mode of computation """
all = 'all'
within = 'within'
across = 'across'
def as_set(self):
if self == self.all:
return self.within, self.across
else:
return self,
class ABXDistanceMode(str, Enum):
""" Enumeration for distance mode for abx algorithm"""
euclidian = 'euclidian'
cosine = 'cosine'
kl = 'kl'
kl_symmetric = 'kl_symmetric'
FileNameType = Dict[str, Dict[str, str]]
FileTypesTXT = Literal['.npy', '.txt']
class ABXParameters(BenchmarkParameters):
# Path to a CPC checkpoint
path_checkpoint: Optional[str] = None
# size of a single feature
feature_size: Optional[float] = float(0.1)
# Use the GPU to compute distances
cuda: bool = True
# Choose the mode of the ABX score to compute
mode: ABXMode = ABXMode.all
# Choose the kind of distance to use to compute
distance_mode: ABXDistanceMode = ABXDistanceMode.cosine
# Max size of a group while computing the ABX score
max_size_group: int = 10
# When computing the ABX across score, maximum
# number of speaker X to sample per couple A,B.
max_x_across: int = 5
# location to output the results
out: Optional[str] = None
score_file_type: FileTypesTXT = '.npy'
result_filename: str = "score_phonetic.csv"
def get_task(self):
return self.dict()
def to_meta(self) -> Dict[str, Any]:
""" Convert into leaderboard meta entry """
excluded = {'path_checkpoint', 'out', 'result_filename'}
return dict(self._iter(to_dict=True, exclude=excluded))
def export(self, file: Path):
# filtering non-interfaced param values
excluded = {'path_checkpoint', 'out'}
# conversion order self -> json -> pydict -> yaml
# json is added in before pydict to leverage the pydantic serializer for
# more complex types as Enum, datetimes, etc. as a simpler chain of
# self -> pydict -> yaml leaves those unserialised and the yaml serializer fails.
# see https://pydantic-docs.helpmanual.io/usage/types/#standard-library-types
as_obj = json.loads(self.json(exclude=excluded))
with file.open('w') as fp:
yaml.dump(as_obj, fp) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/abx/abx17/params.py | params.py |
import abc
import warnings
from pathlib import Path
from typing import Optional, Tuple, Dict, List, TYPE_CHECKING
import pandas as pd
try:
import libriabx
except ImportError:
libriabx = ...
warnings.warn("abx17 extension not installed")
from .params import ABXParameters, ABXMode, ABXDistanceMode
from zerospeech.generics import FileListItem, FileItem
from zerospeech.settings import get_settings
from zerospeech.out import warning_console
from zerospeech.tasks import Task
if TYPE_CHECKING:
from zerospeech.datasets import Dataset
from zerospeech.submissions import Submission
st = get_settings()
default_params = ABXParameters()
extract_return_type = Tuple[str, FileListItem, FileItem]
class SimpleABXTask(Task, abc.ABC):
""" Abstract abx-LS task """
_name = "abx-LS"
# Path to a CPC checkpoint
path_checkpoint: Optional[str] = default_params.path_checkpoint
# size of a single feature
feature_size: Optional[float] = default_params.feature_size
# Use the GPU to compute distances
cuda: bool = default_params.cuda
# Choose the mode of the ABX score to compute
mode: ABXMode = default_params.mode
# Choose the kind of distance to use to compute
distance_mode: ABXDistanceMode = default_params.distance_mode
# Max size of a group while computing the ABX score
max_size_group: int = default_params.max_size_group
# When computing the ABX across score, maximum
# number of speaker X to sample per couple A,B.
max_x_across: int = default_params.max_x_across
# location to output the results
out: Optional[str] = default_params.out
sets: Tuple = ('dev', 'test')
tasks: Tuple = ('clean', 'other')
result_filename = default_params.result_filename
def abx_args(self, file_list: List[Path], file_ext, item_file):
""" Build ABX arguments from class attributes """
        if libriabx is not ...:  # the backend import above succeeded
abx_args = libriabx.AbxArguments.load_from_file_list(
file_list=file_list,
path_item_file=str(item_file),
distance_mode=self.distance_mode,
feature_size=self.feature_size,
cuda=self.cuda,
file_extension=file_ext,
path_checkpoint=self.path_checkpoint,
mode=self.mode,
max_size_group=self.max_size_group,
max_x_across=self.max_x_across
)
# bugfix: _is_mounted is not set by constructor should be fixed in v1.0.6
abx_args._is_mounted = True
return abx_args
else:
raise ValueError('No abx backend detected')
def get_abx(self, sub_files: FileListItem, item_file: FileItem) -> Dict[str, float]:
""" Run abx evaluations on a fileList using a specific .item file
Returns:
scores<Dict[str, float]>: where keys represent abx mode (across, within) and float represents the score.
"""
if None in (sub_files, item_file):
return {f'{t.value}': '-' for t in self.mode.as_set()}
arg_obj = self.abx_args(sub_files.files_list, sub_files.file_type.ext, item_file.file)
        if libriabx is not ...:  # the backend import above succeeded
res = libriabx.abx_eval(arg_obj)
else:
raise ValueError('No abx backend detected')
return res
@abc.abstractmethod
def extract_sets(self, submission: "Submission", dataset: "Dataset") -> extract_return_type:
""" Extract relevant data for abx from submission & dataset """
pass
@abc.abstractmethod
def format_results(self, results: Dict) -> pd.DataFrame:
""" Format the results as a dataframe """
pass
def eval(self, submission: "Submission", dataset: "Dataset"):
""" Simple ABX evaluation """
output_dir = submission.score_dir
results = {}
abx_sets = self.extract_sets(submission, dataset)
if self.cuda:
warning_console.print("WARNING: gpu mode is set. You can disable this in the parameters.")
for label, item_file, file_list in abx_sets:
self.console.print(f'==> Calculating abx distances for {label}')
results[label] = self.get_abx(
sub_files=file_list,
item_file=item_file
)
as_df = self.format_results(results)
filename = output_dir / self.result_filename
self.console.print(f":pencil: writing {self.result_filename}",
style="underline yellow4")
as_df.to_csv(filename, index=False, float_format='%.4f') | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/abx/abx17/task.py | task.py |
import json
from enum import Enum
from pathlib import Path
from typing import Optional, Dict, Any, Literal
import yaml
try:
from zrc_abx2.eval_ABX import SEED
except ImportError:
# use this default value if variable does not exist
SEED = 3459
from zerospeech.tasks import BenchmarkParameters
class ABXFileTypes(str, Enum):
""" Input file type for abx"""
pt = '.pt'
npy = '.npy'
txt = '.txt'
wav = '.wav'
flac = '.flac'
mp3 = '.mp3'
class ABXSpeakerMode(str, Enum):
""" ABX mode of computation """
all = 'all'
within = 'within'
across = 'across'
def as_set(self):
if self == self.all:
return self.within, self.across
else:
return self,
class ContextMode(str, Enum):
""" ABX context mode of computation """
all = "all"
phoneme_any = "phoneme-any"
phoneme_within = "phoneme-within"
triphone_within = 'triphone-within'
def as_set(self):
if self == self.all:
return self.phoneme_within, self.phoneme_any, self.triphone_within
else:
return self,
def as_abx2_value(self) -> str:
if self == self.phoneme_within:
return "within"
elif self == self.phoneme_any:
return "any"
elif self == self.triphone_within:
return "within"
else:
            raise ValueError('Current context has no representable value in the abx2 module')
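# Illustrative sketch: the context modes exposed to users do not map one-to-one onto the
# values understood by the zrc_abx2 backend; both phoneme-within and triphone-within are
# forwarded as "within", phoneme-any becomes "any" and 'all' expands to all three modes.
def _example_context_mapping_sketch() -> None:
    """ For illustration only """
    assert ContextMode.phoneme_within.as_abx2_value() == "within"
    assert ContextMode.triphone_within.as_abx2_value() == "within"
    assert ContextMode.phoneme_any.as_abx2_value() == "any"
    assert set(ContextMode.all.as_set()) == {
        ContextMode.phoneme_within, ContextMode.phoneme_any, ContextMode.triphone_within
    }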
class ABXDistanceMode(str, Enum):
""" Enumeration for distance mode for abx algorithm"""
euclidian = 'euclidian'
cosine = 'cosine'
kl = 'kl'
kl_symmetric = 'kl_symmetric'
class PoolingMode(str, Enum):
""" Pooling method """
none = "none"
mean = "mean"
hamming = "hamming"
FileNameType = Dict[str, Dict[str, str]]
FileTypesTXT = Literal['.npy', '.txt']
class ABX2Parameters(BenchmarkParameters):
# Path to a CPC checkpoint
path_checkpoint: Optional[str] = None
# size of a single feature
feature_size: Optional[float] = float(0.01)
# Use the GPU to compute distances
cuda: bool = True
# Choose the mode of the ABX score to compute
speaker_mode: ABXSpeakerMode = ABXSpeakerMode.all
# Choose the context type of the ABX score to compute
context: ContextMode = ContextMode.all
# Choose the kind of distance to use to compute
    distance_mode: ABXDistanceMode = ABXDistanceMode.cosine
# Max size of a group while computing the ABX score
max_size_group: int = 10
# When computing the ABX across score, maximum
# number of speaker X to sample per couple A,B.
max_x_across: int = 5
# Default seed to use
seed: int = SEED
# location to output the results
out: Optional[str] = None
score_file_type: FileTypesTXT = '.npy'
result_filename: str = "score_all_phonetic"
def get_task(self):
return self.dict()
def to_meta(self) -> Dict[str, Any]:
""" Convert into leaderboard meta entry """
excluded = {'path_checkpoint', 'out', 'result_filename'}
return dict(self._iter(to_dict=True, exclude=excluded))
def export(self, file: Path):
# filtering non-interfaced param values
excluded = {'path_checkpoint', 'out'}
# conversion order self -> json -> pydict -> yaml
# json is added in before pydict to leverage the pydantic serializer for
# more complex types as Enum, datetimes, etc. as a simpler chain of
# self -> pydict -> yaml leaves those unserialised and the yaml serializer fails.
# see https://pydantic-docs.helpmanual.io/usage/types/#standard-library-types
as_obj = json.loads(self.json(exclude=excluded))
with file.open('w') as fp:
yaml.dump(as_obj, fp) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/tasks/abx/abxLS_phoneme/params.py | params.py |
import abc
import json
import warnings
from pathlib import Path
from typing import Optional, Tuple, Dict, List, Any, TYPE_CHECKING
import pandas as pd
try:
import zrc_abx2
from vdataset import mount, unmount
except ImportError:
zrc_abx2 = ...
mount, unmount = ..., ...
warnings.warn("abxLS extension not installed")
from .params import ABX2Parameters, ABXSpeakerMode, ABXDistanceMode, ContextMode
from zerospeech.generics import FileItem, FileListItem
from zerospeech.settings import get_settings
from zerospeech.out import warning_console
from zerospeech.tasks import Task
if TYPE_CHECKING:
from zerospeech.datasets import Dataset
from zerospeech.submissions import Submission
st = get_settings()
default_params = ABX2Parameters()
extract_return_type = Tuple[str, FileItem, FileListItem, ContextMode]
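# Illustrative sketch (not part of the task, `run_eval` is a hypothetical callback): the
# evaluation below mounts the submitted files into a virtual folder with vdataset's
# `mount`, hands that folder to zrc_abx2 and releases it with `unmount` once scoring is
# done; a try/finally wrapper guarantees the mount is released even if scoring fails.
def _example_mounted_eval_sketch(file_list, run_eval):
    """ For illustration only """
    if zrc_abx2 is ...:
        raise ValueError('No abx backend detected')
    path_data = mount(file_list)
    try:
        return run_eval(path_data)
    finally:
        unmount(path_data)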
class SimpleABXPhonemeTask(Task, abc.ABC):
""" Abstract abx-LS task """
_name = "abx-LS"
# Path to a CPC checkpoint
path_checkpoint: Optional[str] = default_params.path_checkpoint
# size of a single feature
feature_size: Optional[float] = default_params.feature_size
# Use the GPU to compute distances
cuda: bool = default_params.cuda
# Choose the mode of the ABX score to compute
speaker_mode: ABXSpeakerMode = default_params.speaker_mode
# Choose the context type of the ABX score to compute
context: ContextMode = default_params.context
# Choose the kind of distance to use to compute
distance_mode: ABXDistanceMode = default_params.distance_mode
# Max size of a group while computing the ABX score
max_size_group: int = default_params.max_size_group
# When computing the ABX across score, maximum
# number of speaker X to sample per couple A,B.
max_x_across: int = default_params.max_x_across
# Default seed to use
seed: int = default_params.seed
# location to output the results
out: Optional[str] = default_params.out
sets: Tuple = ('dev', 'test')
tasks: Tuple = ('clean', 'other')
result_filename = default_params.result_filename
def abx_args(self, file_list: List[Path], file_ext, item_file, context: ContextMode):
""" Build ABX arguments from class attributes """
if zrc_abx2:
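            # vdataset.mount() exposes the given files under a single (virtual) directory path,
            # which is what the abx2 evaluator expects; get_abx() releases it again via
            # unmount() once the evaluation has finished.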
path_data = mount(file_list)
abx2_context = context.as_abx2_value()
abx_args = zrc_abx2.EvalArgs(
path_data=str(path_data),
path_item_file=str(item_file),
speaker_mode=self.speaker_mode,
context_mode=abx2_context,
distance_mode=self.distance_mode,
feature_size=self.feature_size,
cuda=self.cuda,
file_extension=file_ext,
path_checkpoint=self.path_checkpoint,
max_size_group=self.max_size_group,
max_x_across=self.max_x_across,
seed=self.seed
)
return abx_args
else:
raise ValueError('No abx backend detected')
def get_abx(
self, sub_files: FileListItem, item_file: FileItem, context: ContextMode
) -> List[Dict[str, Any]]:
""" Run abx evaluations on a fileList using a specific .item file
Returns:
scores<Dict[str, float]>: where keys represent abx mode (across, within) and float represents the score.
"""
if None in (sub_files, item_file):
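            # missing inputs: return placeholder '-' scores for every requested speaker mode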
return [{f'{t.value}': '-' for t in self.speaker_mode.as_set()}]
arg_obj = self.abx_args(sub_files.files_list, sub_files.file_type.ext, item_file.file, context)
if zrc_abx2:
res = zrc_abx2.EvalABX().eval_abx(arg_obj)
else:
raise ValueError('No abx backend detected')
# release folder location
unmount(arg_obj.path_data)
return res
@abc.abstractmethod
def extract_sets(self, submission: "Submission",
dataset: "Dataset", context: ContextMode = ContextMode.all) -> List[extract_return_type]:
""" Extract relevant data for abx from submission & dataset """
pass
@abc.abstractmethod
def format_results(self, results: Dict) -> pd.DataFrame:
""" Format the results as a dataframe """
pass
def eval(self, submission: "Submission", dataset: "Dataset"):
""" Simple Phoneme ABX evaluation """
output_dir = submission.score_dir
results = {}
abx_sets = self.extract_sets(submission, dataset)
if self.cuda:
warning_console.print("WARNING: gpu mode is set. You can disable this in the parameters.")
for label, item_file, file_list, context in abx_sets:
self.console.print(f'==> Calculating abx distances for {label}')
results[label] = self.get_abx(
sub_files=file_list,
item_file=item_file,
context=context
)
as_df = self.format_results(results)
filename = output_dir / self.result_filename
with filename.with_suffix('.raw.json').open('w') as fp:
json.dump(results, fp, indent=4)
self.console.print(f":pencil: writing {self.result_filename}",
style="underline yellow4")
        as_df.to_csv(filename.with_suffix('.csv'), index=False, float_format='%.4f')


# --- end of file: zerospeech/tasks/abx/abxLS_phoneme/task.py (package zerospeech-benchmarks 0.9.4) ---
import collections
import functools
import shutil
from datetime import datetime
from pathlib import Path
from typing import Tuple, Optional
import numpy as np
import pandas as pd
from pydantic import Field
from zerospeech import validators
from zerospeech.data_loaders import load_dataframe
from zerospeech.datasets import ZRC2017Dataset
from zerospeech.generics import (
FileTypes, FileListItem, Namespace, Item, FileItem
)
from zerospeech.leaderboards import EntryDetails, LeaderboardBenchmarkName, LeaderboardEntry
from zerospeech.leaderboards.abx17 import ABX17LeaderboardEntry, ABX17LeaderboardScores
from zerospeech.misc import load_obj
from zerospeech.tasks.abx.abx17 import ABXParameters
from ._model import MetaFile, Submission, SubmissionValidation, validation_fn, add_item, ScoreDir
class ABX17SubmissionValidator(SubmissionValidation):
""" File Validation for an ABX17 submission"""
dataset: ZRC2017Dataset = Field(default_factory=lambda: ZRC2017Dataset.load())
@staticmethod
def basic_abx_checks(item_list: FileListItem, abx_item: FileItem, tag: str):
# wav_list are compared to items inside item file
df = pd.read_csv(abx_item.file, sep=' ')
f_list_checks = [
# Verify that all necessary files are present
functools.partial(
validators.file_list_stem_check,
expected=[str(f) for f in df['#file']]
)
]
additional_checks = [
# Verify that type of array is float
functools.partial(
validators.numpy_dtype_check,
dtype=np.dtype('float')
),
# Verify that array has 2 dimensions
functools.partial(
validators.numpy_dimensions_check,
ndim=2
),
# Verify that files have the same dimensions
validators.numpy_col_comparison(1)
]
results = validators.numpy_array_list_check(
item_list, f_list_checks=f_list_checks, additional_checks=additional_checks
)
# add item tag
add_item(tag, results)
return results
@validation_fn(target='english_1s')
def validate_english_1s(self, english_1s: FileListItem):
abx_item = self.dataset.index.subsets.english.items.abx_1s_item
return self.basic_abx_checks(item_list=english_1s, abx_item=abx_item,
tag='english_1s')
@validation_fn(target='english_10s')
def validate_english_10s(self, english_10s: FileListItem):
abx_item = self.dataset.index.subsets.english.items.abx_10s_item
return self.basic_abx_checks(item_list=english_10s, abx_item=abx_item,
tag='english_10s')
@validation_fn(target='english_120s')
def validate_english_120s(self, english_120s: FileListItem):
abx_item = self.dataset.index.subsets.english.items.abx_120s_item
return self.basic_abx_checks(item_list=english_120s, abx_item=abx_item,
tag='english_120s')
@validation_fn(target='french_1s')
def validate_french_1s(self, french_1s: FileListItem):
abx_item = self.dataset.index.subsets.french.items.abx_1s_item
return self.basic_abx_checks(item_list=french_1s, abx_item=abx_item,
tag='french_1s')
@validation_fn(target='french_10s')
def validate_french_10s(self, french_10s: FileListItem):
abx_item = self.dataset.index.subsets.french.items.abx_10s_item
return self.basic_abx_checks(item_list=french_10s, abx_item=abx_item,
tag='french_10s')
@validation_fn(target='french_120s')
def validate_french_120s(self, french_120s: FileListItem):
abx_item = self.dataset.index.subsets.french.items.abx_120s_item
return self.basic_abx_checks(item_list=french_120s, abx_item=abx_item,
tag='french_120s')
@validation_fn(target='mandarin_1s')
def validate_mandarin_1s(self, mandarin_1s: FileListItem):
abx_item = self.dataset.index.subsets.mandarin.items.abx_1s_item
return self.basic_abx_checks(item_list=mandarin_1s, abx_item=abx_item,
tag='mandarin_1s')
@validation_fn(target='mandarin_10s')
def validate_mandarin_10s(self, mandarin_10s: FileListItem):
abx_item = self.dataset.index.subsets.mandarin.items.abx_10s_item
return self.basic_abx_checks(item_list=mandarin_10s, abx_item=abx_item,
tag='mandarin_10s')
@validation_fn(target='mandarin_120s')
def validate_mandarin_120s(self, mandarin_120s: FileListItem):
abx_item = self.dataset.index.subsets.mandarin.items.abx_120s_item
return self.basic_abx_checks(item_list=mandarin_120s, abx_item=abx_item,
tag='mandarin_120s')
@validation_fn(target='german_1s')
def validate_german_1s(self, german_1s: FileListItem):
abx_item = self.dataset.index.subsets.german.items.abx_1s_item
return self.basic_abx_checks(item_list=german_1s, abx_item=abx_item,
tag='german_1s')
@validation_fn(target='german_10s')
def validate_german_10s(self, german_10s: FileListItem):
abx_item = self.dataset.index.subsets.german.items.abx_10s_item
return self.basic_abx_checks(item_list=german_10s, abx_item=abx_item,
tag='german_10s')
@validation_fn(target='german_120s')
def validate_german_120s(self, german_120s: FileListItem):
abx_item = self.dataset.index.subsets.german.items.abx_120s_item
return self.basic_abx_checks(item_list=german_120s, abx_item=abx_item,
tag='german_120s')
@validation_fn(target='wolof_1s')
def validate_wolof_1s(self, wolof_1s: FileListItem):
abx_item = self.dataset.index.subsets.wolof.items.abx_1s_item
return self.basic_abx_checks(item_list=wolof_1s, abx_item=abx_item,
tag='wolof_1s')
@validation_fn(target='wolof_10s')
def validate_wolof_10s(self, wolof_10s: FileListItem):
abx_item = self.dataset.index.subsets.wolof.items.abx_10s_item
return self.basic_abx_checks(item_list=wolof_10s, abx_item=abx_item,
tag='wolof_10s')
@validation_fn(target='wolof_120s')
def validate_wolof_120s(self, wolof_120s: FileListItem):
abx_item = self.dataset.index.subsets.wolof.items.abx_120s_item
return self.basic_abx_checks(item_list=wolof_120s, abx_item=abx_item,
tag='wolof_120s')
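
# A small validation sketch (submission path hypothetical): each @validation_fn method above
# is meant to be run against the matching item of a loaded submission, e.g.
#
#   submission = ABX17Submission.load(Path("my_abx17_submission"))
#   responses = ABX17SubmissionValidator().validate(submission)
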
class ABX17ScoreDir(ScoreDir):
params: Optional[ABXParameters] = ABXParameters()
@property
def scores(self):
csv_file = (self.location / self.params.result_filename).with_suffix('.csv')
return load_dataframe(csv_file)
def get_details(self) -> EntryDetails:
""" Build entry details """
train_set = ""
gpu_budget = ""
if self.meta_file is not None:
train_set = self.meta_file.model_info.train_set
gpu_budget = self.meta_file.model_info.gpu_budget
return EntryDetails(
train_set=train_set,
benchmarks=[LeaderboardBenchmarkName.ABX_17],
gpu_budget=gpu_budget,
parameters=self.params.to_meta()
)
def build_scores(self) -> ABX17LeaderboardScores:
""" extract scores from csv """
score_template = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(dict)))
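        # nested mapping, e.g. score_template['english']['t_1s']['across'] = <score>;
        # languages, durations and score types come straight from the columns of the scores CSV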
for _, row in self.scores.iterrows():
score_template[row['language']][f"t_{row['duration']}"][row['type']] = row["score"]
return ABX17LeaderboardScores.parse_obj(score_template)
def build_meta_data(self):
""" Build leaderboard metadata """
return dict(
model_id=self.meta_file.model_info.model_id,
submission_id="",
index=None,
submission_date=datetime.now(),
submitted_by=self.meta_file.username,
description=self.meta_file.model_info.system_description,
publication=dict(
author_short=self.meta_file.publication.author_label,
authors=self.meta_file.publication.authors,
paper_title=self.meta_file.publication.paper_title,
paper_ref=self.meta_file.publication.paper_url,
bib_ref=self.meta_file.publication.bib_reference,
paper_url=self.meta_file.publication.paper_url,
pub_year=self.meta_file.publication.publication_year,
team_name=self.meta_file.publication.team,
institution=self.meta_file.publication.institution,
code=self.meta_file.code_url,
DOI=self.meta_file.publication.DOI,
open_science=self.meta_file.open_source,
),
details=dict(
train_set=self.meta_file.model_info.train_set,
benchmarks=[],
gpu_budget=self.meta_file.model_info.gpu_budget,
parameters=self.params.to_meta(),
)
)
def build_leaderboard(self) -> LeaderboardEntry:
""" Build leaderboard entry for the current submission """
self.load_meta()
return ABX17LeaderboardEntry.parse_obj(
dict(
**self.build_meta_data(),
scores=self.build_scores()
)
)
class ABX17Submission(Submission):
""" Submission for ABX-17 """
sets: Tuple = ('1s', '10s', '120s')
tasks: Tuple = ('english', 'french', 'mandarin', 'german', 'wolof')
@classmethod
def load(
cls, path: Path, *,
tasks=('english', 'french', 'mandarin', 'german', 'wolof'),
sets=('1s', '10s', '120s')
):
# submission object
submission = cls(
sets=sets,
tasks=tasks,
location=path
)
# if params not set export defaults
if not submission.params_file.is_file():
params = ABXParameters()
params.result_filename = "scores.csv"
params.export(submission.params_file)
# Load items
file_ext = submission.params.score_file_type.replace('.', '')
file_ext = FileTypes(file_ext)
items = dict()
if 'english' in tasks:
if '1s' in sets:
items['english_1s'] = FileListItem.from_dir(
path / 'english/1s', f_type=file_ext
)
if '10s' in sets:
items['english_10s'] = FileListItem.from_dir(
path / 'english/10s', f_type=file_ext
)
if '120s' in sets:
items['english_120s'] = FileListItem.from_dir(
path / 'english/120s', f_type=file_ext
)
if 'french' in tasks:
if '1s' in sets:
items['french_1s'] = FileListItem.from_dir(
path / 'french/1s', f_type=file_ext
)
if '10s' in sets:
items['french_10s'] = FileListItem.from_dir(
path / 'french/10s', f_type=file_ext
)
if '120s' in sets:
items['french_120s'] = FileListItem.from_dir(
path / 'french/120s', f_type=file_ext
)
if 'mandarin' in tasks:
if '1s' in sets:
items['mandarin_1s'] = FileListItem.from_dir(
path / 'mandarin/1s', f_type=file_ext
)
if '10s' in sets:
items['mandarin_10s'] = FileListItem.from_dir(
path / 'mandarin/10s', f_type=file_ext
)
if '120s' in sets:
items['mandarin_120s'] = FileListItem.from_dir(
path / 'mandarin/120s', f_type=file_ext
)
if 'german' in tasks:
# retro-compatibility with old format
gloc = path / 'LANG1'
if not gloc.is_dir():
gloc = path / 'german'
if '1s' in sets:
items['german_1s'] = FileListItem.from_dir(
gloc / '1s', f_type=file_ext
)
if '10s' in sets:
items['german_10s'] = FileListItem.from_dir(
gloc / '10s', f_type=file_ext
)
if '120s' in sets:
items['german_120s'] = FileListItem.from_dir(
gloc / '120s', f_type=file_ext
)
if 'wolof' in tasks:
# retro-compatibility with old format
gloc = path / 'LANG2'
if not gloc.is_dir():
gloc = path / 'wolof'
if '1s' in sets:
items['wolof_1s'] = FileListItem.from_dir(
gloc / '1s', f_type=file_ext
)
if '10s' in sets:
items['wolof_10s'] = FileListItem.from_dir(
gloc / '10s', f_type=file_ext
)
if '120s' in sets:
items['wolof_120s'] = FileListItem.from_dir(
gloc / '120s', f_type=file_ext
)
submission.items = Namespace[Item](store=items)
return submission
def load_parameters(self) -> "ABXParameters":
if self.params_file.is_file():
obj = load_obj(self.params_file)
return ABXParameters.parse_obj(obj)
return ABXParameters()
def __validate_submission__(self):
""" Run validation on the submission data """
self.validation_output += ABX17SubmissionValidator().validate(self)
def get_scores(self):
""" Load score Dir"""
return ABX17ScoreDir(
submission_dir=self.location,
location=self.score_dir,
params=self.params
)
def __zippable__(self):
return [
("", self.meta_file),
("", self.params_file),
*[("english/1s/", f) for f in self.items.english_1s],
*[("english/10s/", f) for f in self.items.english_10s],
*[("english/120s/", f) for f in self.items.english_120s],
*[("french/1s/", f) for f in self.items.french_1s],
*[("french/10s/", f) for f in self.items.french_10s],
*[("french/120s/", f) for f in self.items.french_120s],
*[("mandarin/1s/", f) for f in self.items.mandarin_1s],
*[("mandarin/10s/", f) for f in self.items.mandarin_10s],
*[("mandarin/120s/", f) for f in self.items.mandarin_120s],
*[("german/1s/", f) for f in self.items.german_1s],
*[("german/10s/", f) for f in self.items.german_10s],
*[("german/120s/", f) for f in self.items.german_120s],
*[("wolof/1s/", f) for f in self.items.wolof_1s],
*[("wolof/10s/", f) for f in self.items.wolof_10s],
*[("wolof/120s/", f) for f in self.items.wolof_120s],
*[("scores/", f) for f in self.score_dir.iterdir()]
]
@classmethod
def init_dir(cls, location: Path):
# create sub-directories
location.mkdir(exist_ok=True, parents=True)
(location / 'english' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'english' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'english' / "120s").mkdir(exist_ok=True, parents=True)
(location / 'french' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'french' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'french' / "120s").mkdir(exist_ok=True, parents=True)
(location / 'mandarin' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'mandarin' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'mandarin' / "120s").mkdir(exist_ok=True, parents=True)
(location / 'german' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'german' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'german' / "120s").mkdir(exist_ok=True, parents=True)
(location / 'wolof' / "1s").mkdir(exist_ok=True, parents=True)
(location / 'wolof' / "10s").mkdir(exist_ok=True, parents=True)
(location / 'wolof' / "120s").mkdir(exist_ok=True, parents=True)
# scores dir
(location / 'scores').mkdir(exist_ok=True, parents=True)
# create parameters file
ABXParameters().export(location / ABXParameters.file_stem)
# create meta-template
template = MetaFile.to_template(benchmark_name="abx17")
template.to_yaml(
file=location / MetaFile.file_stem,
excluded={
"file_stem": True,
"model_info": {"model_id"},
"publication": {"bib_reference", "DOI"}
}
)
instruction_file = Path(__file__).parent / "instructions.md"
if instruction_file.is_file():
            shutil.copy(instruction_file, location / 'help.md')


# --- end of file: zerospeech/submissions/abx17.py (package zerospeech-benchmarks 0.9.4) ---
import json
import os
import shutil
import sys
from datetime import datetime
from pathlib import Path
from typing import Tuple, Dict, Any, Optional, Type, List
import yaml
from pydantic import Field
from zerospeech.datasets.zrc_2017 import ZRC2017Dataset
from zerospeech.generics import FileItem, Item, Namespace
from zerospeech.leaderboards import EntryDetails, LeaderboardBenchmarkName, LeaderboardEntry
from zerospeech.leaderboards.tde17 import TDE17Scores, TDE17Entry
from zerospeech.misc import load_obj
from zerospeech.tasks import BenchmarkParameters, tde
from zerospeech.validators import BASE_VALIDATOR_FN_TYPE
from . import ScoreDir
from ._model import (
MetaFile, Submission, validation_fn, SubmissionValidation,
ValidationResponse, ValidationError, ValidationOK
)
class TDE17BenchmarkParams(BenchmarkParameters):
""" Parameters for the TDE-17 benchmark """
    njobs: int = 1  # CPU cores to use for eval
    # location to output the results
    out: Optional[str] = None
result_filename: str = "scores.json"
def get_task(self):
return self.dict()
def to_meta(self) -> Dict[str, Any]:
""" Convert into leaderboard meta entry """
excluded = {'result_filename', 'out'}
return dict(self._iter(to_dict=True, exclude=excluded))
def export(self, file: Path):
# filtering non-interfaced param values
excluded = {'result_filename', 'out'}
        # conversion order: self -> json -> pydict -> yaml
        # the json step is inserted before pydict to leverage pydantic's serializer for
        # complex types such as Enum, datetime, etc.; the simpler chain
        # self -> pydict -> yaml would leave those unserialized and the yaml serializer would fail.
        # see https://pydantic-docs.helpmanual.io/usage/types/#standard-library-types
as_obj = json.loads(self.json(exclude=excluded))
with file.open('w') as fp:
yaml.dump(as_obj, fp)
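
# A minimal usage sketch (file name hypothetical): export() writes the interfaced fields
# (e.g. `njobs`) as YAML, while `out` and `result_filename` stay excluded.
#
#   TDE17BenchmarkParams(njobs=4).export(Path("params.yaml"))
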
def tde_class_file_check(
file_location: Path, additional_checks: Optional[List[BASE_VALIDATOR_FN_TYPE]] = None
) -> List[ValidationResponse]:
""" Check a TDE class file """
if not file_location.is_file():
return [ValidationError(
'Given TDE Disc file does not exist !!!', data=file_location.name, location=file_location.parent
)]
# Disc class prints a bunch of nonsense, so we force it to be quiet
sys.stdout = open(os.devnull, 'w')
try:
disc = tde.Disc(str(file_location))
except Exception as e: # noqa: broad exception on purpose
return [ValidationError(f'{e}', data=file_location)]
finally:
sys.stdout = sys.__stdout__
results = [ValidationOK('File is a Disc TDE file !', data=file_location)]
if additional_checks:
for fn in additional_checks:
results.extend(fn(disc))
return results
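
# Example (path hypothetical): tde_class_file_check(Path("english.txt")) returns a list of
# ValidationResponse objects; any `additional_checks` callables receive the parsed tde.Disc
# instance and contribute their own responses.
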
class TDE17SubmissionValidation(SubmissionValidation):
dataset: ZRC2017Dataset = Field(default_factory=lambda: ZRC2017Dataset.load())
@property
def params_class(self) -> Type[TDE17BenchmarkParams]:
return TDE17BenchmarkParams
@validation_fn(target='english')
def validating_english(self, class_file: FileItem):
return tde_class_file_check(class_file.file)
    @validation_fn(target='french')
    def validating_french(self, class_file: FileItem):
        return tde_class_file_check(class_file.file)

    @validation_fn(target='mandarin')
    def validating_mandarin(self, class_file: FileItem):
        return tde_class_file_check(class_file.file)

    @validation_fn(target='german')
    def validating_german(self, class_file: FileItem):
        return tde_class_file_check(class_file.file)

    @validation_fn(target='wolof')
    def validating_wolof(self, class_file: FileItem):
        return tde_class_file_check(class_file.file)
class TDE17ScoreDir(ScoreDir):
params: Optional[TDE17BenchmarkParams] = TDE17BenchmarkParams()
@property
def scores(self):
pass
def get_details(self) -> EntryDetails:
""" Build entry details """
train_set = ""
gpu_budget = ""
if self.meta_file is not None:
train_set = self.meta_file.model_info.train_set
gpu_budget = self.meta_file.model_info.gpu_budget
return EntryDetails(
train_set=train_set,
benchmarks=[LeaderboardBenchmarkName.TDE_17],
gpu_budget=gpu_budget,
parameters=self.params.to_meta()
)
def build_scores(self) -> TDE17Scores:
df = self.scores
# todo: compile scores into format
return ...
def build_meta_data(self):
""" Build leaderboard metadata """
return dict(
model_id=self.meta_file.model_info.model_id,
submission_id="",
index=None,
submission_date=datetime.now(),
submitted_by=self.meta_file.username,
description=self.meta_file.model_info.system_description,
publication=dict(
author_short=self.meta_file.publication.author_label,
authors=self.meta_file.publication.authors,
paper_title=self.meta_file.publication.paper_title,
paper_ref=self.meta_file.publication.paper_url,
bib_ref=self.meta_file.publication.bib_reference,
paper_url=self.meta_file.publication.paper_url,
pub_year=self.meta_file.publication.publication_year,
team_name=self.meta_file.publication.team,
institution=self.meta_file.publication.institution,
code=self.meta_file.code_url,
DOI=self.meta_file.publication.DOI,
open_science=self.meta_file.open_source,
),
details=dict(
train_set=self.meta_file.model_info.train_set,
benchmarks=[],
gpu_budget=self.meta_file.model_info.gpu_budget,
parameters=self.params.to_meta(),
)
)
def build_leaderboard(self) -> LeaderboardEntry:
""" Build leaderboard entry for the current submission """
self.load_meta()
return TDE17Entry.parse_obj(
dict(
**self.build_meta_data(),
scores=self.build_scores()
)
)
class TDE17Submission(Submission):
""" Submission for TDE-17 """
sets: Tuple = ('1s', '10s', '120s')
tasks: Tuple = ('english', 'french', 'mandarin', 'german', 'wolof')
@classmethod
def load(
cls, path: Path, *,
tasks=('english', 'french', 'mandarin', 'german', 'wolof'),
sets=('1s', '10s', '120s')
):
items = dict()
if 'english' in tasks:
items['english'] = FileItem.from_file(
path / 'english.txt'
)
if 'french' in tasks:
items['french'] = FileItem.from_file(
path / 'french.txt'
)
if 'mandarin' in tasks:
items['mandarin'] = FileItem.from_file(
path / 'mandarin.txt'
)
if 'german' in tasks:
items['german'] = FileItem.from_file(
path / 'german.txt'
)
if 'wolof' in tasks:
items['wolof'] = FileItem.from_file(
path / 'wolof.txt'
)
submission = cls(
sets=sets,
tasks=tasks,
location=path,
items=Namespace[Item](store=items)
)
# if params not set export defaults
if not submission.params_file.is_file():
TDE17BenchmarkParams().export(submission.params_file)
return submission
def load_parameters(self) -> "TDE17BenchmarkParams":
if self.params_file.is_file():
obj = load_obj(self.params_file)
return TDE17BenchmarkParams.parse_obj(obj)
return TDE17BenchmarkParams()
def __validate_submission__(self):
""" Run validation on the submission data """
self.validation_output += TDE17SubmissionValidation().validate(self)
@classmethod
def init_dir(cls, location: Path):
# create sub-directories
location.mkdir(exist_ok=True, parents=True)
# create necessary files
(location / 'english.txt').touch(exist_ok=True)
(location / 'french.txt').touch(exist_ok=True)
(location / 'mandarin.txt').touch(exist_ok=True)
(location / 'german.txt').touch(exist_ok=True)
(location / 'wolof.txt').touch(exist_ok=True)
# scores dir
(location / 'scores').mkdir(exist_ok=True, parents=True)
# create params template
TDE17BenchmarkParams().export(location / TDE17BenchmarkParams.file_stem)
# create meta template
template = MetaFile.to_template(benchmark_name="tde17")
template.to_yaml(
file=location / MetaFile.file_stem,
excluded={
"file_stem": True,
"model_info": {"model_id"},
"publication": {"bib_reference", "DOI"}
}
)
instruction_file = Path(__file__).parent / "instructions.md"
if instruction_file.is_file():
shutil.copy(instruction_file, location / 'help.md')
def __zippable__(self):
return [
("", self.meta_file),
("", self.params_file),
*[("", self.location / f"{f}.txt") for f in self.tasks],
*[("scores/", f) for f in self.score_dir.iterdir()]
        ]


# --- end of file: zerospeech/submissions/tde17.py (package zerospeech-benchmarks 0.9.4) ---
import datetime
import functools
import shutil
from pathlib import Path
from typing import Optional, Dict
import pandas as pd
from pydantic import Field
from zerospeech import validators, data_loaders
from zerospeech.datasets import ProsAuditLMDataset
from zerospeech.generics import (
FileItem, Item, Namespace
)
from zerospeech.leaderboards import LeaderboardEntry
from zerospeech.leaderboards.prosaudit import ProsAuditLeaderboardEntry, ProsAuditEntryScores
from zerospeech.misc import load_obj
from zerospeech.tasks.lm import ProsodyLMParameters
from ._model import ScoreDir, MetaFile, SubmissionValidation, validation_fn, Submission, add_item
class ProsodySubmissionValidation(SubmissionValidation):
""" Class that contains all function to validate a ProsAudit Submission"""
dataset: ProsAuditLMDataset = Field(default_factory=lambda: ProsAuditLMDataset.load())
@validation_fn(target="english_dev")
def validation_english_dev(self, english_dev: FileItem):
additional_df_checks = [
# Verify that result df has expected columns
functools.partial(
validators.dataframe_column_check, expected_columns=['score']),
# Verify that result df has all filenames in set
functools.partial(
validators.dataframe_index_check,
expected=[f.stem for f in self.dataset.index.subsets.english_dev.items.wav_list.files_list]
),
# Verify that scores are in float
functools.partial(
validators.dataframe_type_check,
col_name='score',
expected_type=float
)
]
# check dataframe
results = validators.dataframe_check(english_dev, additional_df_checks, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# add item tag
add_item('english_dev', results)
return results
@validation_fn(target="english_test")
    def validation_english_test(self, english_test: FileItem):
additional_df_checks = [
# Verify that result df has expected columns
functools.partial(
validators.dataframe_column_check, expected_columns=['score']),
# Verify that result df has all filenames in set
functools.partial(
validators.dataframe_index_check,
expected=[f.stem for f in self.dataset.index.subsets.english_test.items.wav_list.files_list]
),
# Verify that scores are in float
functools.partial(
validators.dataframe_type_check,
col_name='score',
expected_type=float
)
]
# check dataframe
results = validators.dataframe_check(english_test, additional_df_checks, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# add item tag
add_item('english_test', results)
return results
class ProsAuditScoreDir(ScoreDir):
    params: Optional[ProsodyLMParameters] = ProsodyLMParameters()
@property
def english_dev_score_by_pair(self) -> Optional[pd.DataFrame]:
csv_file = self.location / self.params.results_filename.format('english', 'dev', 'by_pair')
if csv_file.is_file():
return data_loaders.load_dataframe(csv_file)
return None
@property
def english_dev_score_by_type(self) -> Optional[pd.DataFrame]:
csv_file = self.location / self.params.results_filename.format('english', 'dev', 'by_type')
if csv_file.is_file():
return data_loaders.load_dataframe(csv_file)
return None
@property
def english_test_score_by_pair(self) -> Optional[pd.DataFrame]:
csv_file = self.location / self.params.results_filename.format('english', 'test', 'by_pair')
if csv_file.is_file():
return data_loaders.load_dataframe(csv_file)
return None
@property
def english_test_score_by_type(self) -> Optional[pd.DataFrame]:
csv_file = self.location / self.params.results_filename.format('english', 'test', 'by_type')
if csv_file.is_file():
return data_loaders.load_dataframe(csv_file)
return None
def build_scores(self) -> ProsAuditEntryScores:
""" Build scores entry from submission scores """
dev_df = self.english_dev_score_by_type
dev_lexical: Dict = dev_df.loc[dev_df['type'] == 'lexical'].to_dict(orient="records")[0]
dev_protosyntax: Dict = dev_df.loc[dev_df['type'] == 'protosyntax'].to_dict(orient="records")[0]
test_df = self.english_test_score_by_type
test_lexical: Dict = test_df.loc[test_df['type'] == 'lexical'].to_dict(orient="records")[0]
test_protosyntax: Dict = test_df.loc[test_df['type'] == 'protosyntax'].to_dict(orient="records")[0]
return ProsAuditEntryScores.parse_obj(
dict(
protosyntax=dict(
dev=dict(
score=dev_protosyntax.get('score'),
n=dev_protosyntax.get('n'),
std=dev_protosyntax.get('std'),
),
test=dict(
score=test_protosyntax.get('score'),
n=test_protosyntax.get('n'),
std=test_protosyntax.get('std'),
)
),
lexical=dict(
dev=dict(
score=dev_lexical.get('score'),
n=dev_lexical.get('n'),
std=dev_lexical.get('std'),
),
test=dict(
score=test_lexical.get('score'),
n=test_lexical.get('n'),
std=test_lexical.get('std'),
)
)
)
)
def build_meta_data(self):
""" Build leaderboard metadata """
return dict(
model_id=self.meta_file.model_info.model_id,
submission_id="",
index=None,
submission_date=datetime.datetime.now(),
submitted_by=self.meta_file.username,
description=self.meta_file.model_info.system_description,
publication=dict(
author_short=self.meta_file.publication.author_label,
authors=self.meta_file.publication.authors,
paper_title=self.meta_file.publication.paper_title,
paper_ref=self.meta_file.publication.paper_url,
bib_ref=self.meta_file.publication.bib_reference,
paper_url=self.meta_file.publication.paper_url,
pub_year=self.meta_file.publication.publication_year,
team_name=self.meta_file.publication.team,
institution=self.meta_file.publication.institution,
code=self.meta_file.code_url,
DOI=self.meta_file.publication.DOI,
open_science=self.meta_file.open_source,
),
details=dict(
train_set=self.meta_file.model_info.train_set,
benchmarks=[],
gpu_budget=self.meta_file.model_info.gpu_budget,
parameters=dict(),
)
)
def build_leaderboard(self) -> LeaderboardEntry:
""" Build leaderboard entry """
self.load_meta()
return ProsAuditLeaderboardEntry.parse_obj(
dict(
**self.build_meta_data(),
scores=self.build_scores()
)
)
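
# Typical flow (a sketch, paths hypothetical): once the prosAudit scores have been written to
# the submission's score directory, a leaderboard entry can be built from them:
#
#   score_dir = ProsodySubmission.load(Path("my_submission")).get_scores()
#   entry = score_dir.build_leaderboard()
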
class ProsodySubmission(Submission):
sets = ('dev', 'test')
tasks = ('english',)
@classmethod
    def load(cls, path: Path, *, sets=('dev', 'test'),
             tasks=('english',)):
""" Load sLMProsody submission """
submission = cls(
sets=sets,
tasks=tasks,
location=path
)
if not submission.params_file.is_file():
ProsodyLMParameters().export(submission.params_file)
items = dict()
if 'english' in tasks:
if 'dev' in sets:
items['english_dev'] = FileItem.from_file(path / 'english_dev.txt')
if 'test' in sets:
items['english_test'] = FileItem.from_file(path / 'english_test.txt')
submission.items = Namespace[Item](store=items)
return submission
def __zippable__(self):
""" Files to include in archive """
return [
("", self.meta_file),
("", self.params_file),
("", self.items.english_dev.file),
("", self.items.english_test.file),
*[("scores/", f) for f in self.score_dir.iterdir()]
]
@classmethod
def init_dir(cls, location: Path):
# create sub-directories
location.mkdir(exist_ok=True, parents=True)
(location / 'english_dev.txt').touch(exist_ok=True)
(location / 'english_test.txt').touch(exist_ok=True)
# scores dir
(location / 'scores').mkdir(exist_ok=True, parents=True)
        # create parameters file
        ProsodyLMParameters().export(location / ProsodyLMParameters.file_stem)
        # create meta-template
        template = MetaFile.to_template(benchmark_name="prosAudit")
template.to_yaml(
file=location / MetaFile.file_stem,
excluded={
"file_stem": True,
"model_info": {"model_id"},
"publication": {"bib_reference", "DOI"}
}
)
instruction_file = Path(__file__).parent / "instructions.md"
if instruction_file.is_file():
shutil.copy(instruction_file, location / 'help.md')
def load_parameters(self) -> ProsodyLMParameters:
if self.params_file.is_file():
obj = load_obj(self.params_file)
return ProsodyLMParameters.parse_obj(obj)
return ProsodyLMParameters()
def __validate_submission__(self):
""" Validate that all files are present in submission """
self.validation_output += ProsodySubmissionValidation().validate(self)
def get_scores(self) -> ProsAuditScoreDir:
return ProsAuditScoreDir(
submission_dir=self.location,
location=self.score_dir,
params=self.params
        )


# --- end of file: zerospeech/submissions/prosAudit.py (package zerospeech-benchmarks 0.9.4) ---
import functools
import shutil
import uuid
from datetime import datetime
from pathlib import Path
from typing import Tuple, List, Optional, Dict
import numpy as np
import pandas as pd
from pydantic import Extra, Field
import zerospeech.validators as validators
from zerospeech.data_loaders import load_dataframe
from zerospeech.datasets import SLM21Dataset
from zerospeech.generics import (
FileItem, Namespace, Item, FileListItem, FileTypes
)
from zerospeech.leaderboards import EntryDetails, LeaderboardBenchmarkName
from zerospeech.leaderboards.sLM21 import (
SLM21LeaderboardEntry, LexicalScores, ScoreTuple,
LexicalExtras, SLM21Scores, SemanticScores,
SemanticScoreSets, SLM21Extras, SyntacticExtras,
SemanticExtras
)
from zerospeech.misc import load_obj
from zerospeech.tasks.lm import SLM21BenchmarkParameters
from ._model import MetaFile, ScoreDir, SubmissionValidation, validation_fn, add_item, Submission
class SLM21SubmissionValidator(SubmissionValidation):
""" Class that contains all functions to validate a sLM21 submission """
dataset: SLM21Dataset = Field(default_factory=lambda: SLM21Dataset.load())
@validation_fn(target='lexical_dev')
def validating_lexical_dev(self, lexical_dev: FileItem):
additional_df_checks = [
# Verify that result df has expected columns
functools.partial(
validators.dataframe_column_check, expected_columns=['score']),
# Verify that result df has all filenames in set
functools.partial(
validators.dataframe_index_check,
expected=[f.stem for f in self.dataset.index.subsets.lexical_dev.items.wav_list.files_list]
),
# Verify that scores are in float
functools.partial(
validators.dataframe_type_check,
col_name='score',
expected_type=float
)
]
# check dataframe
results = validators.dataframe_check(lexical_dev, additional_df_checks, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# add item tag
add_item('lexical_dev', results)
return results
@validation_fn(target='lexical_test')
def validating_lexical_test(self, lexical_test: FileItem):
# check that file is a correct space separated list with two columns
additional_df_checks = [
# Verify that result df has expected columns
functools.partial(
validators.dataframe_column_check, expected_columns=['score']),
# Verify that result df has all filenames in set
functools.partial(
validators.dataframe_index_check,
expected=[f.stem for f in self.dataset.index.subsets.lexical_test.items.wav_list.files_list]
),
# Verify that scores are in float
functools.partial(
validators.dataframe_type_check,
col_name='score',
expected_type=float
)
]
# check dataframe
results = validators.dataframe_check(lexical_test, additional_df_checks, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# add item tag
add_item('lexical_test', results)
return results
@validation_fn(target='semantic_dev_synthetic')
def validating_semantic_dev_synthetic(self, semantic_dev_synthetic: FileListItem):
f_list_checks = [
# Verify that all necessary files are present
functools.partial(
validators.file_list_checker,
expected=self.dataset.index.subsets.semantic_dev.items.synthetic_wav_list.files_list
)
]
additional_checks = [
# Verify that type of array is float
functools.partial(
validators.numpy_dtype_check,
dtype=np.dtype('float')
),
# Verify that array has 2 dimensions
functools.partial(
validators.numpy_dimensions_check,
ndim=2
)
]
# Check file list
results = validators.numpy_array_list_check(
semantic_dev_synthetic, f_list_checks=f_list_checks, additional_checks=additional_checks
)
# add item tag
add_item('semantic_dev_synthetic', results)
return results
@validation_fn(target='semantic_dev_librispeech')
def validating_semantic_dev_librispeech(self, semantic_dev_librispeech: FileListItem):
f_list_checks = [
# Verify that all necessary files are present
functools.partial(
validators.file_list_checker,
expected=self.dataset.index.subsets.semantic_dev.items.librispeech_wav_list.files_list
)
]
additional_checks = [
# Verify that type of array is float
functools.partial(
validators.numpy_dtype_check,
dtype=np.dtype('float')
),
# Verify that array has 2 dimensions
functools.partial(
validators.numpy_dimensions_check,
ndim=2
)
]
# Check file list
results = validators.numpy_array_list_check(
semantic_dev_librispeech, f_list_checks=f_list_checks, additional_checks=additional_checks
)
# add item tag
add_item('semantic_dev_librispeech', results)
return results
@validation_fn(target='semantic_test_synthetic')
def validating_semantic_test_synthetic(self, semantic_test_synthetic: FileListItem):
f_list_checks = [
# Verify that all necessary files are present
functools.partial(
validators.file_list_checker,
expected=self.dataset.index.subsets.semantic_test.items.synthetic_wav_list.files_list
)
]
additional_checks = [
# Verify that type of array is float
functools.partial(
validators.numpy_dtype_check,
dtype=np.dtype('float')
),
# Verify that array has 2 dimensions
functools.partial(
validators.numpy_dimensions_check,
ndim=2
)
]
# Check file list
results = validators.numpy_array_list_check(
semantic_test_synthetic, f_list_checks=f_list_checks, additional_checks=additional_checks
)
# add item tag
add_item('semantic_test_synthetic', results)
return results
@validation_fn(target='semantic_test_librispeech')
def validating_semantic_test_librispeech(self, semantic_test_librispeech: FileListItem):
f_list_checks = [
# Verify that all necessary files are present
functools.partial(
validators.file_list_checker,
expected=self.dataset.index.subsets.semantic_test.items.librispeech_wav_list.files_list
)
]
additional_checks = [
# Verify that type of array is float
functools.partial(
validators.numpy_dtype_check,
dtype=np.dtype('float')
),
# Verify that array has 2 dimensions
functools.partial(
validators.numpy_dimensions_check,
ndim=2
)
]
# Check file list
results = validators.numpy_array_list_check(
semantic_test_librispeech, f_list_checks=f_list_checks, additional_checks=additional_checks
)
# add item tag
add_item('semantic_test_librispeech', results)
return results
@validation_fn(target='syntactic_dev')
def validating_syntactic_dev(self, syntactic_dev: FileItem):
additional_df_checks = [
# Verify that result df has expected columns
functools.partial(
validators.dataframe_column_check, expected_columns=['score']),
# Verify that result df has all filenames in set
functools.partial(
validators.dataframe_index_check,
expected=[f.stem for f in self.dataset.index.subsets.syntactic_dev.items.wav_list.files_list]
),
# Verify that scores are in float
functools.partial(
validators.dataframe_type_check,
col_name='score',
expected_type=float
)
]
# check dataframe
results = validators.dataframe_check(syntactic_dev, additional_df_checks, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# add item tag
add_item('syntactic_dev', results)
return results
@validation_fn(target='syntactic_test')
def validating_syntactic_test(self, syntactic_test: FileItem):
additional_df_checks = [
# Verify that result df has expected columns
functools.partial(
validators.dataframe_column_check, expected_columns=['score']),
# Verify that result df has all filenames in set
functools.partial(
validators.dataframe_index_check,
expected=[f.stem for f in self.dataset.index.subsets.syntactic_test.items.wav_list.files_list]
),
# Verify that scores are in float
functools.partial(
validators.dataframe_type_check,
col_name='score',
expected_type=float
)
]
# check dataframe
results = validators.dataframe_check(syntactic_test, additional_df_checks, sep=' ', header=None,
names=['filename', 'score'], index_col='filename')
# add item tag
add_item('syntactic_test', results)
return results
class SLM21ScoreDir(ScoreDir):
""" Data representation of the sLM21 scores directory """
params: Optional[SLM21BenchmarkParameters] = SLM21BenchmarkParameters()
class Config:
arbitrary_types_allowed = Extra.ignore
@property
def semantic_size(self) -> Dict[str, pd.DataFrame]:
""" Get semantic size from original dataset """
dataset = SLM21Dataset.load()
dev_size = pd.read_csv(dataset.index.subsets.semantic_dev.items.pairs.file, header=0) \
.groupby(['type', 'dataset'], as_index=False).size()
test_size = pd.read_csv(dataset.index.subsets.semantic_test.items.pairs.file, header=0) \
.groupby(['type', 'dataset'], as_index=False).size()
return dict(dev=dev_size, test=test_size)
@property
def lexical_dev_by_pair(self):
csv_file = self.location / self.params.lexical.result_filenames['dev']['by_pair']
return load_dataframe(csv_file)
@property
def lexical_test_by_pair(self):
csv_file = self.location / self.params.lexical.result_filenames['test']['by_pair']
return load_dataframe(csv_file)
@property
def lexical_dev_by_frequency(self):
csv_file = self.location / self.params.lexical.result_filenames['dev']['by_frequency']
return load_dataframe(csv_file)
@property
def lexical_test_by_frequency(self):
csv_file = self.location / self.params.lexical.result_filenames['test']['by_frequency']
return load_dataframe(csv_file)
@property
def lexical_dev_by_length(self):
csv_file = self.location / self.params.lexical.result_filenames['dev']['by_length']
return load_dataframe(csv_file)
@property
def lexical_test_by_length(self):
csv_file = self.location / self.params.lexical.result_filenames['test']['by_length']
return load_dataframe(csv_file)
@property
def semantic_dev_correlation(self):
csv_file = self.location / self.params.semantic.result_filenames['dev']['correlations']
return load_dataframe(csv_file)
@property
def semantic_test_correlation(self):
csv_file = self.location / self.params.semantic.result_filenames['test']['correlations']
return load_dataframe(csv_file)
@property
def syntactic_dev_by_pair(self):
csv_file = self.location / self.params.syntactic.result_filenames['dev']['by_pair']
return load_dataframe(csv_file)
@property
def syntactic_test_by_pair(self):
csv_file = self.location / self.params.syntactic.result_filenames['test']['by_pair']
return load_dataframe(csv_file)
@property
def syntactic_dev_by_type(self):
csv_file = self.location / self.params.syntactic.result_filenames['dev']['by_type']
return load_dataframe(csv_file)
@property
def syntactic_test_by_type(self):
csv_file = self.location / self.params.syntactic.result_filenames['test']['by_type']
return load_dataframe(csv_file)
def lexical_scores(self) -> LexicalScores:
""" Extract lexical resume of scores """
dev_score = self.lexical_dev_by_pair['score'].mean()
test_score = self.lexical_test_by_pair['score'].mean()
def _score_invocab(frame):
# filter out OOVs
frame = frame[frame['frequency'] != 'oov']
# weighted mean
return np.average(
frame['score'].to_numpy(),
weights=frame['n'].to_numpy())
# weighted scores
dev_invocab = _score_invocab(self.lexical_dev_by_frequency)
test_invocab = _score_invocab(self.lexical_test_by_frequency)
return LexicalScores(
all=ScoreTuple(dev=dev_score, test=test_score),
in_vocab=ScoreTuple(dev=dev_invocab, test=test_invocab)
)
def lexical_extras(self):
""" Extract lexical detailed scores """
frequency_dev = self.lexical_dev_by_frequency
frequency_test = self.lexical_test_by_frequency
by_frequency = pd.merge(frequency_dev, frequency_test,
how="outer", on=['frequency'], suffixes=("_dev", "_test"))
length_dev = self.lexical_dev_by_length
length_test = self.lexical_test_by_length
by_length = pd.merge(length_dev, length_test, how="outer", on=['length'], suffixes=('_dev', '_test'))
return LexicalExtras.parse_obj(dict(
by_length=by_length.to_dict(orient='records'),
by_frequency=by_frequency.to_dict(orient='records')
))
def syntactic_scores(self) -> ScoreTuple:
""" Extract syntactic score resume """
dev_mean = self.syntactic_dev_by_pair['score'].mean()
test_mean = self.syntactic_test_by_pair['score'].mean()
return ScoreTuple(dev=dev_mean, test=test_mean)
def syntactic_extras(self) -> List[SyntacticExtras]:
""" Extract syntactic detailed scores """
dev_types = self.syntactic_dev_by_type
test_types = self.syntactic_test_by_type
# merge
merged = pd.merge(dev_types, test_types, how="outer", on=["type"], suffixes=("_dev", "_test"))
merged.rename(columns={'type': 'typeset'}, inplace=True)
return [SyntacticExtras(**se) for se in merged.to_dict(orient='records')]
def semantic_scores(self) -> SemanticScores:
""" Extract semantic score resume """
dev_correlations = self.semantic_dev_correlation
test_correlations = self.semantic_test_correlation
# Mean
dev_librispeech_mean = dev_correlations[dev_correlations['type'] == 'librispeech']['correlation'].mean()
dev_synthetic_mean = dev_correlations[dev_correlations['type'] == 'synthetic']['correlation'].mean()
test_librispeech_mean = test_correlations[test_correlations['type'] == 'librispeech']['correlation'].mean()
test_synthetic_mean = test_correlations[test_correlations['type'] == 'synthetic']['correlation'].mean()
# Weighted Mean
semantic_size = self.semantic_size
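        # weights are the number of pairs per (type, dataset) group, as computed by the
        # semantic_size property from the dataset's pairs files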
dev_correlations['size'] = semantic_size['dev']['size']
dev_librispeech_wmean = np.average(
dev_correlations[dev_correlations['type'] == 'librispeech']['correlation'].to_numpy(),
weights=dev_correlations[dev_correlations['type'] == 'librispeech']['size'].to_numpy())
dev_synthetic_wmean = np.average(
dev_correlations[dev_correlations['type'] == 'synthetic']['correlation'].to_numpy(),
weights=dev_correlations[dev_correlations['type'] == 'synthetic']['size'].to_numpy())
test_correlations['size'] = semantic_size['test']['size']
test_librispeech_wmean = np.average(
test_correlations[test_correlations['type'] == 'librispeech']['correlation'].to_numpy(),
weights=test_correlations[test_correlations['type'] == 'librispeech']['size'].to_numpy())
test_synthetic_wmean = np.average(
test_correlations[test_correlations['type'] == 'synthetic']['correlation'].to_numpy(),
weights=test_correlations[test_correlations['type'] == 'synthetic']['size'].to_numpy())
return SemanticScores(
normal=SemanticScoreSets(
synthetic=ScoreTuple(dev=dev_synthetic_mean, test=test_synthetic_mean),
librispeech=ScoreTuple(dev=dev_librispeech_mean, test=test_librispeech_mean),
),
weighted=SemanticScoreSets(
synthetic=ScoreTuple(dev=dev_synthetic_wmean, test=test_synthetic_wmean),
librispeech=ScoreTuple(dev=dev_librispeech_wmean, test=test_librispeech_wmean),
)
)
def semantic_extras(self) -> List[SemanticExtras]:
""" Extract semantic score resume """
dev_correlations = self.semantic_dev_correlation
test_correlations = self.semantic_test_correlation
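        # pivot the long-format correlations (one row per dataset/type) into one row per
        # dataset with separate 'librispeech' and 'synthetic' columns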
ndev_correlations = dev_correlations \
.set_index(['dataset', dev_correlations.groupby('dataset').cumcount()])['correlation'] \
.unstack() \
.reset_index()
ndev_correlations.columns = ['dataset', 'librispeech', 'synthetic']
ndev_correlations["set"] = "dev"
ntest_correlations = test_correlations \
.set_index(['dataset', test_correlations.groupby('dataset').cumcount()])['correlation'] \
.unstack() \
.reset_index()
ntest_correlations.columns = ['dataset', 'librispeech', 'synthetic']
ntest_correlations["set"] = "test"
# DeprecationWarning from pandas: append is to be replaced by concat
correlations = pd.concat([ndev_correlations, ntest_correlations], axis=0)
# correlations = ndev_correlations.append(ntest_correlations)
return [SemanticExtras(**se) for se in correlations.to_dict(orient='records')]
def build_scores(self) -> SLM21Scores:
""" Extract all score resume """
return SLM21Scores(
lexical=self.lexical_scores(),
syntactic=self.syntactic_scores(),
semantic=self.semantic_scores()
)
def build_extras(self) -> SLM21Extras:
""" Extract all detailed scores """
return SLM21Extras(
lexical=self.lexical_extras(),
syntactic=self.syntactic_extras(),
semantic=self.semantic_extras()
)
def get_details(self) -> EntryDetails:
""" Build entry details """
train_set = ""
gpu_budget = ""
if self.meta_file is not None:
train_set = self.meta_file.model_info.train_set
gpu_budget = self.meta_file.model_info.gpu_budget
return EntryDetails(
train_set=train_set,
benchmarks=[LeaderboardBenchmarkName.sLM_21],
gpu_budget=gpu_budget,
parameters=self.params.to_meta()
)
def build_leaderboard(self) -> SLM21LeaderboardEntry:
""" Build leaderboard entry from calculated scores """
model_id = ""
submission_id = str(uuid.uuid4())
submitted_by = ""
description = ""
if self.meta_file is not None:
model_id = self.meta_file.model_info.model_id
submitted_by = self.meta_file.username
description = self.meta_file.model_info.system_description
return SLM21LeaderboardEntry(
model_id=model_id,
submission_id=submission_id,
index=-1,
submission_date=datetime.now(),
submitted_by=submitted_by,
description=description,
publication=self.get_publication_info(),
details=self.get_details(),
scores=self.build_scores(),
extras=self.build_extras()
)
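
# A usage sketch (path hypothetical): once the sLM21 score files have been computed into the
# score directory, the leaderboard entry can be assembled from them:
#
#   score_dir = SLM21ScoreDir(submission_dir=Path("my_submission"),
#                             location=Path("my_submission/scores"))
#   entry = score_dir.build_leaderboard()
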
class SLM21Submission(Submission):
""" Submission for SLM21 Benchmark """
sets: Tuple = ('dev', 'test')
tasks: Tuple = ('lexical', 'syntactic', 'semantic')
@classmethod
def load(cls, path: Path, *,
tasks=('lexical', 'syntactic', 'semantic'),
sets=('dev', 'test')):
""" Load submission for sLM21 benchmark (filter by available tasks & sets) """
# submission object
submission = cls(
sets=sets,
tasks=tasks,
location=path
)
# if params not set export defaults
if not submission.params_file.is_file():
SLM21BenchmarkParameters().export(submission.params_file)
# Load items
items = dict()
# include lexical task for each set
if 'lexical' in tasks:
lexical_dir = path / 'lexical'
if 'dev' in sets:
items['lexical_dev'] = FileItem.from_file(lexical_dir / "dev.txt")
if 'test' in sets:
items['lexical_test'] = FileItem.from_file(lexical_dir / "test.txt")
# include syntactic for each set
if 'syntactic' in tasks:
syntactic_dir = path / 'syntactic'
if 'dev' in sets:
items['syntactic_dev'] = FileItem.from_file(syntactic_dir / "dev.txt")
if 'test' in sets:
items['syntactic_test'] = FileItem.from_file(syntactic_dir / "test.txt")
# include semantic task for each set
file_ext = submission.params.syntactic.score_files_type.replace('.', '')
file_ext = FileTypes(file_ext)
if 'semantic' in tasks:
semantic_dir = path / 'semantic'
if 'dev' in sets:
items['semantic_dev_synthetic'] = FileListItem.from_dir(
semantic_dir / "dev/synthetic", f_type=file_ext
)
items['semantic_dev_librispeech'] = FileListItem.from_dir(
semantic_dir / "dev/librispeech", f_type=file_ext
)
if 'test' in sets:
items['semantic_test_synthetic'] = FileListItem.from_dir(
semantic_dir / "test/synthetic", f_type=file_ext
)
items['semantic_test_librispeech'] = FileListItem.from_dir(
semantic_dir / "test/librispeech", f_type=file_ext
)
submission.items = Namespace[Item](store=items)
return submission
def __zippable__(self) -> List[Tuple[str, Path]]:
return [
("", self.meta_file),
("", self.params_file),
("lexical/", self.items.lexical_dev.file),
("lexical/", self.items.lexical_test.file),
("lexical/", self.items.lexical_test.file),
("syntactic/", self.items.syntactic_dev.file),
("syntactic/", self.items.syntactic_test.file),
*[("semantic/dev/synthetic/", f) for f in self.items.semantic_dev_synthetic.files_list],
*[("semantic/dev/librispeech/", f) for f in self.items.semantic_dev_librispeech.files_list],
*[("semantic/test/synthetic/", f) for f in self.items.semantic_test_synthetic.files_list],
*[("semantic/test/librispeech/", f) for f in self.items.semantic_test_librispeech.files_list],
*[("scores/", f) for f in self.score_dir.iterdir()]
]
@classmethod
def init_dir(cls, location: Path):
# create sub-directories
location.mkdir(exist_ok=True, parents=True)
(location / 'lexical').mkdir(exist_ok=True, parents=True)
(location / 'syntactic').mkdir(exist_ok=True, parents=True)
(location / 'semantic/dev/synthetic').mkdir(exist_ok=True, parents=True)
(location / 'semantic/dev/librispeech').mkdir(exist_ok=True, parents=True)
        (location / 'semantic/test/synthetic').mkdir(exist_ok=True, parents=True)
        (location / 'semantic/test/librispeech').mkdir(exist_ok=True, parents=True)
# scores dir
(location / 'scores').mkdir(exist_ok=True, parents=True)
# create parameters file
SLM21BenchmarkParameters().export(location / SLM21BenchmarkParameters.file_stem)
# create meta-template
template = MetaFile.to_template(benchmark_name="sLM21")
template.to_yaml(
file=location / MetaFile.file_stem,
excluded={
"file_stem": True,
"model_info": {"model_id"},
"publication": {"bib_reference", "DOI"}
}
)
instruction_file = Path(__file__).parent / "instructions.md"
if instruction_file.is_file():
shutil.copy(instruction_file, location / 'help.md')
def load_parameters(self) -> SLM21BenchmarkParameters:
if self.params_file.is_file():
obj = load_obj(self.params_file)
return SLM21BenchmarkParameters.parse_obj(obj)
return SLM21BenchmarkParameters()
def __validate_submission__(self):
""" Run validation on the submission data """
self.validation_output += SLM21SubmissionValidator().validate(self)
def get_scores(self):
""" """
        pass


# --- end of file: zerospeech/submissions/sLM21.py (package zerospeech-benchmarks 0.9.4) ---
import functools
import shutil
from datetime import datetime
from pathlib import Path
from typing import Tuple, Optional, List
import numpy as np
from pydantic import Field
import zerospeech.validators as validators
from zerospeech.data_loaders import load_dataframe
from zerospeech.datasets import AbxLSDataset
from zerospeech.generics import (
FileListItem, Namespace, Item, FileTypes
)
from zerospeech.leaderboards import EntryDetails, LeaderboardBenchmarkName, LeaderboardEntry
from zerospeech.leaderboards.abxLS import (
ABXLSEntry, ABXLSScoreSubType
)
from zerospeech.misc import load_obj
from zerospeech.settings import get_settings
from zerospeech.tasks.abx import abxLS_phoneme
from ._model import ScoreDir, MetaFile, SubmissionValidation, validation_fn, add_item, Submission
st = get_settings()
class AbxLSSubmissionValidator(SubmissionValidation):
""" File Validation for an ABXLS submission """
dataset: AbxLSDataset = Field(default_factory=lambda: AbxLSDataset.load())
    def _validate_file_list(self, item: FileListItem, expected, tag: str):
        """ Run the shared ABX-LS file-list checks & tag the results """
        f_list_checks = [
            # Verify that all necessary files are present
            functools.partial(
                validators.file_list_checker,
                expected=expected
            )
        ]
        additional_checks = [
            # Verify that type of array is float
            functools.partial(
                validators.numpy_dtype_check,
                dtype=np.dtype('float')
            ),
            # Verify that array has 2 dimensions
            functools.partial(
                validators.numpy_dimensions_check,
                ndim=2
            ),
            # Verify that files have the same dimensions
            validators.numpy_col_comparison(1)
        ]
        # Check file list
        results = validators.numpy_array_list_check(
            item, f_list_checks=f_list_checks, additional_checks=additional_checks
        )
        # add item tag
        add_item(tag, results)
        return results
    @validation_fn(target='dev_clean')
    def validate_dev_clean(self, dev_clean: FileListItem):
        return self._validate_file_list(
            dev_clean, self.dataset.index.subsets.dev_clean.items.wav_list.files_list, 'dev_clean'
        )
    @validation_fn(target='dev_other')
    def validate_dev_other(self, dev_other: FileListItem):
        return self._validate_file_list(
            dev_other, self.dataset.index.subsets.dev_other.items.wav_list.files_list, 'dev_other'
        )
    @validation_fn(target='test_clean')
    def validate_test_clean(self, test_clean: FileListItem):
        return self._validate_file_list(
            test_clean, self.dataset.index.subsets.test_clean.items.wav_list.files_list, 'test_clean'
        )
    @validation_fn(target='test_other')
    def validate_test_other(self, test_other: FileListItem):
        return self._validate_file_list(
            test_other, self.dataset.index.subsets.test_other.items.wav_list.files_list, 'test_other'
        )
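# --- Hedged usage sketch (not part of the original module) ---------------------
# Shows how the validator above is typically driven: load a submission directory,
# run the reflection-based validate() and report any problems. The "my-submission"
# directory name is an assumption made for this example.
def _example_validate_abxls(submission_dir: Path = Path("my-submission")) -> bool:
    submission = AbxLSSubmission.load(submission_dir)
    ctx = AbxLSSubmissionValidator().validate(submission)
    ctx.print(allow_warnings=True)
    # only errors (not warnings) fail the validation
    return not ctx.fails()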
class ABXLSScoreDir(ScoreDir):
params: Optional[abxLS_phoneme.ABX2Parameters] = abxLS_phoneme.ABX2Parameters()
@property
def scores_phonetic(self):
csv_file = (self.location / self.params.result_filename).with_suffix('.csv')
return load_dataframe(csv_file)
def get_details(self) -> EntryDetails:
""" Build entry details """
train_set = ""
gpu_budget = ""
if self.meta_file is not None:
train_set = self.meta_file.model_info.train_set
gpu_budget = self.meta_file.model_info.gpu_budget
return EntryDetails(
train_set=train_set,
benchmarks=[LeaderboardBenchmarkName.ABX_LS],
gpu_budget=gpu_budget,
parameters=self.params.to_meta()
)
def build_scores(self) -> List[ABXLSScoreSubType]:
""" Extract & format scores """
scores = []
for _, row in self.scores_phonetic.iterrows():
try:
seed = int(row['seed'])
except ValueError:
seed = None
scores.append(
ABXLSScoreSubType(
subset=row['subset'],
granularity=row['granularity'],
speaker_mode=row['speaker_mode'],
context_mode=row['context_mode'],
score=row['score'],
pooling=row['pooling'],
seed=seed
)
)
return scores
def build_meta_data(self):
""" Build leaderboard metadata """
return dict(
model_id=self.meta_file.model_info.model_id,
submission_id="",
index=None,
submission_date=datetime.now(),
submitted_by=self.meta_file.username,
description=self.meta_file.model_info.system_description,
publication=dict(
author_short=self.meta_file.publication.author_label,
authors=self.meta_file.publication.authors,
paper_title=self.meta_file.publication.paper_title,
paper_ref=self.meta_file.publication.paper_url,
bib_ref=self.meta_file.publication.bib_reference,
paper_url=self.meta_file.publication.paper_url,
pub_year=self.meta_file.publication.publication_year,
team_name=self.meta_file.publication.team,
institution=self.meta_file.publication.institution,
code=self.meta_file.code_url,
DOI=self.meta_file.publication.DOI,
open_science=self.meta_file.open_source,
),
details=dict(
train_set=self.meta_file.model_info.train_set,
benchmarks=[],
gpu_budget=self.meta_file.model_info.gpu_budget,
parameters=self.params.to_meta(),
)
)
def build_leaderboard(self) -> LeaderboardEntry:
""" Build leaderboard entry for the current submission """
self.load_meta()
return ABXLSEntry.parse_obj(
dict(
**self.build_meta_data(),
scores=self.build_scores()
)
)
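# Hedged usage sketch (not part of the original module): serialising the computed
# scores of a submission as a leaderboard entry; the output filename is an
# assumption made for this example.
def _example_write_leaderboard(scores: ABXLSScoreDir, out: Path = Path("leaderboard.json")) -> None:
    entry = scores.build_leaderboard()
    out.write_text(entry.json(indent=4))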
class AbxLSSubmission(Submission):
""" Submission for ABX-LS-ROB Benchmark """
sets: Tuple = ('dev', 'test')
tasks: Tuple = ('clean', 'other')
@classmethod
def load(cls, path: Path, *,
tasks=('clean', 'other'),
sets=('dev', 'test')):
""" Load submission for ABX-Ls-ROB benchmark (filter by available tasks & sets) """
# submission object
submission = cls(
sets=sets,
tasks=tasks,
location=path
)
# if params not set export defaults
if not submission.params_file.is_file():
abxLS_phoneme.ABX2Parameters().export(submission.params_file)
# Load items
file_ext = submission.params.score_file_type.replace('.', '')
file_ext = FileTypes(file_ext)
items = dict()
if 'clean' in tasks:
if 'dev' in sets:
items['dev_clean'] = FileListItem.from_dir(
path / 'dev-clean', f_type=file_ext
)
if 'test' in sets:
items['test_clean'] = FileListItem.from_dir(
path / 'test-clean', f_type=file_ext
)
if 'other' in tasks:
if 'dev' in sets:
items['dev_other'] = FileListItem.from_dir(
path / 'dev-other', f_type=file_ext
)
if 'test' in sets:
items['test_other'] = FileListItem.from_dir(
path / 'test-other', f_type=file_ext
)
submission.items = Namespace[Item](store=items)
return submission
def load_parameters(self) -> abxLS_phoneme.ABX2Parameters:
if self.params_file.is_file():
obj = load_obj(self.params_file)
return abxLS_phoneme.ABX2Parameters.parse_obj(obj)
return abxLS_phoneme.ABX2Parameters()
def __validate_submission__(self):
""" Run validation on the submission data """
self.validation_output += AbxLSSubmissionValidator().validate(self)
@classmethod
def init_dir(cls, location: Path):
""" Create template submission directory """
# create sub-directories
location.mkdir(exist_ok=True, parents=True)
(location / 'dev-clean').mkdir(exist_ok=True, parents=True)
(location / 'dev-other').mkdir(exist_ok=True, parents=True)
(location / 'test-clean').mkdir(exist_ok=True, parents=True)
(location / 'test-other').mkdir(exist_ok=True, parents=True)
# scores dir
(location / 'scores').mkdir(exist_ok=True, parents=True)
# create parameters file
abxLS_phoneme.ABX2Parameters().export(location / abxLS_phoneme.ABX2Parameters.file_stem)
# create meta-template
template = MetaFile.to_template(benchmark_name="abxLS")
template.to_yaml(
file=location / MetaFile.file_stem,
excluded={
"file_stem": True,
"model_info": {"model_id"},
"publication": {"bib_reference", "DOI"}
}
)
instruction_file = Path(__file__).parent / "instructions.md"
if instruction_file.is_file():
shutil.copy(instruction_file, location / 'help.md')
def __zippable__(self):
return [
("", self.meta_file),
("", self.params_file),
*[("dev-clean/", f) for f in self.items.dev_clean.files_list],
*[("dev-other/", f) for f in self.items.dev_other.files_list],
*[("test-clean/", f) for f in self.items.test_clean.files_list],
*[("test-other/", f) for f in self.items.test_other.files_list],
*[("scores/", f) for f in self.score_dir.iterdir()]
]
def get_scores(self):
""" Load score Dir"""
return ABXLSScoreDir(
submission_dir=self.location,
location=self.score_dir,
params=self.params
) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/submissions/abxLS.py | abxLS.py |
import re
from datetime import datetime
from pathlib import Path
from typing import Optional, Union, Dict, ClassVar, Literal
import yaml
from pydantic import BaseModel, AnyUrl, ValidationError
from zerospeech.misc import load_obj
from zerospeech.leaderboards import PublicationEntry
from .validation_context import ValidationContext
BenchmarkList = Literal[
"prosAudit", "sLM21", "abxLS", "abx17", "tde17",
"test-prosAudit", "test-sLM21", "test-abxLS", "test-abx17", "test-tde17",
]
def check_no_template(obj, root: str = "") -> ValidationContext:
""" Check that object str fields do not have template values (in <,> tags)"""
ctx = ValidationContext()
if root:
root = f"{root}."
for key in obj.__fields__.keys():
attr = getattr(obj, key)
if isinstance(attr, str):
ctx.error_assertion(
re.match(r'<.*>', attr) is None,
msg=f"{root}{key}: has value from template"
)
return ctx
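# Hedged usage sketch (not part of the original module): check_no_template is the
# guard rejecting meta.yaml fields that still hold the "<...>" placeholders
# produced by MetaFile.to_template(); the root label is an assumption.
def _example_has_no_placeholders(model_info: "ModelInfo") -> bool:
    ctx = check_no_template(model_info, root="model_info")
    return not ctx.fails()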
class PublicationInfo(BaseModel):
author_label: str = ""
authors: str
paper_title: Optional[str]
paper_url: Optional[str]
publication_year: int
bib_reference: Optional[str]
DOI: Optional[str]
institution: str
team: Optional[str]
def get_validation(self) -> ValidationContext:
""" Get Validation of PublicationInfo"""
ctx = ValidationContext()
# check no template
ctx += check_no_template(self, root="publication")
# publication.authors (required)
ctx.error_assertion(
self.authors is not None and len(self.authors) > 0,
msg="publication.authors : was not found or empty"
)
        # publication.institution (required)
ctx.error_assertion(
self.institution is not None and len(self.institution) > 0,
msg="publication.institution : was not found or empty"
)
        # publication.publication_year (required)
ctx.error_assertion(
self.publication_year is not None,
msg="publication.publication_year : was not found or empty"
)
        # publication.paper_title (recommended)
        ctx.warn_assertion(
            self.paper_title is not None and len(self.paper_title) > 0,
            msg="publication.paper_title : It is recommended to add the publication title"
        )
        # publication.paper_url (recommended)
        ctx.warn_assertion(
            self.paper_url is not None and len(self.paper_url) > 0,
            msg="publication.paper_url : It is recommended to add the publication URL"
        )
return ctx
class ModelInfo(BaseModel):
model_id: Optional[str]
system_description: str
train_set: str
gpu_budget: Optional[str]
def get_validation(self) -> ValidationContext:
""" Get Validation of ModelInfo"""
ctx = ValidationContext()
# check no template
ctx += check_no_template(self, root="model_info")
# model_info.system_description (required)
ctx.error_assertion(
self.system_description is not None and len(self.system_description) > 0,
msg="model_info.system_description : was not found or empty"
)
# model_info.train_set (required)
ctx.error_assertion(
self.train_set is not None and len(self.train_set) > 0,
msg="model_info.train_set : was not found or empty"
)
# model_info.gpu_budget (recommended)
ctx.warn_assertion(
self.gpu_budget is not None,
msg="model_info.gpu_budget : It is recommended to add a GPU budget (GPU training time estimation)"
)
return ctx
class MetaFile(BaseModel):
username: Optional[str]
submission_id: Optional[str]
benchmark_name: BenchmarkList
model_info: ModelInfo
publication: PublicationInfo
open_source: bool
code_url: Optional[Union[AnyUrl, str]]
file_stem: ClassVar[str] = "meta.yaml"
validation_context: ValidationContext = ValidationContext()
class Config:
arbitrary_types_allowed = True
fields = {
'validation_context': {'exclude': True},
'file_stem': {'exclude': True}
}
@classmethod
def from_file(cls, file: Path, enforce: bool = False):
""" Load meta from object file (YAML, JSON, etc) """
if not file.is_file() and not enforce:
return None
return cls.parse_obj(load_obj(file))
@classmethod
def to_template(cls, benchmark_name: BenchmarkList):
return cls(
username="<<auto-generated(str>): username on the zerospeech.com platform>",
benchmark_name=benchmark_name,
model_info=ModelInfo(
model_id="<auto-generated>",
system_description="<required(str): a description of the system>",
train_set="<required(str): the dateset used to train the system>",
gpu_budget="<optional(str): the number of gpu hours used to train the system>",
),
publication=PublicationInfo(
author_label="<required(str): a short label used for reference (ex: author1 et al.)>",
authors="<required(str): the full names of the authors of the system (separated by commas)>",
paper_title="<optional(str): the title of the paper referencing the system/submission>",
paper_url="<optional(str): A URL referencing the paper online (arxiv.org or other)>",
publication_year=datetime.now().year,
institution="<required(str): name of the institution (University, company, etc..)>",
team="<optional(str): name of the team>"
),
open_source=True,
code_url="<optional(str): a url to a github or other with the code used to create this system>"
)
def set_system_values(self, submission_location: Path, username: str, author_label: str):
""" Update or set values managed by the submit system """
self.username = username
self.publication.author_label = author_label
# write to file
self.to_yaml(submission_location / self.file_stem, excluded=dict())
def set_model_id(self, submission_location: Path, model_id: str):
self.model_info.model_id = model_id
# write to file
self.to_yaml(submission_location / self.file_stem, excluded=dict())
def set_submission_id(self, submission_location: Path, submission_id: str):
self.submission_id = submission_id
# write to file
self.to_yaml(submission_location / self.file_stem, excluded=dict())
def get_publication_info(self) -> PublicationEntry:
pub = self.publication
paper_ref = None
if pub.authors and pub.paper_title:
paper_ref = f"{pub.authors} ({pub.publication_year} {pub.paper_title})"
return PublicationEntry(
author_short=pub.author_label,
authors=pub.authors,
paper_title=pub.paper_title,
paper_ref=paper_ref,
bib_ref=pub.bib_reference,
paper_url=pub.paper_url,
pub_year=pub.publication_year,
team_name=pub.team,
institution=pub.institution,
code=self.code_url,
DOI=pub.DOI,
open_science=self.open_source
)
def to_yaml(self, file: Path, excluded: Dict):
with file.open("w") as fp:
yaml.dump(dict(self._iter(to_dict=True, exclude=excluded)), fp)
def is_valid(self) -> bool:
"""Check if meta.yaml has minimal values for submission """
validation = ValidationContext()
# check no template
validation += check_no_template(self)
# username (required)
validation.error_assertion(
self.username is not None and len(self.username) > 0,
msg="Username is required"
)
# url (recommended)
validation.warn_assertion(
self.code_url is not None and len(self.code_url) > 0,
msg="code_url : If possible we would appreciate a URL to the code of the system"
)
validation += self.model_info.get_validation()
validation += self.publication.get_validation()
validation.add_filename(filename=self.file_stem)
self.validation_context = validation
return not validation.fails()
@classmethod
def benchmark_from_submission(cls, location: Path) -> Optional[BenchmarkList]:
""" Extract the benchmark name from a given submission """
meta_file = location / cls.file_stem
if not meta_file.is_file():
return None
with meta_file.open() as fp:
meta_obj = yaml.load(fp, Loader=yaml.FullLoader)
try:
meta = cls.parse_obj(meta_obj)
return meta.benchmark_name
except ValidationError as e:
print(e)
return None | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/submissions/_model/meta_file.py | meta_file.py |
import abc
import copy
from typing import Optional, Protocol, Literal, List, Union
from zerospeech.out import error_console, warning_console
class ValidationResponse(abc.ABC):
""" Abstract class defining a Message object specifying validation Errors/Warnings/Checks """
def __init__(self, msg, *, data=None, item_name=None, filename=None, location=None):
self.item_name = item_name
self.filename = filename
self.location = location
self.msg = msg
self.data = data
def valid(self):
return getattr(self, '__is_valid__', False)
def warning(self):
return getattr(self, '__is_warning__', False)
def error(self):
return getattr(self, '__is_error__', False)
def ok(self):
return getattr(self, '__is_ok__', False)
def __str__(self):
item_name = '-'
if self.item_name:
item_name = self.item_name
filename = '[-'
if self.filename:
filename = f"[{self.filename}"
location = ':-]'
if self.location:
location = f":{self.location}]"
msg = ''
if self.msg:
msg = self.msg
cls_name = self.__class__.__name__
return f'{cls_name}({item_name}){filename}{location}>> {msg}'
class ValidationWarning(ValidationResponse):
""" Class designating a validation warning """
__is_warning__ = True
__is_valid__ = True
class ValidationError(ValidationResponse):
""" Class designating a validation error """
__is_error__ = True
__is_valid__ = False
class ValidationOK(ValidationResponse):
""" Class designating a successful validation check """
__is_ok__ = True
__is_valid__ = True
class HasStr(Protocol):
def __str__(self) -> str:
""" Convert to string """
pass
class ValidationContext:
def __init__(self):
""" Initially context is empty """
self._outputs: List[ValidationResponse] = []
def __len__(self):
""" Return the number of Responses in context """
return len(self._outputs)
def __invert__(self) -> List[ValidationResponse]:
""" Bitwise invert extracts the context
ex:
ctx = ValidationContext()
res: List[ValidationResponse] = ~ctx
"""
return self._outputs
def __lshift__(self, item: Union[ValidationResponse, List[ValidationResponse]]):
""" Extend outputs
ex:
ctx = ValidationContext()
# appends a list of responses into the context
res: List[ValidationResponse] = ... # do validation stuff
ctx << res
# append singular item to the context
a_random_resp: ValidationResponse = ... # some other validation
ctx << a_random_resp
"""
if isinstance(item, list):
self._outputs.extend(item)
else:
if not isinstance(item, ValidationResponse):
raise ValueError(f'Cannot extend item of type {type(item)}')
self._outputs.append(item)
def __add__(self, other: "ValidationContext") -> "ValidationContext":
""" Addition creates new context
ex:
ctx1: ValidationContext = ... # a validation process
ctx2: ValidationContext = ... # another validation process
# ctx3 contains ctx1 and ctx2 responses
ctx3: ValidationContext = ctx1 + ctx2
"""
if not isinstance(other, self.__class__):
raise ValueError(f'Cannot add item of type {type(other)}')
nw_ctx = self.__class__()
nw_ctx._outputs = [
*self._outputs,
*other._outputs
]
return nw_ctx
def __iadd__(self, other: "ValidationContext") -> "ValidationContext":
""" Allow += of two contexts """
if not isinstance(other, self.__class__):
raise ValueError(f'Cannot add item of type {type(other)}')
return self + other
def assertion(self, expr: bool, as_type: Literal['error', 'warning'], msg: str, **kwargs):
""" Create an assertion """
if not expr:
if as_type == 'error':
self._outputs.append(
ValidationError(
msg, item_name=kwargs.get("item_name", None),
data=kwargs.get("data", None), filename=kwargs.get("filename", None),
location=kwargs.get("location", None)
)
)
elif as_type == 'warning':
self._outputs.append(
ValidationWarning(
msg, item_name=kwargs.get("item_name", None),
data=kwargs.get("data", None), filename=kwargs.get("filename", None),
location=kwargs.get("location", None)
)
)
def error_assertion(
self, expr: bool, *, msg: str, item_name: Optional[str] = None,
filename: Optional[str] = None, location: Optional[str] = None,
data: Optional[HasStr] = None
):
""" Create an error assertion """
self.assertion(
expr, as_type='error', msg=msg, item_name=item_name, filename=filename,
location=location, data=data
)
def warn_assertion(
self, expr: bool, *, msg: str, item_name: Optional[str] = None,
filename: Optional[str] = None, location: Optional[str] = None,
data: Optional[HasStr] = None
):
""" Create an error assertion """
self.assertion(
expr, as_type='warning', msg=msg, item_name=item_name, filename=filename,
location=location, data=data
)
def add_filename(self, filename):
""" Add filename to all assertions """
for i in self._outputs:
i.filename = filename
def add_item(self, item_name: str):
""" Add item_name to all assertions """
for i in self._outputs:
i.item_name = item_name
def print(self, allow_warnings: bool = True, limit: int = -1):
""" Print Outputs """
error_list = [r for r in self._outputs if not r.ok()]
        if not allow_warnings:
            error_list = [r for r in error_list if not r.warning()]
        if limit > 0:
            error_list = error_list[:limit]
for item in error_list:
if item.warning() and allow_warnings:
warning_console.print(item)
else:
error_console.print(item)
def fails(self) -> bool:
""" Check if Validation Fails """
# only errors fail the validation
return len([r for r in self._outputs if not r.valid()]) != 0
def has_warnings(self) -> bool:
""" Check if Validation has warnings """
return len([r for r in self._outputs if r.warning()]) > 0
def get_ok(self) -> "ValidationContext":
""" Filter Ok Messages """
vtx = ValidationContext()
vtx << [r for r in self._outputs if r.ok()]
return vtx
def get_warnings(self) -> "ValidationContext":
""" Filter Warning Messages """
vtx = ValidationContext()
vtx << [r for r in self._outputs if r.warning()]
return vtx
def get_errors(self) -> "ValidationContext":
"""Filter Error Messages """
vtx = ValidationContext()
vtx << [r for r in self._outputs if r.error()]
return vtx | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/submissions/_model/validation_context.py | validation_context.py |
import abc
from functools import wraps
from pathlib import Path
from typing import List, Type, TYPE_CHECKING
from typing import Optional, ClassVar
from pydantic import BaseModel
from pydantic import Field
from zerospeech.generics import Item, Namespace
from zerospeech.out import error_console, warning_console
from zerospeech.tasks import BenchmarkParameters
from .meta_file import MetaFile
from .score_dir import ScoreDir
from .validation_context import ValidationResponse, ValidationWarning, ValidationContext
if TYPE_CHECKING:
from zerospeech.datasets import Dataset
def validation_fn(target: str):
""" Wrapper function to mark validation items """
def fn_wrapper(method):
@wraps(method)
def _impl(self, *method_args, **method_kwargs):
return method(self, *method_args, **method_kwargs)
_impl._validation_fn = True
_impl._validation_target = target
return _impl
return fn_wrapper
def add_item(item_name: str, resp: List[ValidationResponse]):
""" Add item name to Validation Response list """
for r in resp:
r.item_name = item_name
def add_filename(filename: str, resp: List[ValidationResponse]):
""" Add filename to a Validation Response List """
for r in resp:
r.filename = filename
def show_errors(resp: List[ValidationResponse], allow_warnings: bool = True):
""" Print ValidationResponse Error (and warnings) """
error_list = [r for r in resp if not r.ok()]
if not allow_warnings:
        error_list = [r for r in error_list if not r.warning()]
for item in error_list:
if item.warning() and allow_warnings:
warning_console.log(item)
else:
error_console.log(item)
# errors only fail the validation
return len([r for r in resp if not r.valid()]) != 0
class SubmissionValidation(BaseModel, abc.ABC):
dataset: "Dataset"
def _is_validation_fn(self, fn_name):
fn = getattr(self, fn_name, {})
return getattr(fn, '_validation_fn', False)
def _get_validation_target(self, fn_name):
fn = getattr(self, fn_name, {})
return getattr(fn, '_validation_target')
def validate(self, submission: 'Submission') -> ValidationContext:
""" Run validation --> A validation context """
vd_ctx = ValidationContext()
validators_items = {
f"{self._get_validation_target(a)}": getattr(self, a)
for a in dir(self) if self._is_validation_fn(a)
}
for name, item in iter(submission.items):
validator = validators_items.get(name, None)
if validator is not None:
res = validator(item)
vd_ctx << res
else:
vd_ctx << ValidationWarning("no validation found", item_name=name)
return vd_ctx
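# Hedged sketch (not part of the original module): the @validation_fn / reflection
# pattern a concrete benchmark validator follows; the 'dev_clean' target and the
# check body are illustrative assumptions.
class _ExampleValidator(SubmissionValidation):
    @validation_fn(target='dev_clean')
    def check_dev_clean(self, item):
        # a real validator returns ValidationError / ValidationOK responses here
        return [ValidationWarning("example check only", item_name='dev_clean')]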
class Submission(BaseModel, abc.ABC):
location: Path
items: Optional[Namespace[Item]]
params_obj: Optional["BenchmarkParameters"] = None
meta_obj: Optional[MetaFile] = None
__score_dir__: Optional[Path] = None
__score_cls__: ClassVar[Type[ScoreDir]]
validation_output: ValidationContext = ValidationContext()
class Config:
arbitrary_types_allowed = True
@property
def valid(self) -> bool:
if len(self.validation_output) == 0:
self.__validate_submission__()
return not self.validation_output.fails()
@property
def params_file(self):
return self.location / BenchmarkParameters.file_stem
@property
def meta_file(self):
return self.location / 'meta.yaml'
@property
def leaderboard_file(self) -> Path:
return self.score_dir / "leaderboard.json"
@property
def params(self):
if self.params_obj is None:
self.params_obj = self.load_parameters()
return self.params_obj
@property
def meta(self):
if self.meta_obj is None:
self.meta_obj = MetaFile.from_file(self.meta_file)
return self.meta_obj
@property
def score_dir(self) -> Path:
""" Get scores location """
if self.__score_dir__ is None:
return self.location / 'scores'
return self.__score_dir__
@score_dir.setter
def score_dir(self, score_location: Path):
""" Set alternative scores location """
self.__score_dir__ = score_location
def has_scores(self) -> bool:
""" Check if score dir is emtpy """
return len(list(self.score_dir.rglob('*'))) > 0
def get_scores(self):
if self.score_dir.is_dir():
return self.__score_cls__(
location=self.score_dir,
submission_dir=self.location,
meta_file=self.meta,
params=self.params
)
return None
@classmethod
@abc.abstractmethod
def load(cls, path: Path, **kwargs):
pass
@classmethod
@abc.abstractmethod
def init_dir(cls, location: Path):
""" Initialise a directory for submission """
pass
@abc.abstractmethod
def load_parameters(self) -> BenchmarkParameters:
pass
@abc.abstractmethod
def __validate_submission__(self):
pass
@abc.abstractmethod
def __zippable__(self):
pass | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/submissions/_model/submission.py | submission.py |
from datetime import datetime
from typing import List, Optional, Dict, Union
from pydantic import BaseModel, AnyHttpUrl, Field, validator
from ._types import LeaderboardBenchmarkName
class ABXScoreTuple(BaseModel):
within: Optional[float]
across: Optional[float]
class CatScores(BaseModel):
precision: Optional[float]
recall: Optional[float]
fscore: Optional[float]
@validator('*', pre=True)
def fix_float(cls, v):
""" Sometimes NoneType is not marked as None but as a string"""
try:
return float(v)
except (ValueError, TypeError):
return None
class NLPScores(BaseModel):
ned: Optional[float]
coverage: Optional[float]
nwords: Optional[int]
npairs: Optional[int]
    @validator('ned', 'coverage', pre=True)
    def fix_float(cls, v):
        """ Sometimes NoneType is not marked as None but as a string"""
        try:
            return float(v)
        except (ValueError, TypeError):
            return None
    @validator('nwords', 'npairs', pre=True)
    def fix_int(cls, v):
        """ Sometimes NoneType is not marked as None but as a string"""
        try:
            return int(v)
        except (ValueError, TypeError):
            return None
class TDEScoreTuple(BaseModel):
grouping: Optional[CatScores]
token: Optional[CatScores]
type: Optional[CatScores]
boundary: Optional[CatScores]
matching: Optional[CatScores]
nlp: Optional[NLPScores]
class EntryDetails(BaseModel):
train_set: Optional[str]
benchmarks: List[LeaderboardBenchmarkName]
gpu_budget: Optional[str]
parameters: Dict = Field(default_factory=dict)
class PublicationEntry(BaseModel):
author_short: Optional[str]
authors: Optional[str]
paper_title: Optional[str]
paper_ref: Optional[str]
bib_ref: Optional[str]
paper_url: Optional[Union[AnyHttpUrl, str]]
pub_year: Optional[int]
team_name: Optional[str]
institution: str
code: Optional[AnyHttpUrl]
DOI: Optional[str]
open_science: bool = False
class LeaderboardScores(BaseModel):
pass
class LeaderboardExtras(BaseModel):
pass
class LeaderboardEntry(BaseModel):
model_id: Optional[str]
submission_id: str = ""
index: Optional[int]
submission_date: Optional[datetime]
submitted_by: Optional[str]
description: str
publication: PublicationEntry
details: EntryDetails
scores: LeaderboardScores
extras: Optional[LeaderboardExtras]
class Leaderboard(BaseModel):
_type: LeaderboardBenchmarkName
last_modified: datetime = Field(default_factory=lambda: datetime.now())
data: List[LeaderboardEntry]
def sort_by(self, key: str):
""" Sort entries of leaderboard by a specific key"""
self.data.sort(key=lambda x: getattr(x, key)) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/leaderboards/_models.py | _models.py |
from typing import List, Optional
from pydantic import BaseModel, Field
from ._models import LeaderboardScores, LeaderboardEntry, LeaderboardExtras, Leaderboard
from ._types import LeaderboardBenchmarkName
class LexicalByLength(BaseModel):
length: int
dev_score: float = Field(alias="score_dev")
dev_std: float = Field(alias="std_dev")
dev_n: float = Field(alias="n_dev")
test_score: float = Field(alias="score_test")
test_std: float = Field(alias="std_test")
test_n: float = Field(alias="n_test")
class Config:
allow_population_by_field_name = True
class LexicalByFrequency(BaseModel):
frequency: str
dev_score: float = Field(alias="score_dev")
dev_std: float = Field(alias="std_dev")
dev_n: float = Field(alias="n_dev")
test_score: float = Field(alias="score_test")
test_std: float = Field(alias="std_test")
test_n: float = Field(alias="n_test")
class Config:
allow_population_by_field_name = True
class LexicalExtras(BaseModel):
by_length: List[LexicalByLength]
by_frequency: List[LexicalByFrequency]
class SyntacticExtras(BaseModel):
typeset: str
dev_score: float = Field(alias="score_dev")
dev_std: float = Field(alias="std_dev")
dev_n: float = Field(alias="n_dev")
test_score: float = Field(alias="score_test")
test_std: float = Field(alias="std_test")
test_n: float = Field(alias="n_test")
class Config:
allow_population_by_field_name = True
class SemanticExtras(BaseModel):
set: str
dataset: str
librispeech: float
synthetic: float
class SLM21Extras(LeaderboardExtras):
lexical: LexicalExtras
syntactic: List[SyntacticExtras]
semantic: List[SemanticExtras]
class ScoreTuple(BaseModel):
dev: Optional[float]
test: Optional[float]
class LexicalScores(BaseModel):
in_vocab: Optional[ScoreTuple]
all: ScoreTuple
class SemanticScoreSets(BaseModel):
synthetic: ScoreTuple
librispeech: ScoreTuple
class SemanticScores(BaseModel):
normal: SemanticScoreSets
weighted: SemanticScoreSets
class SLM21Scores(LeaderboardScores):
lexical: LexicalScores
syntactic: ScoreTuple
semantic: SemanticScores
class SLM21LeaderboardEntry(LeaderboardEntry):
scores: SLM21Scores
extras: SLM21Extras
class SLM21Leaderboard(Leaderboard):
data: List[SLM21LeaderboardEntry]
_type = LeaderboardBenchmarkName.sLM_21 | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/leaderboards/sLM21.py | sLM21.py |
import warnings
from pathlib import Path
from typing import Dict, List, Any
from ._models import Leaderboard
from ._types import LeaderboardBenchmarkName
from .abxLS import (
ABXLSLeaderboard, ABXLSEntry
)
from .exporters import ABXLSExporter, Slm21Exporter, TDE17Exporter
from .sLM21 import (
SLM21Leaderboard, SLM21LeaderboardEntry
)
from .tde17 import (
TDE17Leaderboard, TDE17Entry
)
try:
from vocolab_ext import LeaderboardManager
from vocolab_ext.leaderboards import LeaderboardEntryBase
except ImportError:
warnings.warn("vocolab_ext is not installed")
# Fake variables to prevent errors
LeaderboardManager = ...
LeaderboardEntryBase = ...
def get_benchmark(name: str) -> LeaderboardBenchmarkName:
try:
return LeaderboardBenchmarkName(name)
except ValueError:
raise ValueError("Leaderboard name not found !!!")
class VocolabLeaderboardManager(LeaderboardManager):
"""Class that wraps the usage of leaderboards"""
def __init__(self, ld: Leaderboard):
self.leaderboard = ld
@classmethod
def load_leaderboard_from_obj(cls, name: str, obj: Dict):
"""Load self from an object"""
bench = get_benchmark(name)
if bench == LeaderboardBenchmarkName.ABX_LS:
return cls(ld=ABXLSLeaderboard.parse_obj(obj))
elif bench == LeaderboardBenchmarkName.sLM_21:
return cls(ld=SLM21Leaderboard.parse_obj(obj))
elif bench == LeaderboardBenchmarkName.TDE_17:
return cls(ld=TDE17Leaderboard.parse_obj(obj))
raise TypeError('Unknown leaderboard type')
@classmethod
def load_entry_from_obj(cls, name: str, obj: Dict):
""" Load entry from benchmark name """
bench = get_benchmark(name)
if bench == LeaderboardBenchmarkName.ABX_LS:
return ABXLSEntry.parse_obj(obj)
elif bench == LeaderboardBenchmarkName.sLM_21:
return SLM21LeaderboardEntry.parse_obj(obj)
elif bench == LeaderboardBenchmarkName.TDE_17:
return TDE17Entry.parse_obj(obj)
raise TypeError('Unknown leaderboard type')
@classmethod
def create_from_entries(cls, name: str, entries: List[Any]):
""" Create leaderboard from a list of entries"""
bench = get_benchmark(name)
if bench == LeaderboardBenchmarkName.ABX_LS:
return cls(ld=ABXLSLeaderboard(
data=entries
))
elif bench == LeaderboardBenchmarkName.sLM_21:
return cls(ld=SLM21Leaderboard(
data=entries
))
        elif bench == LeaderboardBenchmarkName.TDE_17:
            return cls(ld=TDE17Leaderboard(
                data=entries
            ))
        raise TypeError('Unknown leaderboard type')
@staticmethod
def extract_base_from_entry(entry: Any) -> LeaderboardEntryBase:
publication = getattr(entry, "publication", object())
return LeaderboardEntryBase(
submission_id=getattr(entry, "submission_id", ""),
model_id=getattr(entry, "model_id", ""),
description=getattr(entry, "description", ""),
authors=getattr(publication, "authors", ""),
author_label=getattr(publication, "author_short", None),
submission_date=getattr(entry, "submission_date", None),
submitted_by=getattr(entry, "submitted_by", None)
)
@staticmethod
def update_entry_from_base(entry: Any, base: LeaderboardEntryBase):
entry.submission_id = base.submission_id
entry.model_id = base.model_id
entry.description = base.description
entry.submission_date = base.submission_date
entry.submitted_by = base.submitted_by
entry.publication.authors = base.authors
        entry.publication.author_short = base.author_label
return entry
@staticmethod
def write_entry(entry: Any, file: Path):
with file.open('w') as fp:
            fp.write(entry.json(indent=4))
def export_as_csv(self, file: Path):
""" Export leaderboard into a csv format """
if isinstance(self.leaderboard, ABXLSLeaderboard):
exporter = ABXLSExporter(leaderboard=self.leaderboard, output_file=file)
elif isinstance(self.leaderboard, SLM21Leaderboard):
exporter = Slm21Exporter(leaderboard=self.leaderboard, output_file=file)
elif isinstance(self.leaderboard, TDE17Leaderboard):
exporter = TDE17Exporter(leaderboard=self.leaderboard, output_file=file)
else:
raise ValueError("Unknown Leaderboard Type")
# export to csv
exporter.to_csv()
def export_as_json(self, file: Path):
""" Export leaderboard into a json file """
with file.open("w") as fp:
fp.write(self.leaderboard.json(indent=4)) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/leaderboards/vocolab_ext.py | vocolab_ext.py |
import argparse
import functools
import sys
from io import StringIO
from pathlib import Path
from typing import Optional, List, Dict
import pandas as pd
from rich.console import Console
from zerospeech.leaderboards.tde17 import TDE17Leaderboard, TDE17Entry
from zerospeech.leaderboards.utils import open_json, clean_label, format_score
from .base import LeaderboardExporter, CSVExporter
console = Console()
void_console = Console(file=StringIO())
def restrict_entry(e: TDE17Entry) -> Dict:
_format_score = functools.partial(format_score, percent=False)
return dict(
label=clean_label(e.publication.author_short),
model_id=e.model_id,
        submission_id=e.submission_id,
# EN
en_ned=_format_score(e.scores.english.nlp.ned),
en_cov=_format_score(e.scores.english.nlp.coverage),
en_wrds=e.scores.english.nlp.nwords,
# FR
fr_ned=_format_score(e.scores.french.nlp.ned),
fr_cov=_format_score(e.scores.french.nlp.coverage),
fr_wrds=e.scores.french.nlp.nwords,
# Mandarin
cmn_ned=_format_score(e.scores.mandarin.nlp.ned),
cmn_cov=_format_score(e.scores.mandarin.nlp.coverage),
cmn_wrds=e.scores.mandarin.nlp.nwords,
# Wolof
wol_ned=_format_score(e.scores.wolof.nlp.ned),
wol_cov=_format_score(e.scores.wolof.nlp.coverage),
wol_wrds=e.scores.wolof.nlp.nwords,
)
class TDE17Exporter(LeaderboardExporter, CSVExporter):
leaderboard: TDE17Leaderboard
quiet: bool = False
@property
def console(self):
if not self.quiet:
return console
return void_console
def restricted_entries(self):
return [
restrict_entry(e)
for e in self.leaderboard.data
]
@classmethod
def from_cmd(cls, argv: Optional[List[str]] = None):
argv = argv if argv is not None else sys.argv[1:]
        parser = argparse.ArgumentParser("TDE17 leaderboard to CSV")
parser.add_argument('location', help='Location of leaderboard (url/path)')
parser.add_argument('-q', '--quiet', action='store_true')
parser.add_argument('-o', '--output-file', default="tde17.csv", help="File to output results")
args = parser.parse_args(argv)
if not args.quiet:
console.print("Loading...", style="bold orange3")
ld_data = open_json(args.location)
# return ld_data
return cls(
leaderboard=ld_data,
quiet=args.quiet,
output_file=Path(args.output_file)
)
def to_csv(self):
df = pd.DataFrame(self.restricted_entries())
self.console.print(f"Writing {self.output_file}...")
df.to_csv(self.output_file)
def cmd():
""" Command line entrypoint """
exp = TDE17Exporter.from_cmd()
# for entry in exp['data']:
# try:
# _ = TDE17Entry.parse_obj(entry)
# except ValidationError as e:
# print(f"failed with: {entry['model_id']}")
exp.export()
exp.console.print("Leaderboard exported successfully", style="bold green") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/leaderboards/exporters/tde17.py | tde17.py |
import argparse
import functools
import itertools
import sys
from io import StringIO
from pathlib import Path
from typing import Optional, List, Tuple, Dict
import pandas as pd
from rich.console import Console
from zerospeech.leaderboards.sLM21 import SLM21Leaderboard, SLM21LeaderboardEntry
from zerospeech.leaderboards.utils import open_json, clean_label, format_score
from .base import LeaderboardExporter, CSVExporter
console = Console()
void_console = Console(file=StringIO())
def restrict_entry(e: SLM21LeaderboardEntry, percent: bool = True) -> Tuple[Dict, Dict]:
_format_score = functools.partial(format_score, percent=percent)
base_info = dict(
label=clean_label(e.publication.author_short),
model_id=e.model_id,
        submission_id=e.submission_id
)
dev_set = dict(
**base_info,
set="dev",
lexical_all=_format_score(e.scores.lexical.all.dev),
lexical_in_vocab=_format_score(e.scores.lexical.in_vocab.dev),
syntactic=_format_score(e.scores.syntactic.dev),
semantic_synth=e.scores.semantic.normal.synthetic.dev,
semantic_libri=e.scores.semantic.normal.librispeech.dev,
semantic_w_synth=e.scores.semantic.weighted.synthetic.dev,
semantic_w_libri=e.scores.semantic.weighted.librispeech.dev
)
test_set = dict(
**base_info,
set="test",
lexical_all=_format_score(e.scores.lexical.all.test),
lexical_in_vocab=_format_score(e.scores.lexical.in_vocab.test),
syntactic=_format_score(e.scores.syntactic.test),
semantic_synth=e.scores.semantic.normal.synthetic.test,
semantic_libri=e.scores.semantic.normal.librispeech.test,
semantic_w_synth=e.scores.semantic.weighted.synthetic.test,
semantic_w_libri=e.scores.semantic.weighted.librispeech.test
)
return dev_set, test_set
class Slm21Exporter(LeaderboardExporter, CSVExporter):
leaderboard: SLM21Leaderboard
# split dev & test set
split_sets: bool = True
# keep values as percentage
as_percentage: bool = True
quiet: bool = False
@property
def console(self):
if not self.quiet:
return console
return void_console
def restricted_entries(self):
return [
            restrict_entry(e, percent=self.as_percentage)
for e in self.leaderboard.data
]
@classmethod
def from_cmd(cls, argv: Optional[List[str]] = None):
argv = argv if argv is not None else sys.argv[1:]
        parser = argparse.ArgumentParser("sLM21 leaderboard to CSV")
parser.add_argument('location', help='Location of leaderboard (url/path)')
parser.add_argument('-s', '--split-sets', action='store_true', help="Split dev and test")
parser.add_argument('-p', '--as-percentage', action='store_true', help="Scores are shown as percentages")
parser.add_argument('-q', '--quiet', action='store_true')
parser.add_argument('-o', '--output-file', default="slm21.csv", help="File to output results")
args = parser.parse_args(argv)
if not args.quiet:
console.print("Loading...", style="bold orange3")
ld_data = open_json(args.location)
return cls(
leaderboard=ld_data,
split_sets=args.split_sets,
as_percentage=args.as_percentage,
quiet=args.quiet,
output_file=Path(args.output_file)
)
def to_csv(self):
entries_restricted = self.restricted_entries()
dev_restricted, test_restricted = zip(*entries_restricted)
if not self.split_sets:
all_restricted = [
x for x in itertools.chain.from_iterable(
itertools.zip_longest(dev_restricted, test_restricted)
)
if x
]
self.console.print(f"Writing {self.output_file}...")
df_all = pd.DataFrame(all_restricted)
df_all.to_csv(self.output_file)
else:
dev_file = self.output_file.parent / f"dev_{self.output_file.name}"
self.console.print(f"Writing {dev_file}...")
df_dev = pd.DataFrame(dev_restricted)
del df_dev['set']
df_dev.to_csv(dev_file)
test_file = self.output_file.parent / f"test_{self.output_file.name}"
self.console.print(f"Writing {test_file}...")
df_test = pd.DataFrame(test_restricted)
del df_test['set']
df_test.to_csv(test_file)
def cmd():
""" Command line entrypoint """
exp = Slm21Exporter.from_cmd()
exp.export()
exp.console.print("Leaderboard exported successfully", style="bold green") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/leaderboards/exporters/sLM21.py | sLM21.py |
import argparse
import functools
import itertools
import json
import sys
from io import StringIO
from pathlib import Path
from typing import Dict, Tuple, List, Optional
import pandas as pd
from rich.console import Console
from zerospeech.leaderboards.abxLS import ABXLSLeaderboard
from zerospeech.leaderboards.utils import open_json, clean_label, format_score
from .base import CSVExporter, LeaderboardExporter
console = Console()
void_console = Console(file=StringIO())
def restrict_entry(entry: Dict, percent: bool = True) -> Tuple[Dict, Dict]:
_format_score = functools.partial(format_score, percent=percent)
pub_info = entry.get('publication', {})
base_info = dict(
label=clean_label(pub_info.get('author_short', '-')),
model_id=entry.get('model_id', '-'),
submission_id=entry.get('submission_id', '-'),
)
score = entry.get('scores', {})
dev_set = dict(
**base_info,
set="dev",
# phoneme any
# clean
phoneme_any_clean_within=_format_score(score['phoneme']['any']['clean']['within']['dev']['score']),
phoneme_any_clean_across=_format_score(score['phoneme']['any']['clean']['across']['dev']['score']),
# other
phoneme_any_other_within=_format_score(score['phoneme']['any']['other']['within']['dev']['score']),
phoneme_any_other_across=_format_score(score['phoneme']['any']['other']['across']['dev']['score']),
# phoneme within
# clean
phoneme_within_clean_within=_format_score(score['phoneme']['within']['clean']['within']['dev']['score']),
phoneme_within_clean_across=_format_score(score['phoneme']['within']['clean']['across']['dev']['score']),
# other
phoneme_within_other_within=_format_score(score['phoneme']['within']['other']['within']['dev']['score']),
phoneme_within_other_across=_format_score(score['phoneme']['within']['other']['across']['dev']['score']),
# triphone within
# clean
triphone_within_clean_within=_format_score(score['triphone']['within']['clean']['within']['dev']['score']),
triphone_within_clean_across=_format_score(score['triphone']['within']['clean']['across']['dev']['score']),
# other
triphone_within_other_within=_format_score(score['triphone']['within']['other']['within']['dev']['score']),
triphone_within_other_across=_format_score(score['triphone']['within']['other']['across']['dev']['score']),
)
test_set = dict(
**base_info,
set="test",
# phoneme any
# clean
phoneme_any_clean_within=_format_score(score['phoneme']['any']['clean']['within']['test']['score']),
phoneme_any_clean_across=_format_score(score['phoneme']['any']['clean']['across']['test']['score']),
# other
phoneme_any_other_within=_format_score(score['phoneme']['any']['other']['within']['test']['score']),
phoneme_any_other_across=_format_score(score['phoneme']['any']['other']['across']['test']['score']),
# phoneme within
# clean
phoneme_within_clean_within=_format_score(score['phoneme']['within']['clean']['within']['test']['score']),
phoneme_within_clean_across=_format_score(score['phoneme']['within']['clean']['across']['test']['score']),
# other
phoneme_within_other_within=_format_score(score['phoneme']['within']['other']['within']['test']['score']),
phoneme_within_other_across=_format_score(score['phoneme']['within']['other']['across']['test']['score']),
# triphone within
# clean
triphone_within_clean_within=_format_score(score['triphone']['within']['clean']['within']['test']['score']),
triphone_within_clean_across=_format_score(score['triphone']['within']['clean']['across']['test']['score']),
# other
triphone_within_other_within=_format_score(score['triphone']['within']['other']['within']['test']['score']),
triphone_within_other_across=_format_score(score['triphone']['within']['other']['across']['test']['score']),
)
return dev_set, test_set
class ABXLSExporter(LeaderboardExporter, CSVExporter):
leaderboard: ABXLSLeaderboard
# split dev & test set
split_sets: bool = True
# keep values as percentage
as_percentage: bool = True
quiet: bool = False
@property
def console(self):
if not self.quiet:
return console
return void_console
@classmethod
def from_cmd(cls, argv: Optional[List[str]] = None):
argv = argv if argv is not None else sys.argv[1:]
parser = argparse.ArgumentParser("ABXLS leaderboard to CSV")
parser.add_argument('location', help='Location of leaderboard (url/path)')
parser.add_argument('-s', '--split-sets', action='store_true', help="Split dev and test")
parser.add_argument('-p', '--as-percentage', action='store_true', help="Scores are shown as percentages")
parser.add_argument('-q', '--quiet', action='store_true')
parser.add_argument('-o', '--output-file', default="abx-ls.csv", help="File to output results")
args = parser.parse_args(argv)
if not args.quiet:
console.print("Loading...", style="bold orange3")
ld_data = open_json(args.location)
return cls(
leaderboard=ld_data,
split_sets=args.split_sets,
as_percentage=args.as_percentage,
quiet=args.quiet,
output_file=Path(args.output_file)
)
def restricted_entries(self) -> List[Tuple[Dict, Dict]]:
# passthrough json to serialise all values
as_dict = json.loads(self.leaderboard.json())
return [
restrict_entry(e, percent=self.as_percentage) for e in as_dict["data"]
]
def to_csv(self):
entries_restricted = self.restricted_entries()
dev_restricted, test_restricted = zip(*entries_restricted)
if not self.split_sets:
all_restricted = [
x for x in itertools.chain.from_iterable(
itertools.zip_longest(dev_restricted, test_restricted)
)
if x
]
self.console.print(f"Writing {self.output_file}...")
df_all = pd.DataFrame(all_restricted)
df_all.to_csv(self.output_file)
else:
dev_file = self.output_file.parent / f"dev_{self.output_file.name}"
self.console.print(f"Writing {dev_file}...")
df_dev = pd.DataFrame(dev_restricted)
del df_dev['set']
df_dev.to_csv(dev_file)
test_file = self.output_file.parent / f"test_{self.output_file.name}"
self.console.print(f"Writing {test_file}...")
df_test = pd.DataFrame(test_restricted)
del df_test['set']
df_test.to_csv(test_file)
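# Hedged usage sketch (not part of the original module): driving the exporter
# programmatically instead of through the CLI; the input/output paths are
# assumptions made for this example.
def _example_export(json_location: str = "abx-ls.json") -> None:
    exporter = ABXLSExporter(
        leaderboard=open_json(json_location),
        split_sets=False,
        as_percentage=True,
        quiet=True,
        output_file=Path("abx-ls.csv")
    )
    exporter.export()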
def cmd():
""" Command line entrypoint """
exp = ABXLSExporter.from_cmd()
exp.export()
exp.console.print("Leaderboard exported successfully", style="bold green") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/leaderboards/exporters/abxLS.py | abxLS.py |
import warnings
from pathlib import Path
from typing import List, Union, Callable, Any
from zerospeech.data_loaders import load_dataframe, load_numpy_array, FileError
from zerospeech.generics import FileItem, FileListItem, FileTypes
from .base_validators import ValidationError, ValidationOK, ValidationResponse
from .base_validators import BASE_VALIDATOR_FN_TYPE
return_type = List[ValidationResponse]
COMPLEX_VALIDATION_FN = Callable[[Any, List[BASE_VALIDATOR_FN_TYPE]], return_type]
def dataframe_check(
item: FileItem,
additional_checks: List[BASE_VALIDATOR_FN_TYPE], **kwargs
) -> return_type:
""" Check validity & apply additional checks to a Dataframe fileItem """
results = []
if item.file_type not in FileTypes.dataframe_types():
return [ValidationError(f'file type {item.file_type} cannot be converted into a dataframe',
data=item.file)]
try:
df = load_dataframe(item, **kwargs)
except Exception as e: # noqa: broad exception is on purpose
return [ValidationError(f'{e}', data=item.file)]
results.append(ValidationOK(f"File {item.file} is a valid dataframe !"))
for fn in additional_checks:
results.extend(fn(df))
return results
def numpy_array_check(file_item: Union[FileItem, Path],
additional_checks: List[BASE_VALIDATOR_FN_TYPE]) -> return_type:
""" Check validity & apply additional checks to a Numpy fileItem """
warnings.filterwarnings("error")
try:
array = load_numpy_array(file_item)
except (FileError, ValueError, UserWarning):
return [ValidationError('File does not contain a numpy array', data=file_item)]
results = [ValidationOK('File contains a numpy array !', data=file_item)]
for fn in additional_checks:
results.extend(fn(array))
return results
def numpy_array_list_check(
item: FileListItem, f_list_checks: List[BASE_VALIDATOR_FN_TYPE],
additional_checks: List[BASE_VALIDATOR_FN_TYPE]
) -> return_type:
""" Check validity & apply additional checks to a list of Numpy fileItems """
results = []
for fn in f_list_checks:
r = fn(item)
results.extend(r)
for i in item.files_list:
results.extend(numpy_array_check(i, additional_checks))
return results | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/validators/validators.py | validators.py |
from pathlib import Path
from typing import List, Callable, Any, Type
import numpy as np
import pandas as pd
from zerospeech.generics import FileListItem
from zerospeech.submissions import ValidationResponse, ValidationOK, ValidationError
# Type for base functions
BASE_VALIDATOR_FN_TYPE = Callable[[Any], List[ValidationResponse]]
return_type = List[ValidationResponse]
def list_checker(given: List[str], expected: List[str]) -> return_type:
""" Check a list of strings to find if expected items are in it """
given = set(given)
expected = set(expected)
if given != expected:
has_less_files = expected - given
has_more_files = given - expected
res = []
if len(has_more_files) > 0:
for e_file in has_more_files:
res.append(
ValidationError(
"extra file found",
filename=e_file
)
)
        if len(has_less_files) > 0:
for e_file in has_less_files:
res.append(ValidationError(
"expected file not found",
filename=e_file
))
return res
else:
return [ValidationOK('expected files found')]
def file_list_checker(
item: FileListItem, expected: List[Path]
) -> return_type:
""" Check if a file list has expected files in it (ignoring file suffix) """
file_names = [f.stem for f in item.files_list]
expected_names = [f.stem for f in expected]
return list_checker(given=file_names, expected=expected_names)
def file_list_stem_check(
item: FileListItem, expected: List[str]
) -> return_type:
""" Check if a file list has expected filenames in it (ignoring file suffix)"""
file_names = [f.stem for f in item.files_list]
return list_checker(given=file_names, expected=expected)
def dataframe_column_check(df: pd.DataFrame, expected_columns: List[str]) -> return_type:
""" Check that all columns are present in a dataframe """
columns = list(df.columns)
if columns != expected_columns:
        return [ValidationError(f'columns do not match expected columns; '
                                f'expected: {expected_columns}, found: {columns}')]
return [ValidationOK('Columns of dataframe are valid')]
def dataframe_index_check(df: pd.DataFrame, expected: List[str]) -> return_type:
""" Check that specific values are contained in each row"""
# check if all files from the dataset are represented in the filenames
index = list(df.index)
return list_checker(index, expected)
def dataframe_type_check(df: pd.DataFrame, col_name: str, expected_type: Type[Any]) -> return_type:
""" Verify column type matches expected type """
try:
df[col_name].astype(expected_type)
except ValueError:
return [ValidationError(f'Column {col_name} does not march expected type {expected_type}')]
return []
def numpy_dimensions_check(array: np.ndarray, ndim: int):
""" Check ndarray matches specified dimensions"""
if array.ndim != ndim:
return [ValidationError(
f'Array should be of dimensions: {ndim}')]
return []
def numpy_dtype_check(array: np.ndarray, dtype: np.dtype):
""" Check ndarray matches specified type """
if array.dtype != dtype:
return [ValidationError(
f'Array should be of type: {dtype}')]
return []
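# Hedged usage sketch (not part of the original module): applying the numpy checks
# above to an in-memory array, the same way the higher-level validators apply them
# to each file of a submission.
def _example_numpy_checks(array: np.ndarray) -> return_type:
    responses: List[ValidationResponse] = []
    responses.extend(numpy_dtype_check(array, dtype=np.dtype('float')))
    responses.extend(numpy_dimensions_check(array, ndim=2))
    return responses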
def numpy_col_comparison(dim: int):
ncols = []
def comparison(array: np.ndarray):
ncols.append(array.shape[dim])
if len(set(ncols)) != 1:
return [
ValidationError(f'Arrays do not match dimensions {dim}')
]
return []
return comparison | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/validators/base_validators.py | base_validators.py |
import functools
from typing import Optional, ClassVar
from ._model import Dataset, DatasetNotFoundError, DatasetsDir, DatasetNotInstalledError
class SLM21Dataset(Dataset):
""" Class interfacing usage of the sLM21 dataset"""
__dataset_name__: ClassVar[str] = "sLM21-dataset"
@classmethod
@functools.lru_cache
def load(cls, load_index: bool = True) -> Optional["SLM21Dataset"]:
""" Load dataset from dir registry """
dataset = DatasetsDir.load().get(cls.__dataset_name__, cls)
if dataset is None:
raise DatasetNotFoundError("The sLM21-dataset does not exist")
if not dataset.installed:
raise DatasetNotInstalledError("The sLM21-dataset is not installed locally")
if load_index:
dataset.load_index()
# convert all paths to absolute paths
dataset.index.make_absolute()
return dataset
class AbxLSDataset(Dataset):
""" Class interfacing usage of the ABX-LS dataset"""
__dataset_name__: ClassVar[str] = "abxLS-dataset"
@classmethod
@functools.lru_cache
def load(cls, load_index: bool = True) -> Optional["AbxLSDataset"]:
""" Load """
dataset = DatasetsDir.load().get(cls.__dataset_name__, cls=cls)
if dataset is None:
raise DatasetNotFoundError(f"The {cls.__dataset_name__} does not exist")
if not dataset.installed:
raise DatasetNotInstalledError(f"The {cls.__dataset_name__} is not installed locally")
if load_index:
dataset.load_index()
# convert all paths to absolute paths
dataset.index.make_absolute()
return dataset
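# Hedged usage sketch (not part of the original module): loading an installed
# dataset and walking its index; AbxLSDataset.load() raises if the dataset is
# not installed locally.
def _example_browse_abxls() -> None:
    dataset = AbxLSDataset.load()
    for name, subset in dataset.index.subsets:
        print(name, type(subset).__name__)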
class ProsAuditLMDataset(Dataset):
""" Class interfacing usage of the Prosody LM Benchmark """
__dataset_name__: ClassVar[str] = "prosaudit-dataset"
@classmethod
@functools.lru_cache
def load(cls, load_index: bool = True) -> Optional["ProsAuditLMDataset"]:
dataset = DatasetsDir.load().get(cls.__dataset_name__, cls=cls)
if dataset is None:
raise DatasetNotFoundError(f"The {cls.__dataset_name__} does not exist")
if not dataset.installed:
raise DatasetNotInstalledError(f"The {cls.__dataset_name__} is not installed locally")
if load_index:
dataset.load_index()
# convert all paths to absolute paths
dataset.index.make_absolute()
return dataset | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/datasets/zrc_2021.py | zrc_2021.py |
import json
from pathlib import Path
from typing import Optional, TypeVar, ClassVar, Type, Union
from pydantic import BaseModel, validator
from zerospeech.generics import (
RepoItemDir, ImportableItem, DownloadableItem, Namespace, Subset
)
from zerospeech.misc import extract, download_extract_archive
from zerospeech.out import console
from zerospeech.settings import get_settings
st = get_settings()
T = TypeVar("T")
class DatasetNotInstalledError(Exception):
""" Exception used for a non locally installed dataset """
pass
class DatasetNotFoundError(Exception):
""" Exception used for a non available dataset """
pass
class DatasetIndex(BaseModel):
""" A metadata object indexing all items in a dataset."""
root_dir: Path
subsets: Namespace[Subset]
@validator("subsets", pre=True)
def subsets_parse(cls, values):
return Namespace[Subset](store=values)
def make_relative(self):
""" Convert all the subsets to relative paths """
for _, item in self.subsets:
item.make_relative(self.root_dir)
def make_absolute(self):
""" Convert all the subsets to absolute paths """
for _, item in self.subsets:
item.make_absolute(self.root_dir)
class Dataset(DownloadableItem, ImportableItem):
""" Generic definition of a dataset """
key_name: ClassVar[str] = "datasets"
index: Optional[DatasetIndex] = None
@property
def name(self) -> str:
""" Returns the dataset name """
return getattr(self, "__dataset_name__", '')
def is_external(self) -> bool:
""" Returns true if the dataset is external """
return self.origin.type == "external"
@property
def index_path(self):
""" Path to the index file """
p = self.location / 'index.json'
if not p.is_file():
raise ValueError(f'Dataset {self.origin.name} has no build-in index file')
return p
def load_index(self):
""" Load the dataset index """
with self.index_path.open() as fp:
self.index = DatasetIndex(root_dir=self.location, **json.load(fp))
def pull(self, *, verify: bool = True, quiet: bool = False, show_progress: bool = False):
""" Pull a dataset from remote to the local repository."""
if self.origin.type == "external":
raise ValueError("External datasets cannot be pulled from the repository !!")
md5_hash = ""
if verify:
md5_hash = self.origin.md5sum
# download & extract archive
download_extract_archive(self.origin.zip_url, self.location, int(self.origin.total_size),
filename=self.name, md5sum_hash=md5_hash, quiet=quiet, show_progress=show_progress)
if not quiet:
console.print(f"[green]Dataset {self.name} installed successfully !!")
def import_zip(self, *, archive: Path):
""" Import dataset from an archive """
# extract archive
extract(archive, self.location)
class DatasetsDir(RepoItemDir):
""" Dataset directory manager """
item_type: ClassVar[Union[Type[DownloadableItem], Type[ImportableItem]]] = Dataset
@classmethod
def load(cls):
return cls(root_dir=st.dataset_path) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/datasets/_model.py | _model.py |
import abc
import json
import shutil
from pathlib import Path
from typing import Optional, List, NamedTuple
import pandas as pd
from Crypto.Hash import MD5
from filesplit.split import Split
from pydantic import BaseModel, Extra, Field
from .user_api import SubmissionRequestFileIndexItem
class UploadItem(NamedTuple):
""" Item used for upload iteration """
filepath: Path
filehash: str
filesize: int
class ManifestIndexItem(BaseModel):
""" Model representing a file item in the SplitManifest """
filename: str
filesize: int
filehash: str
def __eq__(self, other: 'ManifestIndexItem'):
return self.filehash == other.filehash
def __hash__(self):
return int(self.filehash, 16)
def to_api(self) -> SubmissionRequestFileIndexItem:
return SubmissionRequestFileIndexItem(
filename=self.filename,
filesize=self.filesize,
filehash=self.filehash
)
def to_item(self, root: Path) -> UploadItem:
return UploadItem(
filepath=root / self.filename,
filehash=self.filehash,
filesize=self.filesize
)
@classmethod
def from_item(cls, item: UploadItem) -> "ManifestIndexItem":
return cls(
filename=item.filepath.name,
filehash=item.filehash,
filesize=item.filesize
)
class Config:
extra = Extra.ignore  # ignore unknown fields when parsing
def md5sum(file_path: Path, chunk_size: int = 8192):
""" Return a md5 hash of a files content """
h = MD5.new()
with file_path.open('rb') as f:
while True:
chunk = f.read(chunk_size)
if len(chunk):
h.update(chunk)
else:
break
return h.hexdigest()
def split_archive(zipfile: Path, chunk_max_size: int = 500000000):
""" Split an archive to multiple paths """
output_dir = zipfile.parent / f'.{zipfile.stem}.parts'
output_dir.mkdir(exist_ok=True, parents=True)
fs = Split(inputfile=str(zipfile), outputdir=str(output_dir))
fs.bysize(size=chunk_max_size)
df = pd.read_csv(output_dir / fs.manfilename)
manifest = [
ManifestIndexItem.parse_obj(
dict(
filehash=md5sum(file_path=output_dir / Path(o['filename']).name),
**o
)
)
for o in df.to_dict(orient='records')
]
return manifest, output_dir
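# Illustrative sketch (not part of the original module): hashing an archive
# and splitting it into upload parts. The archive path is hypothetical;
# chunk_max_size defaults to roughly 500 MB per part.
def _example_split_archive():
    archive = Path("/tmp/submission.zip")                # hypothetical archive
    whole_hash = md5sum(archive)                         # hash of the complete file
    manifest, parts_dir = split_archive(archive)         # per-part ManifestIndexItem list + parts directory
    return whole_hash, manifest, parts_dir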
class FileUploadHandler(BaseModel, abc.ABC):
file: Path
filehash: str
@property
def current_dir(self) -> Path:
return self.file.parent
@property
@abc.abstractmethod
def is_multipart(self) -> bool:
pass
@abc.abstractmethod
def api_index(self) -> Optional[List[SubmissionRequestFileIndexItem]]:
pass
@abc.abstractmethod
def mark_completed(self, item: UploadItem):
""" Function to mark item as uploaded """
pass
@classmethod
@abc.abstractmethod
def _create(cls, target_file: Path):
""" Abstract method to create manifest """
pass
@abc.abstractmethod
def clean(self):
""" Clean upload temp files """
pass
@abc.abstractmethod
def __iter__(self):
""" Iterate over files to upload """
pass
@staticmethod
def __resume_path(source: Path):
""" Path builder to resume file """
return source.parent / f"{source.stem}.upload.json"
@property
def resume_file(self) -> Path:
""" Current resume file """
return self.__resume_path(self.file)
def save(self):
""" Save progress to disk """
with self.resume_file.open("w") as fp:
fp.write(self.json(indent=4))
@classmethod
def create_or_load(cls, target_file: Path) -> Optional["FileUploadHandler"]:
""" Create or load manifest """
if not target_file.is_file():
raise FileExistsError(f'{target_file} does not exist')
if cls.__resume_path(target_file).is_file():
""" If resume file exists load this instead of recreating it """
with cls.__resume_path(target_file).open() as fp:
return cls.parse_obj(json.load(fp))
# if no resume file build new manifest
return cls._create(target_file)
class MultipartUploadHandler(FileUploadHandler):
""" Data Model used for the binary split function as a manifest to allow merging """
index: Optional[List[ManifestIndexItem]]
uploaded: List[ManifestIndexItem] = Field(default_factory=list)
parts_dir: Optional[Path]
@property
def is_multipart(self) -> bool:
return True
@property
def api_index(self) -> Optional[List[SubmissionRequestFileIndexItem]]:
return [
i.to_api() for i in self.index
]
def __iter__(self):
""" Iterate over remaining items to upload """
remaining = set(self.index) - set(self.uploaded)
return iter([i.to_item(self.parts_dir) for i in remaining])
def mark_completed(self, item: UploadItem):
self.uploaded.append(ManifestIndexItem.from_item(item))
self.save()
@classmethod
def _create(cls, target_file: Path):
""" Build multipart upload manifest """
file_hash = md5sum(target_file)
# split file & create upload index
files_manifest, output_dir = split_archive(target_file)
manifest = cls(
file=target_file, filehash=file_hash,
index=files_manifest, parts_dir=output_dir
)
# save to disk to allow resume
manifest.save()
return manifest
def clean(self):
""" Clean upload temp files """
self.resume_file.unlink(missing_ok=True)
shutil.rmtree(self.parts_dir)
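# Illustrative sketch (not part of the original module): the create-or-resume
# upload loop this handler is designed for. `do_upload` is a hypothetical
# stand-in for the actual transfer function.
def _example_multipart_upload(archive: Path, do_upload):
    handler = MultipartUploadHandler.create_or_load(archive)   # builds the manifest or resumes from disk
    for item in handler:                                        # yields only parts not yet uploaded
        do_upload(item)
        handler.mark_completed(item)                            # progress is persisted, a crash can resume here
    handler.clean()                                             # remove resume file & parts directory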
class SinglePartUpload(FileUploadHandler):
@property
def is_multipart(self) -> bool:
return False
@property
def api_index(self) -> Optional[List[SubmissionRequestFileIndexItem]]:
# in single-part upload this is not used
return None
def mark_completed(self, item: UploadItem):
# in single-part upload this is not used
# resume function always restarts the upload
pass
def __iter__(self):
return iter([
UploadItem(
filepath=self.file,
filehash=self.filehash,
filesize=self.file.stat().st_size
)
])
@classmethod
def _create(cls, target_file: Path):
""" Build single-part upload manifest """
return cls(
file=target_file,
filehash=md5sum(target_file)
)
def clean(self):
""" Clean upload temp files """
self.resume_file.unlink(missing_ok=True) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/upload/file_split.py | file_split.py |
import functools
import json
import shutil
from pathlib import Path
from typing import Tuple, Optional, Any, Union, Dict, TYPE_CHECKING
from pydantic import BaseModel
from rich.console import Console
from zerospeech.benchmarks import BenchmarkList
from zerospeech.data_loaders import zip_zippable
from zerospeech.out import void_console, console as std_console, error_console
from zerospeech.settings import get_settings
from zerospeech.httpw import post as http_post, get as http_get, APIHTTPException
from zerospeech.misc import ScoresNotFound, MetaYamlNotValid, InvalidSubmissionError
from zerospeech.submissions import show_errors
from .file_split import (
FileUploadHandler, MultipartUploadHandler, SinglePartUpload, md5sum, UploadItem
)
from .user_api import CurrentUser, Token
if TYPE_CHECKING:
from zerospeech.submissions import Submission
st = get_settings()
class BenchmarkClosedError(Exception):
""" Benchmark not active or over deadline """
pass
def get_first_author(authors: str) -> Tuple[str, str]:
""" Returns a tuple containing first & last name of first author """
try:
raise NotImplementedError(f'Does not have a good author parser for {authors}')
except (ValueError, NotImplementedError):
# On failure fetch full name from user
usr = CurrentUser.load()
if usr:
return usr.first_name, usr.last_name
return "john", "doe"
def upload_submission(item: UploadItem, *, submission_id: str, token: Token):
""" Function that performs upload of submission content to the api backend """
route, headers = st.api.request_params(
'submission_content_add', token=token, submission_id=submission_id, part_name=item.filepath.name
)
with item.filepath.open('rb') as file_data:
files = dict(file=file_data)
response = http_post(route, headers=headers, files=files, data={})
if response.status_code != 200:
raise APIHTTPException.from_request('submission_content_add', response)
return response.json()
def get_submission_status(submission_id: str, token: Token) -> Dict[str, Any]:
route, _ = st.api.request_params(
"submission_status", token=token, submission_id=submission_id
)
response = http_get(route)
if response.status_code != 200:
raise APIHTTPException.from_request('submission_status', response)
return response.json()
class UploadManifest(BaseModel):
submission_location: Path
multipart: bool
benchmark_id: str
tmp_dir: Optional[Path] = None
archive_filename: Optional[str] = None
submission_id: Optional[str] = None
model_id: Optional[str] = None
submission_validated: bool = False
local_data_set: bool = False
archive_created: bool = False
quiet: bool = False
is_test: bool = False
@staticmethod
def index_stem() -> str:
return ".manifest"
@classmethod
def load(cls, location: Path):
""" Load Manifest from location """
if not (location / cls.index_stem()).is_file():
raise FileNotFoundError("No Index file")
with (location / cls.index_stem()).open() as fp:
return cls.parse_obj(json.load(fp))
def save(self):
""" Save to disk """
with (self.tmp_dir / self.index_stem()).open('w') as fp:
fp.write(self.json(indent=4))
def update(self, field: str, value: Any):
""" Update a field """
setattr(self, field, value)
self.save()
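# Illustrative sketch (not part of the original module): how the upload
# manifest persists progress between runs. Paths are hypothetical.
def _example_manifest_roundtrip(tmp_dir: Path, submission_dir: Path):
    manifest = UploadManifest(
        submission_location=submission_dir, multipart=True,
        benchmark_id="abxLS", tmp_dir=tmp_dir
    )
    manifest.save()                                  # writes <tmp_dir>/.manifest
    manifest.update("archive_created", True)         # setattr + save
    return UploadManifest.load(tmp_dir)              # reload the saved state from disk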
class SubmissionUploader:
@classmethod
def resume(cls, tmp_dir: Path, quiet: bool = False) -> 'SubmissionUploader':
""" Resume Uploader from a manifest file """
man = UploadManifest.load(tmp_dir)
if not quiet:
std_console.print(f"Resuming upload from {tmp_dir}...")
return cls(
submission=man.submission_location,
user_cred=None,
quiet=quiet,
**man.dict(exclude={'submission_location', 'user_logged_in', 'quiet'})
)
@classmethod
def from_submission(
cls, submission: Union[Path, "Submission"], usr: Optional[CurrentUser] = None,
multipart: bool = True, quiet: bool = False
) -> 'SubmissionUploader':
""" Create uploader from submission """
return cls(
submission=submission, user_cred=usr, multipart=multipart, quiet=quiet)
def __init__(
self, submission: Union[Path, "Submission"],
user_cred: Union[CurrentUser, Tuple[str, str], None] = None,
tmp_dir: Optional[Path] = None,
archive_filename: Optional[str] = None,
multipart: bool = True,
quiet: bool = False,
submission_validated: bool = False,
local_data_set: bool = False,
archive_created: bool = False,
model_id: Optional[str] = None,
submission_id: Optional[str] = None,
is_test: bool = False,
benchmark_id: str = ""
):
self._quiet = quiet
with self.console.status("building artifacts"):
if isinstance(submission, Path):
bench = BenchmarkList.from_submission(submission)
# Check benchmark
is_test = bench.is_test
benchmark_id = bench.name
if not bench.is_active():
raise BenchmarkClosedError(f"Benchmark {bench.name} does not accept submissions")
submission = bench.submission.load(submission)
self.submission = submission
if user_cred is None:
usr = CurrentUser.load()
if usr is None:
creds = CurrentUser.get_credentials_from_user()
usr = CurrentUser.login(creds)
self.user = usr
elif isinstance(user_cred, CurrentUser):
self.user = user_cred
else:
self.user = CurrentUser.login(user_cred)
if tmp_dir is None:
self.tmp_dir = st.mkdtemp(auto_clean=False)
else:
self.tmp_dir = tmp_dir
self.console.print(f"UPLOAD DIR :::> {self.tmp_dir}")
self.console.print("\tUse this directory to resume upload if it fails (--resume)", style="dark_orange3 italic")
if archive_filename is None:
self.archive_file = self.tmp_dir / f"{self.submission.location.name}.zip"
else:
self.archive_file = self.tmp_dir / archive_filename
self.upload_handler: Optional[FileUploadHandler] = None
self._manifest = UploadManifest(
submission_location=self.submission.location,
submission_validated=submission_validated,
multipart=multipart,
tmp_dir=self.tmp_dir,
archive_filename=self.archive_file.name,
submission_id=submission_id,
model_id=model_id,
local_data_set=local_data_set,
archive_created=archive_created,
is_test=is_test,
benchmark_id=benchmark_id
)
self._manifest.save()
# fetch system data & update submission
with self.console.status("Building submission..."):
if not self._manifest.local_data_set:
self._fetch_local_data()
# update manifest
self._manifest.update('local_data_set', True)
# check submission
with self.console.status("Checking submission..."):
if not self._manifest.submission_validated:
self._check_submission()
# update manifest
self._manifest.update('submission_validated', True)
with self.console.status("Checking model ID..."):
if self._manifest.model_id is None:
mdi = self._get_model_id()
self.submission.meta.set_model_id(
submission_location=self.submission.location,
model_id=mdi
)
# update manifest
self._manifest.update('model_id', mdi)
# Making archive or load if existing
self._make_archive(multipart)
# update manifest
self._manifest.update('archive_created', True)
with self.console.status("Checking Submission ID"):
if self._manifest.submission_id is None:
self._register_submission()
# make upload function
self.upload_fn = functools.partial(
upload_submission,
submission_id=self.submission.meta.submission_id,
token=self.user.token
)
self.console.print(":heavy_check_mark: Submission valid & ready for upload !!!", style="bold green")
self.console.print(f"\t SUBMISSION_ID: {self._manifest.submission_id}", style="dark_orange3 italic")
self.console.print(f"\t MODEL_ID: {self._manifest.model_id}", style="dark_orange3 italic")
@property
def console(self) -> Console:
if self._quiet:
return void_console
return std_console
@property
def ready(self) -> bool:
"""Check if submission is ready for upload """
if self._manifest.submission_id is None:
error_console.print("No Submission ID")
return False
if self._manifest.model_id is None:
error_console.print("No Model ID")
return False
if not self._manifest.submission_validated:
error_console.print("Submission invalid")
return False
if not self._manifest.local_data_set:
error_console.print("Failed to set all data (check user)")
return False
if not self._manifest.archive_created:
error_console.print("No archive created")
return False
return True
def _get_model_id(self) -> str:
model_id = self.submission.meta.model_info.model_id
authors = self.submission.meta.publication.authors
if model_id is None:
_, first_author_lname = get_first_author(authors)
model_id = self.user.new_model_id(
author_name=first_author_lname, description=self.submission.meta.model_info.system_description,
gpu_budget=self.submission.meta.model_info.gpu_budget,
train_set=self.submission.meta.model_info.train_set,
authors=authors, author_label=f"{first_author_lname} et al.",
institution=self.submission.meta.publication.institution,
team=self.submission.meta.publication.team,
paper_url=self.submission.meta.publication.paper_url,
code_url=self.submission.meta.code_url
)
return model_id.replace('"', '').replace("'", "")
def _fetch_local_data(self):
""" Fetch all system data & update meta.yaml """
author_label = self.submission.meta.publication.author_label
if "et al." not in author_label:
first_author_fname, first_author_lname = get_first_author(
self.submission.meta.publication.authors
)
author_label = f"{first_author_lname.title()}, {first_author_fname[0].upper()}. et al."
self.submission.meta.set_system_values(
submission_location=self.submission.location,
username=self.user.username,
author_label=author_label
)
def _check_submission(self):
""" Performs all checks on submission before upload to API """
if not self.submission.meta.is_valid():
raise MetaYamlNotValid('meta.yaml not valid', ctx=self.submission.meta.validation_context)
# validate submission
if not self.submission.valid:
# todo convert submission validation to use context protocol
show_errors(self.submission.validation_output)
raise InvalidSubmissionError('submission not valid')
# check scores (has_scores)
if not self.submission.has_scores():
raise ScoresNotFound('submission has no scores')
# generate leaderboard
scores = self.submission.get_scores()
ld_data = scores.build_leaderboard()
with (scores.location / scores.leaderboard_file_name).open("w") as fp:
fp.write(ld_data.json(indent=4))
# check model_id
def _make_archive(self, multipart: bool = True):
if not self.archive_file.is_file():
with self.console.status("Creating Archive..."):
zip_zippable(self.submission, self.archive_file)
with self.console.status("Building manifest..."):
if multipart:
self.upload_handler = MultipartUploadHandler.create_or_load(
self.archive_file
)
else:
self.upload_handler = SinglePartUpload.create_or_load(
self.archive_file
)
self.console.print(":heavy_check_mark: archive created !!", style="bold green")
def _register_submission(self):
if self.submission.meta.submission_id is None:
leaderboard_file = ""
if self.submission.has_scores():
leaderboard_file = str(self.submission.leaderboard_file.relative_to(self.submission.location))
filehash = md5sum(self.archive_file)
resp_obj = self.user.make_new_submission(
model_id=self.submission.meta.model_info.model_id,
filename=self.archive_file.name,
filehash=filehash,
benchmark_id=self._manifest.benchmark_id,
has_scores=self.submission.has_scores(),
leaderboard=leaderboard_file,
index=self.upload_handler.api_index,
author_label=self.submission.meta.publication.author_label,
is_test=self._manifest.is_test
)
self.submission.meta.set_submission_id(
submission_location=self.submission.location,
submission_id=resp_obj
)
else:
sub_status = get_submission_status(
self.submission.meta.submission_id, self.user.token
)
status = sub_status.get("status", "")
if status != "uploading":
error_console.print(f"Submission {self.submission.meta.submission_id} has status '{status}' "
f"and does not allow uploading")
error_console.print("Remove the submission_id entry from the meta.yaml to upload to a different id")
self.clean(True)
raise ValueError('Cannot upload to current submission')
# update manifest
self._manifest.update('submission_id', self.submission.meta.submission_id)
def clean(self, quiet: bool = False):
""" Remove all temp files """
if not quiet:
with self.console.status("Cleaning up artifacts..."):
shutil.rmtree(self.tmp_dir)
else:
shutil.rmtree(self.tmp_dir)
def upload(self):
""" Upload items to backend by iterating on upload handler"""
with self.console.status("Uploading..."):
for item in self.upload_handler:
_ = self.upload_fn(item)
self.upload_handler.mark_completed(item)
self.console.print(":heavy_check_mark: upload successful !!", style="bold green") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/upload/submission.py | submission.py |
import json
from datetime import datetime
from pathlib import Path
from typing import Union, Optional, Dict, Tuple, ClassVar, List
from pydantic import BaseModel, EmailStr, Field, AnyHttpUrl
from rich.console import Console
from zerospeech.settings import get_settings, Token
from zerospeech.httpw import post as http_post, get as http_get, APIHTTPException
_st = get_settings()
out = Console()
class NewModelInfo(BaseModel):
""" Info required to create a new model id"""
description: str
gpu_budget: str
train_set: str
authors: str
institution: str
team: str
author_label: Optional[str]
paper_url: Optional[AnyHttpUrl]
code_url: Optional[AnyHttpUrl]
created_at: datetime = Field(default_factory=lambda: datetime.now())
def clean_dict(self):
return json.loads(self.json())
class SubmissionRequestFileIndexItem(BaseModel):
filename: str
filesize: int
filehash: str = ""
class NewSubmissionInfo(BaseModel):
""" Info required to create a new submission """
model_id: str
benchmark_id: str
filename: str
hash: str
has_scores: bool
multipart: bool
author_label: Optional[str]
index: Optional[List[SubmissionRequestFileIndexItem]]
leaderboard: Optional[str]
def clean_dict(self):
return json.loads(self.json())
class _UserAPIMethods:
""" Methods for communicating with the users-api on zerospeech.com """
@staticmethod
def login(credentials: Union[str, EmailStr], password) -> Optional[Token]:
""" Request Session token from the API by providing valid credentials """
route_url, _ = _st.api.request_params(route_name='user_login', token=None)
response = http_post(
route_url,
data={
"grant_type": "password",
"username": credentials,
"password": password,
"scopes": [],
"client_id": _st.api.client_id,
"client_secret": _st.api.client_secret
}
)
if response.status_code != 200:
raise APIHTTPException.from_request('user_login', response)
return Token.parse_obj(response.json())
@staticmethod
def get_user_info(token: Token) -> Dict:
route_url, headers = _st.api.request_params(route_name='user_info', token=token, username=token.username)
response = http_get(
route_url,
headers=headers
)
if response.status_code != 200:
raise APIHTTPException.from_request('user_info', response)
return response.json()
@staticmethod
def make_new_model(username: str, author_name: str, new_model_info: NewModelInfo, token: Token) -> Optional[str]:
route_url, headers = _st.api.request_params(
route_name='new_model', token=token, username=username, author_name=author_name)
response = http_post(
route_url,
json=new_model_info.clean_dict(),
headers=headers
)
if response.status_code != 200:
raise APIHTTPException.from_request('new_model', response)
return response.json().get("model_id", None)
@staticmethod
def make_new_submission(username: str, new_sub_info: NewSubmissionInfo, token: Token) -> Optional[str]:
""" Create a new submission """
route_url, headers = _st.api.request_params(
route_name='new_submission', token=token, username=username)
response = http_post(
route_url,
json=new_sub_info.clean_dict(),
headers=headers
)
if response.status_code != 200:
raise APIHTTPException.from_request('new_submission', response)
return response.json().get('submission_id', None)
class CurrentUser(BaseModel):
""" Dataclass Managing the current user session """
username: str
affiliation: str
first_name: str
last_name: str
email: EmailStr
token: Token
session_file: ClassVar[Path] = _st.user_credentials
@staticmethod
def get_credentials_from_user():
""" Prompt user for authentication credentials """
out.print("Required credentials to perform authentication", style="yellow")
username = out.input("username/email: ")
password = out.input("password: ", password=True)
return username, password
def save(self):
""" Save session to disk"""
with self.session_file.open('w') as fp:
fp.write(self.json(indent=4))
@classmethod
def clear(cls):
""" Clear current user session """
cls.session_file.unlink(missing_ok=True)
@classmethod
def login(cls, credentials: Optional[Tuple[str, str]] = None, auto_save: bool = True):
""" Create a new user session
Parameters:
credentials: optional (login, password) tuple; if None the user is prompted.
auto_save: specify whether to save session on disk (default: True)
Returns:
Current session
"""
if credentials is None:
credentials = cls.get_credentials_from_user()
token = _UserAPIMethods.login(credentials[0], credentials[1])
creds = cls(token=token, **_UserAPIMethods.get_user_info(token=token))
if auto_save:
creds.save()
return creds
@classmethod
def load(cls) -> Optional["CurrentUser"]:
""" Load existing session from disk """
if not cls.session_file.is_file():
return None
with cls.session_file.open() as fp:
return cls.parse_obj(json.load(fp))
@classmethod
def load_or_create(cls, credentials: Optional[Tuple[str, str]] = None):
""" Load the existing session or create a new one if it is not present """
if cls.session_file.is_file():
return cls.load()
return cls.login(credentials)
def new_model_id(
self, *, author_name: str, description: str, gpu_budget: str, train_set: str,
authors: str, institution: str, team: str, paper_url: Optional[str], code_url: Optional[str],
author_label: Optional[str]
) -> str:
""" Create a new model id from the given information """
model_dt = _UserAPIMethods.make_new_model(
username=self.username,
author_name=author_name,
new_model_info=NewModelInfo.parse_obj(dict(
description=description,
gpu_budget=gpu_budget,
train_set=train_set,
authors=authors,
institution=institution,
team=team,
paper_url=paper_url,
code_url=code_url,
author_label=author_label
)),
token=self.token
)
return model_dt
def make_new_submission(
self, model_id: str, filename: str, filehash: str,
has_scores: bool, leaderboard: str,
author_label: str, benchmark_id: str, is_test: bool = False,
index: Optional[List[SubmissionRequestFileIndexItem]] = None
):
if is_test:
benchmark_id = "test-challenge"
return _UserAPIMethods.make_new_submission(
username=self.username,
new_sub_info=NewSubmissionInfo(
model_id=model_id,
benchmark_id=benchmark_id,
filename=filename,
hash=filehash,
multipart=index is not None,
author_label=author_label,
has_scores=has_scores,
leaderboard=leaderboard,
index=index
),
token=self.token
) | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/upload/user_api.py | user_api.py |
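# Illustrative sketch for the user-api module above (not part of the original
# source): opening a session and registering a model id. All values are
# hypothetical placeholders.
def _example_session():
    user = CurrentUser.load_or_create(("[email protected]", "secret"))   # login & cache the session on disk
    model_id = user.new_model_id(
        author_name="Doe", description="baseline CPC features", gpu_budget="60h",
        train_set="LibriSpeech 960", authors="Jane Doe, John Doe",
        institution="ExampleLab", team="Example Team",
        paper_url=None, code_url=None, author_label="Doe et al."
    )
    return user, model_id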
from typing import Tuple, List, Dict, ClassVar, Type
import pandas as pd
from pydantic import Field
from zerospeech.datasets import ZRC2017Dataset
from zerospeech.generics import FileListItem, FileItem
from zerospeech.submissions import Submission
from zerospeech.submissions.abx17 import ABX17Submission
from zerospeech.tasks.abx.abx17 import SimpleABXTask
from ._model import Benchmark
return_type = List[Tuple[str, FileListItem, FileItem]]
class ABX17Task(SimpleABXTask):
""" ABX task for abx-17 """
def format_results(self, results: Dict) -> pd.DataFrame:
results = [
(dset.split('_')[0], dset.split('_')[1], mode, score)
for dset, v in results.items() for mode, score in v.items()
]
return pd.DataFrame(
results, columns=['language', 'duration', 'type', 'score']
)
def extract_sets(self, submission: "ABX17Submission", dataset: ZRC2017Dataset) -> return_type:
self.sets = submission.sets
self.tasks = submission.tasks
abx_sets = []
if 'english' in self.tasks:
if '1s' in self.sets:
abx_sets.append((
'english_1s',
dataset.index.subsets.english.items.abx_1s_item,
submission.items.english_1s
))
if '10s' in self.sets:
abx_sets.append((
'english_10s',
dataset.index.subsets.english.items.abx_10s_item,
submission.items.english_10s
))
if '120s' in self.sets:
abx_sets.append((
'english_120s',
dataset.index.subsets.english.items.abx_120s_item,
submission.items.english_120s
))
if 'french' in self.tasks:
if '1s' in self.sets:
abx_sets.append((
'french_1s',
dataset.index.subsets.french.items.abx_1s_item,
submission.items.french_1s
))
if '10s' in self.sets:
abx_sets.append((
'french_10s',
dataset.index.subsets.french.items.abx_10s_item,
submission.items.french_10s
))
if '120s' in self.sets:
abx_sets.append((
'french_120s',
dataset.index.subsets.french.items.abx_120s_item,
submission.items.french_120s
))
if 'mandarin' in self.tasks:
if '1s' in self.sets:
abx_sets.append((
'mandarin_1s',
dataset.index.subsets.mandarin.items.abx_1s_item,
submission.items.mandarin_1s
))
if '10s' in self.sets:
abx_sets.append((
'mandarin_10s',
dataset.index.subsets.mandarin.items.abx_10s_item,
submission.items.mandarin_10s
))
if '120s' in self.sets:
abx_sets.append((
'mandarin_120s',
dataset.index.subsets.mandarin.items.abx_120s_item,
submission.items.mandarin_120s
))
if 'german' in self.tasks:
if '1s' in self.sets:
abx_sets.append((
'german_1s',
dataset.index.subsets.german.items.abx_1s_item,
submission.items.german_1s
))
if '10s' in self.sets:
abx_sets.append((
'german_10s',
dataset.index.subsets.german.items.abx_10s_item,
submission.items.german_10s
))
if '120s' in self.sets:
abx_sets.append((
'german_120s',
dataset.index.subsets.german.items.abx_120s_item,
submission.items.german_120s
))
if 'wolof' in self.tasks:
if '1s' in self.sets:
abx_sets.append((
'wolof_1s',
dataset.index.subsets.wolof.items.abx_1s_item,
submission.items.wolof_1s
))
if '10s' in self.sets:
abx_sets.append((
'wolof_10s',
dataset.index.subsets.wolof.items.abx_10s_item,
submission.items.wolof_10s
))
if '120s' in self.sets:
abx_sets.append((
'wolof_120s',
dataset.index.subsets.wolof.items.abx_120s_item,
submission.items.wolof_120s
))
return abx_sets
class ABX17Benchmark(Benchmark):
"""abx-LS is a benchmark on acoustic Units using the ABX metric.
This benchmark has 2 sub-tasks :
- clean
- other
Each task has two subsets: dev, test
For ABX measuring we use this module : https://github.com/zerospeech/libri-light-abx
"""
_name: ClassVar[str] = "abx17"
_doc_url: ClassVar[str] = "https://zerospeech.com/tasks/task_1/tasks_goals/"
__submission_cls__: Type[Submission] = ABX17Submission
dataset: ZRC2017Dataset = Field(default_factory=lambda: ZRC2017Dataset.load())
def run(self, submission: "ABX17Submission"):
""" Run abx-17 tasks """
params = submission.params
self.console.print(f'Running {self.name} benchmark on {submission.location.name}')
# create output
submission.score_dir.mkdir(exist_ok=True, parents=True)
task = ABX17Task.parse_obj(params.get_task())
task.eval(submission, self.dataset)
self.console.print('[green]:heavy_check_mark:[/green]Evaluation of benchmark completed successfully ')
self.console.print(f"Scores can be found @ {submission.score_dir}") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/benchmarks/abx17.py | abx17.py |
from typing import Tuple, ClassVar, Type
from pydantic import Field
from zerospeech.datasets import ZRC2017Dataset
from zerospeech.submissions import Submission
from zerospeech.submissions.tde17 import TDE17Submission
from zerospeech.tasks import tde
from ._model import Benchmark
class TDE17Task(tde.TDETask):
tasks: Tuple = ('english', 'french', 'mandarin', 'german', 'wolof')
def gather_items(self, lang: str, submission: "TDE17Submission", dataset: ZRC2017Dataset):
current_data = dataset.index.subsets.get(lang)
if current_data is None:
raise ValueError(f'Language {lang} was not found in {dataset.name}')
current_input_classes_file = submission.items.get(lang)
if current_input_classes_file is None:
raise ValueError(f'Language {lang} was not found in current submission : {submission.location}')
return tde.TDEItems(
wrd_path=current_data.items.alignment_words.file,
phn_path=current_data.items.alignment_phones.file,
input_classes=current_input_classes_file.file
)
class TDE17Benchmark(Benchmark):
"""tde-17 is a benchmark on Spoken term Discovery / Word segmentation
This benchmark has 5 sub-tasks (one for each language)
- english
- french
- mandarin
- german
- wolof
Each task has 3 subsets: 1s, 10s, 120s
These subsets split the same amount of speech into different segments.
- 1s has 1-second segments
- 10s has 10-second segments
- 120s has 120-second segments
For the TDE eval we use this module : https://github.com/zerospeech/tdev2
"""
_name: ClassVar[str] = "tde17"
_doc_url: ClassVar[str] = "https://zerospeech.com/tasks/task_2/tasks_goals/"
__submission_cls__: Type[Submission] = TDE17Submission
dataset: "ZRC2017Dataset" = Field(default_factory=lambda: ZRC2017Dataset.load())
def run(self, submission: "TDE17Submission"):
""" Run TDE-17 tasks """
params = submission.params
self.console.print(f'Running {self.name} benchmark on {submission.location.name}')
# create output dir
submission.score_dir.mkdir(exist_ok=True, parents=True)
task = TDE17Task.parse_obj(params.dict())
task.eval(submission, self.dataset)
self.console.print('[green]:heavy_check_mark:[/green]Evaluation of benchmark completed successfully ')
self.console.print(f"Scores can be found @ {submission.score_dir}") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/benchmarks/tde17.py | tde17.py |
import enum
from datetime import date
from pathlib import Path
from typing import Type, Optional, TYPE_CHECKING
from pydantic import BaseModel
from zerospeech.benchmarks import sLM21, abxLS, tde17, abx17, prosAudit
from zerospeech.httpw import get as http_get, APIHTTPException
from zerospeech.misc import InvalidSubmissionError
from zerospeech.settings import get_settings
from zerospeech.submissions import MetaFile
from ._model import Benchmark
st = get_settings()
class InvalidBenchmarkError(Exception):
pass
class _InfoSchema(BaseModel):
label: str
start_date: date
end_date: Optional[date]
active: bool
url: str
evaluator: Optional[int]
auto_eval: bool
@property
def is_open(self):
""" Check if benchmark is open to submissions """
if self.end_date is not None:
return self.active and self.end_date >= date.today()
return self.active
class BenchmarkList(str, enum.Enum):
""" Simplified enum """
def __new__(cls, benchmark: Type[Benchmark]):
""" Allow setting parameters on enum """
label = benchmark._name # noqa: allow private access
obj = str.__new__(cls, label)
obj._value_ = label
obj._benchmark = benchmark
obj._doc_url = benchmark._doc_url # noqa: allow private access
obj.is_test = False
return obj
sLM21 = sLM21.SLM21Benchmark
abx_LS = abxLS.AbxLSBenchmark
pros_audit = prosAudit.SLMProsodyBenchmark
# TODO: implement score_dir, leaderboard & validation for 2017 (tde & abx)
abx_17 = abx17.ABX17Benchmark
tde_17 = tde17.TDE17Benchmark
@classmethod
def from_submission(cls, location: Path) -> "BenchmarkList":
benchmark_name = MetaFile.benchmark_from_submission(location)
if benchmark_name is None:
raise InvalidSubmissionError("meta.yaml not found or invalid")
try:
if benchmark_name.startswith("test-"):
benchmark_name = benchmark_name.replace("test-", "")
bench = cls(benchmark_name)
bench.is_test = True
else:
bench = cls(benchmark_name)
return bench
except ValueError:
raise InvalidBenchmarkError(f"{benchmark_name} is not a valid benchmark !!")
@property
def benchmark(self) -> Type[Benchmark]:
""" Benchmark Class (used for typing mostly) """
return self._benchmark
@property
def doc_url(self) -> str:
return self._doc_url
@property
def name(self) -> str:
return self._value_
def info(self) -> _InfoSchema:
""" Get benchmark information from back-end"""
benchmark_id = self.value
if self.is_test:
benchmark_id = "test-challenge"
route, _ = st.api.request_params('benchmark_info', benchmark_id=benchmark_id)
response = http_get(route)
if response.status_code != 200:
raise APIHTTPException.from_request('benchmark_info', response)
return _InfoSchema.parse_obj(response.json())
def is_active(self) -> bool:
""" Check if benchmark accepts new submissions """
return self.info().is_open | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/benchmarks/misc.py | misc.py |
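# Illustrative sketch for the benchmark registry above (not part of the
# original source): resolving a benchmark by its label and checking whether
# it currently accepts submissions.
def _example_benchmark_lookup():
    bench = BenchmarkList("abxLS")       # enum lookup by benchmark label
    print(bench.doc_url)                 # documentation page of the task
    if bench.is_active():                # queries the back-end for the open/closed status
        return bench.benchmark()         # instantiate the Benchmark subclass (loads its local dataset)
    return None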
import abc
from pathlib import Path
from typing import TYPE_CHECKING, ClassVar, Type
from pydantic import BaseModel
from pydantic import root_validator
from zerospeech.out import console as out_console, void_console
if TYPE_CHECKING:
from zerospeech.submissions import Submission
from zerospeech.datasets import Dataset
class NoSubmissionTypeError(Exception):
""" This benchmark has not specified submission type"""
pass
class Benchmark(BaseModel, abc.ABC):
""" A Generic benchmark class """
_name: ClassVar[str] = ...
_doc_url: ClassVar[str] = ...
__submission_cls__: Type["Submission"] = ...
dataset: "Dataset"
quiet: bool = False
@classmethod
def docs(cls):
text = getattr(cls, "__doc__", 'No information provided')
url = getattr(cls, '_doc_url', None)
if url:
text += f"For more information visit: {url}"
return text
@property
def name(self) -> str:
return getattr(self, '_name')
@property
def doc_url(self) -> str:
return getattr(self, '_doc_url')
@property
def console(self):
if self.quiet:
return void_console
return out_console
@root_validator(pre=True)
def base_validation(cls, values):
assert hasattr(cls, "_name"), f"A benchmark requires a name (add a _name attribute to the subclass {cls})"
assert hasattr(cls, "_doc_url"), f"A benchmark requires a name (add a _doc_url attribute to the subclass {cls})"
return values
def load_submission(self, location: Path, **kwargs) -> "Submission":
""" Load a submission using specified submission type """
if hasattr(self, '__submission_cls__'):
return self.__submission_cls__.load(location, **kwargs)
raise NoSubmissionTypeError(f'No submission type in benchmark {self._name}')
def init_submission_dir(self, location: Path):
if hasattr(self, '__submission_cls__'):
return self.__submission_cls__.init_dir(location)
raise NoSubmissionTypeError(f'No submission type in benchmark {self._name}')
@abc.abstractmethod
def run(self, submission: "Submission"):
""" Run the benchmark """
pass | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/benchmarks/_model.py | _model.py |
from typing import Tuple, List, Dict, ClassVar, Type
import pandas
import pandas as pd
from pydantic import Field
from zerospeech.datasets import AbxLSDataset
from zerospeech.generics import (
FileItem, FileListItem
)
from zerospeech.submissions.abxLS import AbxLSSubmission
from zerospeech.submissions import Submission
from zerospeech.tasks.abx.abxLS_phoneme import SimpleABXPhonemeTask, ContextMode
from ._model import Benchmark
return_type = Tuple[str, FileItem, FileListItem, ContextMode]
class AbxLSTask(SimpleABXPhonemeTask):
""" ABX task for abx-LS-Rob """
def format_results(self, results: Dict) -> pd.DataFrame:
formatted_results = []
for key, lst in results.items():
for obj in lst:
formatted_results.append(
dict(
subset='-'.join(key.split('-')[:2]),
speaker_mode=obj.get("abx-s-condition"),
context_mode=obj.get('abx-c-condition'),
granularity=obj.get('dataset'),
score=obj.get('score'),
# item_file=Path(obj.get('item-file')).name,
pooling=obj.get('pooling'),
seed=obj.get('seed'),
)
)
return pandas.DataFrame(formatted_results)
def extract_sets(
self, submission: AbxLSSubmission,
dataset: AbxLSDataset, context: ContextMode = ContextMode.all) -> List[return_type]:
""" Extract relevant data for abx from submission & dataset """
self.sets = submission.sets
self.tasks = submission.tasks
abx_sets = []
if ContextMode.triphone_within in context.as_set():
item_type = "triphone_item_file"
if 'dev' in self.sets:
if 'clean' in self.tasks:
abx_sets.append((
'dev-clean-triphone-within',
dataset.index.subsets.dev_clean.items.get(item_type),
submission.items.dev_clean,
context.triphone_within
))
if 'other' in self.tasks:
abx_sets.append((
'dev-other-triphone-within',
dataset.index.subsets.dev_other.items.get(item_type),
submission.items.dev_other,
context.triphone_within
))
if 'test' in self.sets:
if 'clean' in self.tasks:
abx_sets.append((
'test-clean-triphone-within',
dataset.index.subsets.test_clean.items.get(item_type),
submission.items.test_clean,
context.triphone_within
))
if 'other' in self.tasks:
abx_sets.append((
'test-other-triphone-within',
dataset.index.subsets.test_other.items.get(item_type),
submission.items.test_other,
context.triphone_within
))
if ContextMode.phoneme_within in context.as_set():
item_type = "phoneme_item_file"
if 'dev' in self.sets:
if 'clean' in self.tasks:
abx_sets.append((
'dev-clean-phoneme-within',
dataset.index.subsets.dev_clean.items.get(item_type),
submission.items.dev_clean,
context.phoneme_within
))
if 'other' in self.tasks:
abx_sets.append((
'dev-other-phoneme-within',
dataset.index.subsets.dev_other.items.get(item_type),
submission.items.dev_other,
context.phoneme_within
))
if 'test' in self.sets:
if 'clean' in self.tasks:
abx_sets.append((
'test-clean-phoneme-within',
dataset.index.subsets.test_clean.items.get(item_type),
submission.items.test_clean,
context.phoneme_within
))
if 'other' in self.tasks:
abx_sets.append((
'test-other-phoneme-within',
dataset.index.subsets.test_other.items.get(item_type),
submission.items.test_other,
context.phoneme_within
))
if ContextMode.phoneme_any in context.as_set():
item_type = "phoneme_item_file"
if 'dev' in self.sets:
if 'clean' in self.tasks:
abx_sets.append((
'dev-clean-phoneme-any',
dataset.index.subsets.dev_clean.items.get(item_type),
submission.items.dev_clean,
context.phoneme_any
))
if 'other' in self.tasks:
abx_sets.append((
'dev-other-phoneme-any',
dataset.index.subsets.dev_other.items.get(item_type),
submission.items.dev_other,
context.phoneme_any
))
if 'test' in self.sets:
if 'clean' in self.tasks:
abx_sets.append((
'test-clean-phoneme-any',
dataset.index.subsets.test_clean.items.get(item_type),
submission.items.test_clean,
context.phoneme_any
))
if 'other' in self.tasks:
abx_sets.append((
'test-other-phoneme-any',
dataset.index.subsets.test_other.items.get(item_type),
submission.items.test_other,
context.phoneme_any
))
return abx_sets
class AbxLSBenchmark(Benchmark):
""" abx-LS-Phoneme is a benchmark on acoustic Units using the ABX metric.
It is a reimplementation of the ABX-LS benchmark with more robust .item files
This benchmark has 2 sub-tasks :
- clean
- other
Each task has two subsets: dev, test
For ABX measuring we use this module : https://github.com/zerospeech/libri-light-abx2
"""
_name: ClassVar[str] = "abxLS"
_doc_url: ClassVar[str] = "https://zerospeech.com/tasks/task_1/tasks_goals/"
__submission_cls__: Type[Submission] = AbxLSSubmission
dataset: AbxLSDataset = Field(default_factory=lambda: AbxLSDataset.load())
def run(self, submission: AbxLSSubmission):
""" run ABX-LSRob tasks """
params = submission.params
self.console.print(f'Running {self.name} benchmark on {submission.location.name}')
# create output dir
submission.score_dir.mkdir(exist_ok=True, parents=True)
task = AbxLSTask.parse_obj(params.get_task())
task.eval(submission, self.dataset)
self.console.print('[green]:heavy_check_mark:[/green]Evaluation of benchmark completed successfully ')
self.console.print(f"Scores can be found @ {submission.score_dir}") | zerospeech-benchmarks | /zerospeech-benchmarks-0.9.4.tar.gz/zerospeech-benchmarks-0.9.4/zerospeech/benchmarks/abxLS.py | abxLS.py |
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Optional, List
from vdataset import mount, unmount
from .libri_light.eval_ABX import main as run_abx
ABXFileTypes = Enum('ABXFileTypes',
'.pt .npy .txt .wav .flac .mp3')
ABXMode = Enum('ABXMode', 'all within across')
ABXDistanceMode = Enum('ABXDistanceMode',
'euclidian cosine kl kl_symmetric')
@dataclass
class AbxArguments:
""" List of arguments to provide to abx in phonetic_eval.abx"""
# path to input data
path_data: str
# path to item file
path_item_file: str
# Path to a CPC checkpoint
path_checkpoint: Optional[str] = None
# size of a single feature
feature_size: Optional[float] = float(0.1)
# Use the GPU to compute distances
cuda: bool = True
# extension of the input feature files
file_extension: ABXFileTypes = '.txt'
# Choose the mode of the ABX score to compute
mode: ABXMode = 'all'
# Choose the kind of distance to use to compute
distance_mode: ABXDistanceMode = 'cosine'
# Max size of a group while computing the ABX score
max_size_group: int = 10
# When computing the ABX across score, maximum
# number of speaker X to sample per couple A,B.
max_x_across: int = 5
# location to output the results
out: Optional[str] = None
# boolean flag setting the path_data as a mounted dataset
_is_mounted: bool = False
@classmethod
def load_from_file_list(cls, file_list: List[Path], **kwargs):
""" Create a mounted folder containing all the files as symlinks """
data_loc = mount(file_list)
if data_loc:
# mark path_data as mounted so that clear_mounts() unmounts it later
return cls(path_data=str(data_loc), _is_mounted=True, **kwargs)
raise SystemError('Could not create temp folder')
def clear_mounts(self):
""" Clean mounted folder """
if self._is_mounted:
unmount(self.path_data)
def abx_eval(args: AbxArguments):
""" Run abx evaluation """
results = run_abx(arg_obj=args)
args.clear_mounts()
return results | zerospeech-libriabx | /zerospeech_libriabx-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/libriabx/wrappers.py | wrappers.py |
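# Illustrative sketch for the wrapper module above (not part of the original
# source): mounting a list of feature files and running the ABX evaluation.
# The feature files and the .item file are hypothetical.
def _example_abx_eval():
    feature_files = [Path("/feats/a.txt"), Path("/feats/b.txt")]       # hypothetical .txt feature files
    args = AbxArguments.load_from_file_list(
        feature_files,
        path_item_file="/data/abx/dev-clean.item",                     # hypothetical .item file
        file_extension=".txt",
        cuda=False
    )
    return abx_eval(args)                                              # dict of 'within'/'across' scores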
import argparse
import torch
import torchaudio
import torch.nn as nn
import torch.nn.functional as F
def download_state_dict(model_name):
base_url = "https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints"
return torch.hub.load_state_dict_from_url(f"{base_url}/{model_name}")
def load_cpc_features(state_dict):
config = state_dict["config"]
weights = state_dict["weights"]
encoder = CPCEncoder(config["hiddenEncoder"])
ar_net = CPCAR(config["hiddenEncoder"], config["hiddenGar"], False,
config["nLevelsGRU"])
model = CPCModel(encoder, ar_net)
model.load_state_dict(weights, strict=False)
output = FeatureModule(model, False)
output.config = config
return output
def get_features_state_dict(feature_module):
config = feature_module.config
if config is None:
raise ValueError("The input feature_module should have config defined")
weights = feature_module.model.state_dict()
return {"config": config, "weights": weights}
def build_feature_from_file(file_path, feature_maker, max_size_seq=64000):
r"""
Apply the feature maker to the given file.
Arguments:
    - file_path (string): path of the sequence to load
    - feature_maker (FeatureModule): model to apply
    - max_size_seq (int): maximal size (in samples) of a chunk
Return:
    a torch tensor of size Seq_size x Feature_dim
"""
seq = torchaudio.load(file_path)[0]
sizeSeq = seq.size(1)
start = 0
out = []
while start < sizeSeq:
if start + max_size_seq > sizeSeq:
break
end = min(sizeSeq, start + max_size_seq)
subseq = (seq[:, start:end]).view(1, 1, -1).cuda(device=0)
with torch.no_grad():
features = feature_maker(subseq)
out.append(features.detach().cpu())
start += max_size_seq
if start < sizeSeq:
subseq = (seq[:, -max_size_seq:]).view(1, 1, -1).cuda(device=0)
with torch.no_grad():
features = feature_maker(subseq)
df = subseq.size(2) // features.size(1)
delta = (sizeSeq - start) // df
out.append(features[:, -delta:].detach().cpu())
out = torch.cat(out, dim=1)
return out.view(out.size(1), out.size(2))
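# Illustrative sketch (not part of the original module): loading a pretrained
# CPC checkpoint and extracting features for a single audio file. Paths are
# hypothetical and a CUDA device is required, since build_feature_from_file
# moves data to GPU 0.
def _example_extract_features():
    state_dict = torch.load("/models/cpc_checkpoint.pt")     # hypothetical local checkpoint
    feature_maker = load_cpc_features(state_dict)
    feature_maker.cuda()
    features = build_feature_from_file("/audio/sample.flac", feature_maker)
    return features                                          # tensor of shape (n_frames, feature_dim)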
##############################################################################
# Minimal code to load a CPC checkpoint
##############################################################################
class ChannelNorm(nn.Module):
def __init__(self,
numFeatures,
epsilon=1e-05,
affine=True):
super(ChannelNorm, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(
torch.Tensor(1, numFeatures, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
cumMean = x.mean(dim=1, keepdim=True)
cumVar = x.var(dim=1, keepdim=True)
x = (x - cumMean)*torch.rsqrt(cumVar + self.epsilon)
if self.weight is not None:
x = x * self.weight + self.bias
return x
class CPCEncoder(nn.Module):
def __init__(self,
sizeHidden=512):
super(CPCEncoder, self).__init__()
normLayer = ChannelNorm
self.conv0 = nn.Conv1d(1, sizeHidden, 10, stride=5, padding=3)
self.batchNorm0 = normLayer(sizeHidden)
self.conv1 = nn.Conv1d(sizeHidden, sizeHidden, 8, stride=4, padding=2)
self.batchNorm1 = normLayer(sizeHidden)
self.conv2 = nn.Conv1d(sizeHidden, sizeHidden, 4,
stride=2, padding=1)
self.batchNorm2 = normLayer(sizeHidden)
self.conv3 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)
self.batchNorm3 = normLayer(sizeHidden)
self.conv4 = nn.Conv1d(sizeHidden, sizeHidden, 4, stride=2, padding=1)
self.batchNorm4 = normLayer(sizeHidden)
self.DOWNSAMPLING = 160
def getDimOutput(self):
return self.conv4.out_channels
def forward(self, x):
x = F.relu(self.batchNorm0(self.conv0(x)))
x = F.relu(self.batchNorm1(self.conv1(x)))
x = F.relu(self.batchNorm2(self.conv2(x)))
x = F.relu(self.batchNorm3(self.conv3(x)))
x = F.relu(self.batchNorm4(self.conv4(x)))
return x
class CPCAR(nn.Module):
def __init__(self,
dimEncoded,
dimOutput,
keepHidden,
nLevelsGRU):
super(CPCAR, self).__init__()
self.baseNet = nn.LSTM(dimEncoded, dimOutput,
num_layers=nLevelsGRU, batch_first=True)
self.hidden = None
self.keepHidden = keepHidden
def getDimOutput(self):
return self.baseNet.hidden_size
def forward(self, x):
try:
self.baseNet.flatten_parameters()
except RuntimeError:
pass
x, h = self.baseNet(x, self.hidden)
if self.keepHidden:
if isinstance(h, tuple):
self.hidden = tuple(x.detach() for x in h)
else:
self.hidden = h.detach()
return x
class CPCModel(nn.Module):
def __init__(self,
encoder,
AR):
super(CPCModel, self).__init__()
self.gEncoder = encoder
self.gAR = AR
def forward(self, batchData, label):
encodedData = self.gEncoder(batchData).permute(0, 2, 1)
cFeature = self.gAR(encodedData)
return cFeature, encodedData, label
class FeatureModule(torch.nn.Module):
r"""
A simpler interface to handle CPC models. Useful for a smooth workflow when
working with CPC trained features.
"""
def __init__(self, featureMaker, get_encoded,
seq_norm=True):
super(FeatureModule, self).__init__()
self.get_encoded = get_encoded
self.model = featureMaker
self.seq_norm = seq_norm
self.config = None
self.is_cuda = False  # updated by cuda()/cpu()
def forward(self, batch_data):
# Input Size : BatchSize x 1 x SeqSize
# Feature size: BatchSize x SeqSize x ChannelSize
if self.is_cuda:
batch_data = batch_data.cuda()
cFeature, encoded, _ = self.model(batch_data, None)
if self.get_encoded:
cFeature = encoded
if self.seq_norm:
mean = cFeature.mean(dim=1, keepdim=True)
var = cFeature.var(dim=1, keepdim=True)
cFeature = (cFeature - mean) / torch.sqrt(var + 1e-08)
return cFeature
def cuda(self):
self.is_cuda = True
super(FeatureModule, self).cuda()
def cpu(self):
self.is_cuda = False
super(FeatureModule, self).cpu()
def get_output_dim(self):
if self.get_encoded:
return self.config["hiddenEncoder"]
return self.config["hiddenGar"]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Download model')
parser.add_argument('model_name', type=str,
choices=["600h", "6kh", "60kh"])
parser.add_argument('output', type=str)
args = parser.parse_args()
CPC_MODELS_NAMES = {"60kh": "60k_epoch4-d0f474de.pt",
"600h": "600h-bdd7ced6.pt",
"6kh":"6k_epoch30-9df0493c.pt"}
state_dict = download_state_dict(CPC_MODELS_NAMES[args.model_name])
torch.save(state_dict, args.output) | zerospeech-libriabx | /zerospeech_libriabx-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/libriabx/libri_light/CPC_loader.py | CPC_loader.py |
import argparse
import os
import sys
from pathlib import Path
import numpy as np
import torch
from .ABX_src import abx_group_computation as abx_g
from .ABX_src import abx_iterators as abx_it
from .CPC_loader import load_cpc_features, build_feature_from_file
def find_all_files(path_dir, extension):
out = []
for root, dirs, filenames in os.walk(path_dir):
for f in filenames:
if f.endswith(extension):
out.append(((str(Path(f).stem)), os.path.join(root, f)))
return out
def reduce_sparse_data(quotient, divisor):
return quotient / (1e-08 * (divisor == 0) + divisor)
def load_pt(x):
data = torch.load(x, 'cpu')
assert(len(data.size()) == 2)
return data
def load_npy(x):
data = torch.tensor(np.load(x))
assert(len(data.size()) == 2)
return data
def load_txt(x):
data = torch.tensor(np.loadtxt(x))
assert (len(data.size()) == 2)
return data
def ABX(feature_function,
path_item_file,
seq_list,
distance_mode,
step_feature,
modes,
cuda=False,
max_x_across=5,
max_size_group=30):
# ABX dataset
ABXDataset = abx_it.ABXFeatureLoader(path_item_file, seq_list,
feature_function, step_feature, True)
if cuda:
ABXDataset.cuda()
# Distance function
distance_function = abx_g.get_distance_function_from_name(distance_mode)
# Output
scores = {}
# ABX within
if 'within' in modes:
print(" > Computing ABX within speakers...")
ABXIterator = ABXDataset.get_iterator('within', max_size_group)
group_confusion = abx_g.get_abx_scores_dtw_on_group(ABXIterator,
distance_function,
ABXIterator.symmetric)
n_data = group_confusion._values().size(0)
index_ = torch.sparse.LongTensor(group_confusion._indices(),
torch.ones((n_data),
dtype=torch.float),
group_confusion.size())
divisor_context = torch.sparse.sum(index_, dim=3).to_dense()
group_confusion = torch.sparse.sum(group_confusion, dim=3).to_dense()
group_confusion = reduce_sparse_data(group_confusion, divisor_context)
S, p1, p2 = group_confusion.size()
index_speaker = divisor_context > 0
divisor_speaker = index_speaker.sum(dim=0)
phone_confusion = reduce_sparse_data(group_confusion.sum(dim=0),
divisor_speaker)
scores['within'] = (phone_confusion.sum() /
(divisor_speaker > 0).sum()).item()
print(f" > ...done. ABX within : {scores['within']}")
# ABX across
if 'across' in modes:
print(" > Computing ABX across speakers...")
ABXIterator = ABXDataset.get_iterator('across', max_size_group)
ABXIterator.max_x = max_x_across
group_confusion = abx_g.get_abx_scores_dtw_on_group(ABXIterator,
distance_function,
ABXIterator.symmetric)
n_data = group_confusion._values().size(0)
index_ = torch.sparse.LongTensor(group_confusion._indices(),
torch.ones((n_data),
dtype=torch.float),
group_confusion.size())
divisor_context = torch.sparse.sum(index_, dim=[3, 4]).to_dense()
group_confusion = torch.sparse.sum(
group_confusion, dim=[3, 4]).to_dense()
group_confusion = reduce_sparse_data(group_confusion, divisor_context)
S, p1, p2 = group_confusion.size()
index_speaker = divisor_context > 0
divisor_speaker = index_speaker.sum(dim=0)
phone_confusion = reduce_sparse_data(group_confusion.sum(dim=0),
divisor_speaker)
scores['across'] = (phone_confusion.sum() /
(divisor_speaker > 0).sum()).item()
print(f" > ...done. ABX across : {scores['across']}")
return scores
def parse_args(argv=None):
argv = argv if argv is not None else sys.argv[1:]
parser = argparse.ArgumentParser(description='ABX metric')
parser.add_argument('path_data', type=str,
help="Path to directory containing the data")
parser.add_argument('path_item_file', type=str,
help="Path to the .item file")
parser.add_argument('--path_checkpoint', type=str, default=None,
help="Path to a CPC checkpoint. If set, the apply the "
"model to the input data to compute the features")
parser.add_argument('--file_extension', type=str, default='.pt',
choices=['.pt', '.npy', '.wav', '.flac', '.mp3'])
parser.add_argument('--feature_size', type=float, default=0.01,
help="Size (in s) of one feature")
parser.add_argument('--cuda', action='store_true',
help="Use the GPU to compute distances")
parser.add_argument('--mode', type=str, default='all',
choices=['all', 'within', 'across'],
help="Choose the mode of the ABX score to compute")
parser.add_argument('--distance_mode', type=str, default='cosine',
choices=['euclidian', 'cosine', 'kl', 'kl_symmetric'],
help="Choose the kind of distance to use to compute "
"the ABX score.")
parser.add_argument("--max_size_group", type=int, default=10,
help="Max size of a group while computing the"
"ABX score. A small value will make the code "
"faster but less precise.")
parser.add_argument("--max_x_across", type=int, default=5,
help="When computing the ABX across score, maximum"
"number of speaker X to sample per couple A,B. "
" A small value will make the code faster but "
"less precise.")
parser.add_argument("--out", type=str, default=None,
help="Path where the results should be saved")
# multi-gpu / multi-node
return parser.parse_args(argv)
def main(argv=None, arg_obj=None):
if arg_obj:
args = arg_obj
else:
args = parse_args(argv)
if args.path_checkpoint is None:
if args.file_extension == '.pt':
feature_function = load_pt
elif args.file_extension == '.npy':
feature_function = load_npy
elif args.file_extension == '.txt':
feature_function = load_txt
else:
state_dict = torch.load(args.path_checkpoint)
feature_maker = load_cpc_features(state_dict)
feature_maker.cuda()
feature_function = lambda x: build_feature_from_file(x, feature_maker)
# Modes
if args.mode == 'all':
modes = ["within", "across"]
else:
modes = [args.mode]
step_feature = 1 / args.feature_size
# Get the list of sequences
seq_list = find_all_files(args.path_data, args.file_extension)
scores = ABX(feature_function, args.path_item_file,
seq_list, args.distance_mode,
step_feature, modes,
cuda=args.cuda,
max_x_across=args.max_x_across,
max_size_group=args.max_size_group)
return scores | zerospeech-libriabx | /zerospeech_libriabx-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/libriabx/libri_light/eval_ABX.py | eval_ABX.py |
import torch
import progressbar
import math
import random
def normalize_with_singularity(x):
r"""
Normalize the given vector across the third dimension.
Extend all vectors by eps=1e-12 to put the null vector at the maximal
cosine distance from any non-null vector.
"""
S, H = x.size()
norm_x = (x**2).sum(dim=1, keepdim=True)
x /= torch.sqrt(norm_x)
zero_vals = (norm_x == 0).view(S)
x[zero_vals] = 1 / math.sqrt(H)
border_vect = torch.zeros((S, 1),
dtype=x.dtype,
device=x.device) + 1e-12
border_vect[zero_vals] = -2*1e12
return torch.cat([x, border_vect], dim=1)
def load_item_file(path_item_file):
r""" Load a .item file indicating the triplets for the ABX score. The
input file must have the following format:
line 0 : whatever (not read)
line > 0: #file_ID onset offset #phone prev-phone next-phone speaker
onset : beginning of the triplet (in s)
offset : end of the triplet (in s)
"""
with open(path_item_file, 'r') as file:
data = file.readlines()[1:]
data = [x.replace('\n', '') for x in data]
out = {}
phone_match = {}
speaker_match = {}
context_match = {}
for line in data:
items = line.split()
assert(len(items) == 7)
fileID = items[0]
if fileID not in out:
out[fileID] = []
onset, offset = float(items[1]), float(items[2])
context = '+'.join([items[4], items[5]])
phone = items[3]
speaker = items[6]
if phone not in phone_match:
s = len(phone_match)
phone_match[phone] = s
phone_id = phone_match[phone]
if context not in context_match:
s = len(context_match)
context_match[context] = s
context_id = context_match[context]
if speaker not in speaker_match:
s = len(speaker_match)
speaker_match[speaker] = s
speaker_id = speaker_match[speaker]
out[fileID].append([onset, offset, context_id, phone_id, speaker_id])
return out, context_match, phone_match, speaker_match
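# Illustrative example (not part of the original module) of a hypothetical
# .item line and the structures load_item_file builds from it:
#
#   header line (ignored)
#   2107-982 11.42 11.75 ih s t spk1
#
# yields out["2107-982"] == [[11.42, 11.75, context_id, phone_id, speaker_id]]
# where the context "s+t" is registered in context_match, the phone "ih" in
# phone_match and the speaker "spk1" in speaker_match (ids are assigned in
# order of first appearance).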
def get_features_group(in_data, index_order):
in_index = list(range(len(in_data)))
in_index.sort(key=lambda x: [in_data[x][i] for i in index_order])
out_groups = []
last_values = [in_data[in_index[0]][i] for i in index_order]
i_s = 0
curr_group = [[] for i in index_order]
n_orders = len(index_order) - 1
tmp = [in_data[i] for i in in_index]
for index, item in enumerate(tmp):
for order_index, order in enumerate(index_order):
if item[order] != last_values[order_index]:
curr_group[-1].append((i_s, index))
for i in range(n_orders, order_index, -1):
curr_group[i-1].append(curr_group[i])
curr_group[i] = []
if order_index == 0:
out_groups += curr_group[0]
curr_group[0] = []
last_values = [item[i] for i in index_order]
i_s = index
break
if i_s < len(in_data):
curr_group[-1].append((i_s, len(in_data)))
for i in range(n_orders, 0, -1):
curr_group[i-1].append(curr_group[i])
out_groups += curr_group[0]
return in_index, out_groups
class ABXFeatureLoader:
def __init__(self,
path_item_file,
seqList,
featureMaker,
stepFeature,
normalize):
r"""
Args:
path_item_file (str): path to the .item files containing the ABX
triplets
seqList (list): list of items (fileID, path) where fileID refers to
the file's ID as used in path_item_file, and path
is the actual path to the input audio sequence
featureMaker (function): either a function or a callable object.
Takes a path as input and outputs the
feature sequence corresponding to the
given file.
            normalize (bool): if True all input features will be normalized
across the channels dimension.
Note:
You can use this dataset with pre-computed features. For example, if
you have a collection of features files in the torch .pt format then
you can just set featureMaker = torch.load.
"""
files_data, self.context_match, self.phone_match, self.speaker_match = \
load_item_file(path_item_file)
self.seqNorm = True
self.stepFeature = stepFeature
self.loadFromFileData(files_data, seqList, featureMaker, normalize)
def loadFromFileData(self, files_data, seqList, feature_maker, normalize):
# self.features[i]: index_start, size, context_id, phone_id, speaker_id
self.features = []
self.INDEX_CONTEXT = 2
self.INDEX_PHONE = 3
self.INDEX_SPEAKER = 4
data = []
totSize = 0
print(" > Building the input features...")
bar = progressbar.ProgressBar(prefix=' > ', max_value=len(seqList))
bar.start()
for index, vals in enumerate(seqList):
fileID, file_path = vals
bar.update(index)
if fileID not in files_data:
continue
features = feature_maker(file_path)
if normalize:
features = normalize_with_singularity(features)
features = features.detach().cpu()
phone_data = files_data[fileID]
for phone_start, phone_end, context_id, phone_id, speaker_id in phone_data:
index_start = max(
0, int(math.ceil(self.stepFeature * phone_start - 0.5)))
index_end = min(features.size(0),
int(math.floor(self.stepFeature * phone_end - 0.5)))
if index_start >= features.size(0) or index_end <= index_start:
continue
loc_size = index_end - index_start
self.features.append([totSize, loc_size, context_id,
phone_id, speaker_id])
data.append(features[index_start:index_end])
totSize += loc_size
bar.finish()
self.data = torch.cat(data, dim=0)
self.feature_dim = self.data.size(1)
def get_data_device(self):
return self.data.device
def cuda(self):
self.data = self.data.cuda()
def cpu(self):
self.data = self.data.cpu()
def get_max_group_size(self, i_group, i_sub_group):
id_start, id_end = self.group_index[i_group][i_sub_group]
return max([self.features[i][1] for i in range(id_start, id_end)])
def get_ids(self, index):
context_id, phone_id, speaker_id = self.features[index][2:]
return context_id, phone_id, speaker_id
def __getitem__(self, index):
i_data, out_size, context_id, phone_id, speaker_id = self.features[index]
return self.data[i_data:(i_data + out_size)], out_size, (context_id, phone_id, speaker_id)
def __len__(self):
return len(self.features)
def get_n_speakers(self):
return len(self.speaker_match)
def get_n_context(self):
return len(self.context_match)
def get_n_phone(self):
return len(self.phone_match)
def get_n_groups(self):
return len(self.group_index)
def get_n_sub_group(self, index_sub_group):
return len(self.group_index[index_sub_group])
def get_iterator(self, mode, max_size_group):
if mode == 'within':
return ABXWithinGroupIterator(self, max_size_group)
if mode == 'across':
return ABXAcrossGroupIterator(self, max_size_group)
raise ValueError(f"Invalid mode: {mode}")
class ABXIterator:
r"""
Base class building ABX's triplets.
"""
def __init__(self, abxDataset, max_size_group):
self.max_size_group = max_size_group
self.dataset = abxDataset
self.len = 0
self.index_csp, self.groups_csp = \
get_features_group(abxDataset.features,
[abxDataset.INDEX_CONTEXT,
abxDataset.INDEX_SPEAKER,
abxDataset.INDEX_PHONE])
def get_group(self, i_start, i_end):
data = []
max_size = 0
to_take = list(range(i_start, i_end))
if i_end - i_start > self.max_size_group:
to_take = random.sample(to_take, k=self.max_size_group)
for i in to_take:
loc_data, loc_size, loc_id = self.dataset[self.index_csp[i]]
max_size = max(loc_size, max_size)
data.append(loc_data)
N = len(to_take)
out_data = torch.zeros(N, max_size,
self.dataset.feature_dim,
device=self.dataset.get_data_device())
out_size = torch.zeros(N, dtype=torch.long,
device=self.dataset.get_data_device())
for i in range(N):
size = data[i].size(0)
out_data[i, :size] = data[i]
out_size[i] = size
return out_data, out_size, loc_id
def __len__(self):
return self.len
def get_board_size(self):
r"""
Get the output dimension of the triplet's space.
"""
pass
class ABXWithinGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX within score.
"""
def __init__(self, abxDataset, max_size_group):
super(ABXWithinGroupIterator, self).__init__(abxDataset,
max_size_group)
self.symmetric = True
for context_group in self.groups_csp:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
if i_end - i_start > 1:
self.len += (len(speaker_group) - 1)
def __iter__(self):
for i_c, context_group in enumerate(self.groups_csp):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.groups_csp[i_c][i_s][i_a]
if i_end_a - i_start_a == 1:
continue
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.groups_csp[i_c][i_s][i_b]
data_b, size_b, id_b = self.get_group(i_start_b,
i_end_b)
data_a, size_a, id_a = self.get_group(i_start_a,
i_end_a)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0]
yield out_coords, (data_a, size_a), (data_b, size_b), \
(data_a, size_a)
def get_board_size(self):
return (self.dataset.get_n_speakers(),
self.dataset.get_n_phone(),
self.dataset.get_n_phone(),
self.dataset.get_n_context())
class ABXAcrossGroupIterator(ABXIterator):
r"""
Iterator giving the triplets for the ABX across score.
"""
def __init__(self, abxDataset, max_size_group):
super(ABXAcrossGroupIterator, self).__init__(abxDataset,
max_size_group)
self.symmetric = False
self.get_speakers_from_cp = {}
self.max_x = 5
for context_group in self.groups_csp:
for speaker_group in context_group:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.dataset.get_ids(
self.index_csp[i_start])
if c_id not in self.get_speakers_from_cp:
self.get_speakers_from_cp[c_id] = {}
if p_id not in self.get_speakers_from_cp[c_id]:
self.get_speakers_from_cp[c_id][p_id] = {}
self.get_speakers_from_cp[c_id][p_id][s_id] = (
i_start, i_end)
for context_group in self.groups_csp:
for speaker_group in context_group:
if len(speaker_group) > 1:
for i_start, i_end in speaker_group:
c_id, p_id, s_id = self.dataset.get_ids(
self.index_csp[i_start])
self.len += (len(speaker_group) - 1) * (min(self.max_x,
len(self.get_speakers_from_cp[c_id][p_id]) - 1))
def get_other_speakers_in_group(self, i_start_group):
c_id, p_id, s_id = self.dataset.get_ids(self.index_csp[i_start_group])
return [v for k, v in self.get_speakers_from_cp[c_id][p_id].items() if k != s_id]
def get_abx_triplet(self, i_a, i_b, i_x):
i_start_a, i_end_a = i_a
data_a, size_a, id_a = self.get_group(i_start_a, i_end_a)
i_start_b, i_end_b = i_b
data_b, size_b, id_b = self.get_group(i_start_b, i_end_b)
i_start_x, i_end_x = i_x
data_x, size_x, id_x = self.get_group(i_start_x, i_end_x)
out_coords = id_a[2], id_a[1], id_b[1], id_a[0], id_x[2]
return out_coords, (data_a, size_a), (data_b, size_b), \
(data_x, size_x)
def __iter__(self):
for i_c, context_group in enumerate(self.groups_csp):
for i_s, speaker_group in enumerate(context_group):
n_phones = len(speaker_group)
if n_phones == 1:
continue
for i_a in range(n_phones):
i_start_a, i_end_a = self.groups_csp[i_c][i_s][i_a]
ref = self.get_other_speakers_in_group(i_start_a)
if len(ref) > self.max_x:
speakers_a = random.sample(ref, k=self.max_x)
else:
speakers_a = ref
for i_start_x, i_end_x in speakers_a:
for i_b in range(n_phones):
if i_b == i_a:
continue
i_start_b, i_end_b = self.groups_csp[i_c][i_s][i_b]
yield self.get_abx_triplet((i_start_a, i_end_a), (i_start_b, i_end_b), (i_start_x, i_end_x))
def get_board_size(self):
return (self.dataset.get_n_speakers(),
self.dataset.get_n_phone(),
self.dataset.get_n_phone(),
self.dataset.get_n_context(),
self.dataset.get_n_speakers()) | zerospeech-libriabx | /zerospeech_libriabx-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/libriabx/libri_light/ABX_src/abx_iterators.py | abx_iterators.py |
import torch
import math
import libri_light_dtw as dtw
import progressbar
def get_distance_function_from_name(name_str):
if name_str == 'euclidean':
return get_euclidian_distance_batch
if name_str == 'cosine':
return get_cosine_distance_batch
if name_str == 'kl':
return get_kl_distance_batch
if name_str == 'kl_symmetric':
return get_kl_distance_symmetric_batch
raise ValueError(f"Invalid distance mode")
def check_dtw_group_validity(a, b, x):
assert (len(a.size()) == len(b.size()))
assert (len(a.size()) == len(x.size()))
assert (a.size(2) == x.size(2))
assert (a.size(2) == b.size(2))
def get_kl_distance_batch(a1, a2, epsilon=1e-6):
N1, S1, D = a1.size() # Batch x Seq x Channel
N2, S2, D = a2.size() # Batch x Seq x Channel
# (P * (P / Q).log()).sum()
div = (a1.view(N1, 1, S1, 1, D) + epsilon) / (a2.view(1, N2, 1, S2, D) + epsilon)
prod = (a1.view(N1, 1, S1, 1, D)) * div.log()
return prod.sum(dim=4)
def get_kl_distance_symmetric_batch(a1, a2, epsilon=1e-6):
N1, S1, D = a1.size()
N2, S2, D = a2.size()
div1 = (a1.view(N1, 1, S1, 1, D) + epsilon) / (a2.view(1, N2, 1, S2, D) + epsilon)
div2 = (a2.view(1, N2, 1, S2, D) + epsilon) / (a1.view(N1, 1, S1, 1, D) + epsilon)
prod1 = (a1.view(N1, 1, S1, 1, D)) * div1.log()
prod2 = (a2.view(1, N2, 1, S2, D)) * div2.log()
return (0.5 * prod1 + 0.5 * prod2).sum(dim=4)
def get_cosine_distance_batch(a1, a2, epsilon=1e-8):
r""" a1 and a2 must be normalized"""
N1, S1, D = a1.size() # Batch x Seq x Channel
N2, S2, D = a2.size() # Batch x Seq x Channel
prod = (a1.view(N1, 1, S1, 1, D)) * (a2.view(1, N2, 1, S2, D))
    # Sum across the channel dimension
prod = torch.clamp(prod.sum(dim=4), -1, 1).acos() / math.pi
return prod
def get_euclidian_distance_batch(a1, a2):
N1, S1, D = a1.size()
N2, S2, D = a2.size()
diff = a1.view(N1, 1, S1, 1, D) - a2.view(1, N2, 1, S2, D)
return torch.sqrt((diff ** 2).sum(dim=4))
def get_distance_group_dtw(a1, a2, size1, size2,
ignore_diag=False, symmetric=False,
distance_function=get_cosine_distance_batch):
N1, S1, D = a1.size()
N2, S2, D = a2.size()
if size1.size(0) != N1:
print(a1.size(), size1.size())
print(a2.size(), size2.size())
assert (size1.size(0) == N1)
assert (size2.size(0) == N2)
distance_mat = distance_function(a1, a2).detach().cpu().numpy()
return dtw.dtw_batch(a1, a2, size1, size2,
distance_mat,
ignore_diag, symmetric)
def get_theta_group_dtw(a, b, x, sa, sb, sx, distance_function, symmetric):
check_dtw_group_validity(a, b, x)
dxb = get_distance_group_dtw(
x, b, sx, sb, distance_function=distance_function)
dxa = get_distance_group_dtw(x, a, sx, sa, ignore_diag=symmetric,
symmetric=symmetric,
distance_function=distance_function)
Nx, Na = dxa.size()
Nx, Nb = dxb.size()
if symmetric:
n_pos = Na * (Na - 1)
max_val = dxb.max().item()
for i in range(Na):
dxa[i, i] = max_val + 1
else:
n_pos = Na * Nx
dxb = dxb.view(Nx, 1, Nb).expand(Nx, Na, Nb)
dxa = dxa.view(Nx, Na, 1).expand(Nx, Na, Nb)
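    # ABX score: proportion of comparisons where x is closer to a than to b
    # (d(x, a) < d(x, b)), with ties counted as 1/2, averaged over all pairs.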
sc = (dxa < dxb).sum() + 0.5 * (dxa == dxb).sum()
sc /= (n_pos * Nb)
return sc.item()
def loc_dtw(data, distance_function, symmetric):
coords, group_a, group_b, group_x = data
group_a_data, group_a_size = group_a
group_b_data, group_b_size = group_b
group_x_data, group_x_size = group_x
theta = get_theta_group_dtw(group_a_data,
group_b_data,
group_x_data,
group_a_size,
group_b_size,
group_x_size,
distance_function,
symmetric)
return (coords, 1 - theta)
def get_abx_scores_dtw_on_group(group_iterator,
distance_function,
symmetric):
data_list = []
coords_list = []
bar = progressbar.ProgressBar(prefix=' > ', max_value=len(group_iterator))
bar.start()
with torch.no_grad():
for index, group in enumerate(group_iterator):
bar.update(index)
coords, abx = loc_dtw(group, distance_function, symmetric)
data_list.append(abx)
coords_list.append(coords)
bar.finish()
return torch.sparse.FloatTensor(torch.LongTensor(coords_list).t(),
torch.FloatTensor(data_list),
group_iterator.get_board_size()) | zerospeech-libriabx | /zerospeech_libriabx-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/libriabx/libri_light/ABX_src/abx_group_computation.py | abx_group_computation.py |
# libri-light-abx2
The ABX phonetic evaluation metric for unsupervised representation learning as used by the ZeroSpeech challenge, now with context-type options (on-triphone, within-context, any-context). This module is a reworking of https://github.com/zerospeech/libri-light-abx, which in turn is a wrapper around https://github.com/facebookresearch/libri-light/tree/main/eval
### Installation
You can install this module directly from pip using the following command:
`pip install zerospeech-libriabx2`
Or you can install from source by cloning this repository and running:
`pip install .`
As a final alternative, you can install it into a conda environment by running:
`conda install -c conda-forge -c pytorch -c coml zerospeech-libriabx2 pytorch::pytorch`
### Usage
### From command line
```
usage: zrc-abx2 [-h] [--path_checkpoint PATH_CHECKPOINT]
[--file_extension {.pt,.npy,.wav,.flac,.mp3,.npz,.txt}]
[--feature_size FEATURE_SIZE] [--cuda]
[--speaker_mode {all,within,across}]
[--context_mode {all,within,any}]
[--distance_mode {euclidian,euclidean,cosine,kl,kl_symmetric}]
[--max_size_group MAX_SIZE_GROUP]
[--max_x_across MAX_X_ACROSS] [--out OUT] [--seed SEED]
[--pooling {none,mean,hamming}] [--seq_norm]
[--max_size_seq MAX_SIZE_SEQ] [--strict]
path_data path_item_file
ABX metric
positional arguments:
path_data Path to directory containing the submission data
path_item_file Path to the .item file containing the timestamps and
transcriptions
optional arguments:
-h, --help show this help message and exit
--path_checkpoint PATH_CHECKPOINT
Path to a CPC checkpoint. If set, apply the model to
the input data to compute the features
--file_extension {.pt,.npy,.wav,.flac,.mp3,.npz,.txt}
--feature_size FEATURE_SIZE
Size (in s) of one feature
--cuda Use the GPU to compute distances
--speaker_mode {all,within,across}
Choose the speaker mode of the ABX score to compute
--context_mode {all,within,any}
Choose the context mode of the ABX score to compute
--distance_mode {euclidian,euclidean,cosine,kl,kl_symmetric}
Choose the kind of distance to use to compute the ABX
score.
--max_size_group MAX_SIZE_GROUP
Max size of a group while computing the ABX score. A
small value will make the code faster but less
precise.
--max_x_across MAX_X_ACROSS
When computing the ABX across score, maximum number of
speaker X to sample per couple A,B. A small value will
make the code faster but less precise.
--out OUT Path where the results should be saved
--seed SEED Seed to use in random sampling.
--pooling {none,mean,hamming}
Type of pooling over frame representations of items.
--seq_norm Used for CPC features only. If activated, normalize
each batch of feature across the time channel before
computing ABX.
--max_size_seq MAX_SIZE_SEQ
Used for CPC features only. Maximal number of frames
to consider when computing a batch of features.
--strict Used for CPC features only. If activated, each batch
of feature will contain exactly max_size_seq frames.
```
### Python API
You can also call the ABX evaluation from Python code, as in the following example:
```
import zrc_abx2
args = zrc_abx2.EvalArgs(
path_data= "/location/to/representations/",
path_item_file= "/location/to/file.item",
**other_options
)
result = zrc_abx2.EvalABX().eval_abx(args)
```
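The call returns a list of dictionaries, one per evaluated speaker/context combination. As a minimal, illustrative sketch of reading the scores (the key names follow `formatted_abx_results` in this package; the loop itself is not part of the API):
```
for r in result:
    # abx-s-condition: "within"/"across"; abx-c-condition: "within"/"any"
    print(r["abx-s-condition"], r["abx-c-condition"], r["score"])
```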
## Information on evaluation conditions
A new variable in this ABX version is context.
In the within-context condition, a, b, and x all have the same surrounding context (i.e. the same preceding and following phoneme). In the any-context condition, the surrounding context is ignored and will typically vary.
For the within-context and any-context comparisons, use an item file that extracts phonemes (rather than XYZ triphones). For the on-triphone condition, which is still available, use an item file that extracts triphones and run it within-context, just like in the previous ABX evaluation (within-context was its default behavior). any-context is not used for the on-triphone version because of the excessive noise it would add to the representation.
Like in the previous version, it is also possible to run within-speaker (a, b, and x are all from the same speaker) and across-speaker (a and b are from the same speaker, x is from another) evaluations. This gives four phoneme-based evaluation combinations in total: within_s-within_c, within_s-any_c, across_s-within_c, across_s-any_c; and two triphone-based combinations: within_s-within_c and across_s-within_c.
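For illustration only (all paths below are placeholders), a phoneme-based run covering all four of these combinations in a single call could look like:
```
zrc-abx2 /path/to/my/representations /path/to/phoneme.item \
    --speaker_mode all --context_mode all \
    --file_extension .npy --out /path/to/results
```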
| zerospeech-libriabx2 | /zerospeech-libriabx2-0.9.8.tar.gz/zerospeech-libriabx2-0.9.8/README.md | README.md |
import argparse
import json
import os
import sys
import warnings
from datetime import datetime
from pathlib import Path
from typing import Any, Callable, Dict, Literal, NamedTuple, Optional, List, Tuple
import torch
import numpy as np
import pandas
from typing_extensions import LiteralString
import zrc_abx2.ABX_src.abx_group_computation as abx_g
from zrc_abx2.ABX_src.ABXDataset.abx_feature_loader import ABXFeatureLoader
from zrc_abx2.ABX_src.ABXIterators.abx_iterator_factory import IteratorFactory
from zrc_abx2.ABX_src.models import Pooling
from zrc_abx2.cpc.feature_loader import FeatureModule, buildFeature, loadModel
# Default args
PATH_CHECKPOINT = None
FILE_EXTENSION = ".npy"
FEATURE_SIZE = 0.01
CUDA = False
SPEAKER_MODE = "all"
CONTEXT_MODE = "all"
DISTANCE_MODE = "cosine"
MAX_SIZE_GROUP = 10
MAX_X_ACROSS = 5
OUT = None
SEED = 3459
POOLING = "none"
# CPC
SEQ_NORM = False
MAX_SIZE_SEQ = 64000
STRICT = False
class EvalArgs(NamedTuple):
# See parse_args for help
# Mandatory
path_data: str
path_item_file: str
# Args with defaults
path_checkpoint: Optional[str] = PATH_CHECKPOINT
file_extension: str = FILE_EXTENSION
feature_size: float = FEATURE_SIZE
cuda: bool = CUDA
speaker_mode: str = SPEAKER_MODE
context_mode: str = CONTEXT_MODE
distance_mode: str = DISTANCE_MODE
max_size_group: int = MAX_SIZE_GROUP
max_x_across: int = MAX_X_ACROSS
out: Optional[str] = OUT
seed: int = SEED
pooling: str = POOLING
# CPC only
seq_norm: bool = SEQ_NORM
max_size_seq: int = MAX_SIZE_SEQ
strict: bool = STRICT
# Feature-loading functions, one per file format
# If model loaded from checkpoint, procedure specified in eval_abx()
def _load_pt(x):
data = torch.load(x, "cpu")
assert len(data.size()) == 2
return data
def _load_npy(x):
data = torch.tensor(np.load(x))
assert len(data.size()) == 2
return data
def _load_txt(x):
data = torch.tensor(np.loadtxt(x))
assert len(data.size()) == 2
return data
def _loadCPCFeatureMaker(
CPC_pathCheckpoint,
encoder_layer=False,
keepHidden=True,
gru_level=-1,
cuda=False,
):
if gru_level and gru_level > 0:
updateConfig = argparse.Namespace(nLevelsGRU=gru_level)
else:
updateConfig = None
model, _, _ = loadModel([CPC_pathCheckpoint], updateConfig=updateConfig)
model.gAR.keepHidden = keepHidden
featureMaker = FeatureModule(model, get_encoded=encoder_layer)
featureMaker.eval()
if cuda:
featureMaker.cuda()
return featureMaker
class EvalABX:
# INTERFACE
def eval_abx(self, args: EvalArgs) -> List[Dict[str, Any]]:
print("eval_ABX args:")
print(args)
if args.path_checkpoint is None:
if args.file_extension == ".pt":
feature_function = _load_pt
elif args.file_extension == ".npy" or args.file_extension == ".npz":
feature_function = _load_npy
elif args.file_extension == ".txt":
feature_function = _load_txt
else:
feature_maker = _loadCPCFeatureMaker(
args.path_checkpoint,
encoder_layer=False,
keepHidden=True,
gru_level=-1,
cuda=False,
)
def feature_function(x):
return buildFeature(
feature_maker,
x,
strict=args.strict,
maxSizeSeq=args.max_size_seq,
seqNorm=args.seq_norm,
)[0]
# Speaker modes
if args.speaker_mode == "all":
speakermodes = ["within", "across"]
else:
speakermodes = [args.speaker_mode]
# Context modes
if args.context_mode == "all":
contextmodes = ["within", "any"]
else:
contextmodes = [args.context_mode]
step_feature = 1 / args.feature_size
# Get the list of sequences
seq_list = self._find_all_files(args.path_data, args.file_extension)
scores = self._ABX(
self._pooling_type(args.pooling),
args.seed,
feature_function,
args.path_item_file,
seq_list,
args.distance_mode,
step_feature,
speakermodes,
contextmodes,
cuda=args.cuda,
max_x_across=args.max_x_across,
max_size_group=args.max_size_group,
)
return self.formatted_abx_results(scores, args)
def formatted_abx_results(
self, scores: Dict[str, float], eval_args: EvalArgs
) -> List[Dict[str, Any]]:
results: List[Dict[str, Any]] = []
for key, val in scores.items():
result: Dict[str, Any] = {}
path_data = eval_args.path_data
item_f = eval_args.path_item_file
result["path_data"] = path_data
result["item-file"] = item_f
try:
dataset = Path(item_f).stem.split("-")
result["dataset"] = dataset[0]
result["sub-dataset"] = dataset[1]
except:
warnings.warn(
"Unable to retrieve dataset names for the results. Proceeding.",
RuntimeWarning,
)
result["pooling"] = eval_args.pooling
result["seed"] = eval_args.seed
result["run-date"] = datetime.now().strftime("%Y-%m-%d")
result["score"] = val
try:
result["abx-s-condition"] = key.split("-")[0]
result["abx-c-condition"] = key.split("-")[1]
except:
raise ValueError(
"Unable to retrieve abx condition definitions for the results."
)
results.append(result)
return results
def _ABX(
self,
pooling: Pooling,
seed_n: int,
feature_function: Callable,
path_item_file: str,
seq_list: List[Tuple[str, LiteralString]],
distance_mode: str,
step_feature: float,
speakermodes: List[str],
contextmodes: List[str],
cuda=False,
max_x_across=5,
max_size_group=30,
) -> Dict[str, float]:
# Distance function
distance_function = abx_g.get_distance_function_from_name(distance_mode)
# Output
scores: Dict[str, float] = {}
print(
"Date and time of run start:",
datetime.now().strftime("%Y-%m-%d %H:%M"),
)
# ABX calculations differ per context mode
for contextmode in contextmodes:
if not contextmode in ("within", "any"):
raise ValueError(f"Contextmode not supported: {contextmode}")
ABXDataset = ABXFeatureLoader(
pooling,
path_item_file,
seq_list,
feature_function,
step_feature,
True,
).loadFromFileData()
dimnwithin = None
dimnacross: list[int] = []
if contextmode == "within":
dimnwithin = 3 # TODO: can we make these programmatic?
dimnacross = [3, 4]
elif contextmode == "any":
# dimnwithin not used in this condition.
dimnacross = [3]
if cuda:
ABXDataset.cuda()
# ABX within speaker
if "within" in speakermodes:
print(f"Computing ABX {contextmode} context within speakers...")
ABXIterator = IteratorFactory.get_iterator(
ABXDataset, contextmode, "within", max_size_group, seed_n
)
group_confusion = abx_g.get_abx_scores_dtw_on_group(
ABXIterator, distance_function, ABXIterator.symmetric, pooling
)
n_data = group_confusion._values().size(0)
index_ = torch.sparse.LongTensor(
group_confusion._indices(),
torch.ones((n_data), dtype=torch.float),
group_confusion.size(),
)
if contextmode == "any":
divisor_context = index_.to_dense()
group_confusion = group_confusion.to_dense()
else:
divisor_context = torch.sparse.sum(
index_, dimnwithin
).to_dense()
group_confusion = torch.sparse.sum(
group_confusion, dimnwithin
).to_dense()
group_confusion = self._reduce_sparse_data(
group_confusion, divisor_context
)
S, p1, p2 = group_confusion.size()
index_speaker = divisor_context > 0
divisor_speaker = index_speaker.sum(dim=0)
phone_confusion = self._reduce_sparse_data(
group_confusion.sum(dim=0), divisor_speaker
)
scores[f"within-{contextmode}"] = (
phone_confusion.sum() / (divisor_speaker > 0).sum()
).item()
print(
f"...done. ABX {contextmode}_context within_speaker : {scores[f'within-{contextmode}']}"
)
# ABX across
if "across" in speakermodes:
print(f"Computing ABX {contextmode} context across speakers...")
ABXIterator = IteratorFactory.get_iterator(
ABXDataset, contextmode, "across", max_size_group, seed_n
)
ABXIterator.max_x = max_x_across # Only used in across-speaker
group_confusion = abx_g.get_abx_scores_dtw_on_group(
ABXIterator, distance_function, ABXIterator.symmetric, pooling
)
n_data = group_confusion._values().size(0)
index_ = torch.sparse.LongTensor(
group_confusion._indices(),
torch.ones((n_data), dtype=torch.float),
group_confusion.size(),
)
                if not dimnacross:
                    raise ValueError("dimnacross not set")
                divisor_context = torch.sparse.sum(index_, dimnacross).to_dense()
                group_confusion = torch.sparse.sum(
                    group_confusion, dimnacross
                ).to_dense()
group_confusion = self._reduce_sparse_data(
group_confusion, divisor_context
)
S, p1, p2 = group_confusion.size()
index_speaker = divisor_context > 0
divisor_speaker = index_speaker.sum(dim=0)
phone_confusion = self._reduce_sparse_data(
group_confusion.sum(dim=0), divisor_speaker
)
scores[f"across-{contextmode}"] = (
phone_confusion.sum() / (divisor_speaker > 0).sum()
).item()
print(
f"...done. ABX {contextmode}_context across_speaker : {scores[f'across-{contextmode}']}"
)
return scores
def _find_all_files(
self, path_dir, extension
) -> List[Tuple[str, LiteralString]]:
"""Returns: a list of tuples, each tuple having this format:
[0]: filename (no extension);
[1]: absolute path of the file.
"""
out: List[Tuple[str, LiteralString]] = []
for root, dirs, filenames in os.walk(path_dir):
for f in filenames:
if f.endswith(extension):
out.append(((str(Path(f).stem)), os.path.join(root, f)))
return out
def _reduce_sparse_data(self, quotient, divisor):
return quotient / (1e-08 * (divisor == 0) + divisor)
def _pooling_type(
self, pooling: str
) -> Literal[Pooling.NONE, Pooling.MEAN, Pooling.HAMMING]:
if pooling == "none":
return Pooling.NONE
elif pooling == "mean":
return Pooling.MEAN
elif pooling == "hamming":
return Pooling.HAMMING
else:
raise ValueError("Unsupported pooling type.")
def parse_args(argv):
parser = argparse.ArgumentParser(description="ABX metric")
parser.add_argument(
"path_data",
type=str,
help="Path to directory containing the submission data",
)
parser.add_argument(
"path_item_file",
type=str,
help="Path to the .item file containing the timestamps and transcriptions",
)
parser.add_argument(
"--path_checkpoint",
type=str,
default=PATH_CHECKPOINT,
help="Path to a CPC checkpoint. If set, apply the "
"model to the input data to compute the features",
)
parser.add_argument(
"--file_extension",
type=str,
default=FILE_EXTENSION,
choices=[".pt", ".npy", ".wav", ".flac", ".mp3", ".npz", ".txt"],
)
parser.add_argument(
"--feature_size",
type=float,
default=FEATURE_SIZE,
help="Size (in s) of one feature",
)
parser.add_argument(
"--cuda", action="store_true", help="Use the GPU to compute distances"
)
parser.add_argument(
"--speaker_mode",
type=str,
default=SPEAKER_MODE,
choices=["all", "within", "across"],
help="Choose the speaker mode of the ABX score to compute",
)
parser.add_argument(
"--context_mode",
type=str,
default=CONTEXT_MODE,
choices=["all", "within", "any"],
help="Choose the context mode of the ABX score to compute",
)
parser.add_argument(
"--distance_mode",
type=str,
default=DISTANCE_MODE,
choices=["euclidian", "euclidean", "cosine", "kl", "kl_symmetric"],
help="Choose the kind of distance to use to compute " "the ABX score.",
)
parser.add_argument(
"--max_size_group",
type=int,
default=MAX_SIZE_GROUP,
help="Max size of a group while computing the"
"ABX score. A small value will make the code "
"faster but less precise.",
)
parser.add_argument(
"--max_x_across",
type=int,
default=MAX_X_ACROSS,
help="When computing the ABX across score, maximum "
"number of speaker X to sample per couple A,B. "
" A small value will make the code faster but "
"less precise.",
)
parser.add_argument(
"--out",
type=str,
default=OUT,
help="Path where the results should be saved.",
)
parser.add_argument(
"--seed", type=int, default=SEED, help="Seed to use in random sampling."
)
parser.add_argument(
"--pooling",
type=str,
choices=["none", "mean", "hamming"],
default=POOLING,
help="Type of pooling over frame representations of items.",
)
parser.add_argument(
"--seq_norm",
action="store_true",
help="Used for CPC features only. "
"If activated, normalize each batch of feature across the "
"time channel before computing ABX.",
)
parser.add_argument(
"--max_size_seq",
default=MAX_SIZE_SEQ,
type=int,
help="Used for CPC features only. Maximal number of frames to consider when computing a "
"batch of features.",
)
parser.add_argument(
"--strict",
action="store_true",
help="Used for CPC features only. "
"If activated, each batch of feature will contain exactly "
"max_size_seq frames.",
)
# multi-gpu / multi-node
return parser.parse_args(argv)
# CMDLINE
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
args = parse_args(argv)
eval_args = EvalArgs(
path_data=args.path_data,
path_item_file=args.path_item_file,
path_checkpoint=args.path_checkpoint,
file_extension=args.file_extension,
feature_size=args.feature_size,
cuda=args.cuda,
speaker_mode=args.speaker_mode,
context_mode=args.context_mode,
distance_mode=args.distance_mode,
max_x_across=args.max_x_across,
out=args.out,
seed=args.seed,
pooling=args.pooling,
seq_norm=args.seq_norm,
max_size_seq=args.max_size_seq,
strict=args.strict,
)
abx_evaluator = EvalABX()
scores = abx_evaluator.eval_abx(eval_args)
if eval_args.out:
out_dir = Path(eval_args.out)
elif eval_args.path_checkpoint:
out_dir = Path(eval_args.path_checkpoint).parent
else:
raise ValueError(
"Unable to find output path from args.out or args.path_checkpoint."
)
out_dir.mkdir(exist_ok=True)
df = pandas.DataFrame(scores)
with open(out_dir / f"ABX_scores.csv", "a") as file:
df.to_csv(file, mode="a", index=False, header=file.tell() == 0)
t = datetime.now().strftime("%Y-%m-%d_%H-%M")
path_args = out_dir / f"ABX_args_{t}.json"
with open(path_args, "w") as file:
json.dump(vars(args), file, indent=2)
if __name__ == "__main__":
main() | zerospeech-libriabx2 | /zerospeech-libriabx2-0.9.8.tar.gz/zerospeech-libriabx2-0.9.8/zrc_abx2/eval_ABX.py | eval_ABX.py |
import argparse
import json
import os
import numpy as np
import torch
import time
from copy import deepcopy
import random
import psutil
import sys
import cpc.criterion as cr
import cpc.model as model
import cpc.utils.misc as utils
import cpc.feature_loader as fl
from cpc.cpc_default_config import set_default_cpc_config
from cpc.dataset import AudioBatchData, findAllSeqs, filterSeqs, parseSeqLabels
def getCriterion(args, downsampling, nSpeakers, nPhones):
dimFeatures = args.hiddenGar if not args.onEncoder else args.hiddenEncoder
if not args.supervised:
if args.cpc_mode == 'none':
cpcCriterion = cr.NoneCriterion()
else:
sizeInputSeq = (args.sizeWindow // downsampling)
cpcCriterion = cr.CPCUnsupersivedCriterion(args.nPredicts,
args.hiddenGar,
args.hiddenEncoder,
args.negativeSamplingExt,
mode=args.cpc_mode,
rnnMode=args.rnnMode,
dropout=args.dropout,
nSpeakers=nSpeakers,
speakerEmbedding=args.speakerEmbedding,
sizeInputSeq=sizeInputSeq)
elif args.pathPhone is not None:
if not args.CTC:
cpcCriterion = cr.PhoneCriterion(dimFeatures,
nPhones, args.onEncoder,
nLayers=args.nLevelsPhone)
else:
cpcCriterion = cr.CTCPhoneCriterion(dimFeatures,
nPhones, args.onEncoder)
else:
cpcCriterion = cr.SpeakerCriterion(dimFeatures, nSpeakers)
return cpcCriterion
def loadCriterion(pathCheckpoint, downsampling, nSpeakers, nPhones):
_, _, locArgs = fl.getCheckpointData(os.path.dirname(pathCheckpoint))
criterion = getCriterion(locArgs, downsampling, nSpeakers, nPhones)
state_dict = torch.load(pathCheckpoint, 'cpu')
criterion.load_state_dict(state_dict["cpcCriterion"])
return criterion
def trainStep(dataLoader,
cpcModel,
cpcCriterion,
optimizer,
scheduler,
loggingStep):
cpcModel.train()
cpcCriterion.train()
start_time = time.perf_counter()
n_examples = 0
logs, lastlogs = {}, None
iter = 0
for step, fulldata in enumerate(dataLoader):
batchData, label = fulldata
n_examples += batchData.size(0)
batchData = batchData.cuda(non_blocking=True)
label = label.cuda(non_blocking=True)
c_feature, encoded_data, label = cpcModel(batchData, label)
allLosses, allAcc = cpcCriterion(c_feature, encoded_data, label)
totLoss = allLosses.sum()
totLoss.backward()
# Show grads ?
optimizer.step()
optimizer.zero_grad()
if "locLoss_train" not in logs:
logs["locLoss_train"] = np.zeros(allLosses.size(1))
logs["locAcc_train"] = np.zeros(allLosses.size(1))
iter += 1
logs["locLoss_train"] += (allLosses.mean(dim=0)).detach().cpu().numpy()
logs["locAcc_train"] += (allAcc.mean(dim=0)).cpu().numpy()
if (step + 1) % loggingStep == 0:
new_time = time.perf_counter()
elapsed = new_time - start_time
print(f"Update {step + 1}")
print(f"elapsed: {elapsed:.1f} s")
print(
f"{1000.0 * elapsed / loggingStep:.1f} ms per batch, {1000.0 * elapsed / n_examples:.1f} ms / example")
locLogs = utils.update_logs(logs, loggingStep, lastlogs)
lastlogs = deepcopy(logs)
utils.show_logs("Training loss", locLogs)
start_time, n_examples = new_time, 0
if scheduler is not None:
scheduler.step()
logs = utils.update_logs(logs, iter)
logs["iter"] = iter
utils.show_logs("Average training loss on epoch", logs)
return logs
def valStep(dataLoader,
cpcModel,
cpcCriterion):
cpcCriterion.eval()
cpcModel.eval()
logs = {}
cpcCriterion.eval()
cpcModel.eval()
iter = 0
for step, fulldata in enumerate(dataLoader):
batchData, label = fulldata
batchData = batchData.cuda(non_blocking=True)
label = label.cuda(non_blocking=True)
with torch.no_grad():
c_feature, encoded_data, label = cpcModel(batchData, label)
allLosses, allAcc = cpcCriterion(c_feature, encoded_data, label)
if "locLoss_val" not in logs:
logs["locLoss_val"] = np.zeros(allLosses.size(1))
logs["locAcc_val"] = np.zeros(allLosses.size(1))
iter += 1
logs["locLoss_val"] += allLosses.mean(dim=0).cpu().numpy()
logs["locAcc_val"] += allAcc.mean(dim=0).cpu().numpy()
logs = utils.update_logs(logs, iter)
logs["iter"] = iter
utils.show_logs("Validation loss:", logs)
return logs
def run(trainDataset,
valDataset,
batchSize,
samplingMode,
cpcModel,
cpcCriterion,
nEpoch,
pathCheckpoint,
optimizer,
scheduler,
logs):
print(f"Running {nEpoch} epochs")
startEpoch = len(logs["epoch"])
bestAcc = 0
bestStateDict = None
start_time = time.time()
for epoch in range(startEpoch, nEpoch):
print(f"Starting epoch {epoch}")
utils.cpu_stats()
trainLoader = trainDataset.getDataLoader(batchSize, samplingMode,
True, numWorkers=0)
valLoader = valDataset.getDataLoader(batchSize, 'sequential', False,
numWorkers=0)
print("Training dataset %d batches, Validation dataset %d batches, batch size %d" %
(len(trainLoader), len(valLoader), batchSize))
locLogsTrain = trainStep(trainLoader, cpcModel, cpcCriterion,
optimizer, scheduler, logs["logging_step"])
locLogsVal = valStep(valLoader, cpcModel, cpcCriterion)
print(f'Ran {epoch + 1} epochs '
f'in {time.time() - start_time:.2f} seconds')
torch.cuda.empty_cache()
currentAccuracy = float(locLogsVal["locAcc_val"].mean())
if currentAccuracy > bestAcc:
bestStateDict = fl.get_module(cpcModel).state_dict()
for key, value in dict(locLogsTrain, **locLogsVal).items():
if key not in logs:
logs[key] = [None for x in range(epoch)]
if isinstance(value, np.ndarray):
value = value.tolist()
logs[key].append(value)
logs["epoch"].append(epoch)
if pathCheckpoint is not None \
and (epoch % logs["saveStep"] == 0 or epoch == nEpoch-1):
modelStateDict = fl.get_module(cpcModel).state_dict()
criterionStateDict = fl.get_module(cpcCriterion).state_dict()
fl.save_checkpoint(modelStateDict, criterionStateDict,
optimizer.state_dict(), bestStateDict,
f"{pathCheckpoint}_{epoch}.pt")
utils.save_logs(logs, pathCheckpoint + "_logs.json")
def main(args):
args = parseArgs(args)
utils.set_seed(args.random_seed)
logs = {"epoch": [], "iter": [], "saveStep": args.save_step}
loadOptimizer = False
if args.pathCheckpoint is not None and not args.restart:
cdata = fl.getCheckpointData(args.pathCheckpoint)
if cdata is not None:
data, logs, locArgs = cdata
print(f"Checkpoint detected at {data}")
fl.loadArgs(args, locArgs,
forbiddenAttr={"nGPU", "pathCheckpoint",
"debug", "restart", "world_size",
"n_nodes", "node_id", "n_gpu_per_node",
"max_size_loaded"})
args.load, loadOptimizer = [data], True
args.loadCriterion = True
logs["logging_step"] = args.logging_step
print(f'CONFIG:\n{json.dumps(vars(args), indent=4, sort_keys=True)}')
print('-' * 50)
seqNames, speakers = findAllSeqs(args.pathDB,
extension=args.file_extension,
loadCache=not args.ignore_cache)
print(f'Found files: {len(seqNames)} seqs, {len(speakers)} speakers')
# Datasets
if args.pathTrain is not None:
seqTrain = filterSeqs(args.pathTrain, seqNames)
else:
seqTrain = seqNames
if args.pathVal is None:
random.shuffle(seqTrain)
sizeTrain = int(0.99 * len(seqTrain))
seqTrain, seqVal = seqTrain[:sizeTrain], seqTrain[sizeTrain:]
print(f'Found files: {len(seqTrain)} train, {len(seqVal)} val')
else:
seqVal = filterSeqs(args.pathVal, seqNames)
if args.debug:
seqTrain = seqTrain[-1000:]
seqVal = seqVal[-100:]
phoneLabels, nPhones = None, None
if args.supervised and args.pathPhone is not None:
print("Loading the phone labels at " + args.pathPhone)
phoneLabels, nPhones = parseSeqLabels(args.pathPhone)
print(f"{nPhones} phones found")
print("")
print(f'Loading audio data at {args.pathDB}')
print("Loading the training dataset")
trainDataset = AudioBatchData(args.pathDB,
args.sizeWindow,
seqTrain,
phoneLabels,
len(speakers),
nProcessLoader=args.n_process_loader,
MAX_SIZE_LOADED=args.max_size_loaded)
print("Training dataset loaded")
print("")
print("Loading the validation dataset")
valDataset = AudioBatchData(args.pathDB,
args.sizeWindow,
seqVal,
phoneLabels,
len(speakers),
nProcessLoader=args.n_process_loader)
print("Validation dataset loaded")
print("")
if args.load is not None:
cpcModel, args.hiddenGar, args.hiddenEncoder = \
fl.loadModel(args.load)
else:
# Encoder network
encoderNet = fl.getEncoder(args)
# AR Network
arNet = fl.getAR(args)
cpcModel = model.CPCModel(encoderNet, arNet)
batchSize = args.nGPU * args.batchSizeGPU
cpcModel.supervised = args.supervised
# Training criterion
if args.load is not None and args.loadCriterion:
cpcCriterion = loadCriterion(args.load[0], cpcModel.gEncoder.DOWNSAMPLING,
len(speakers), nPhones)
else:
cpcCriterion = getCriterion(args, cpcModel.gEncoder.DOWNSAMPLING,
len(speakers), nPhones)
if loadOptimizer:
state_dict = torch.load(args.load[0], 'cpu')
cpcCriterion.load_state_dict(state_dict["cpcCriterion"])
cpcCriterion.cuda()
cpcModel.cuda()
# Optimizer
g_params = list(cpcCriterion.parameters()) + list(cpcModel.parameters())
lr = args.learningRate
optimizer = torch.optim.Adam(g_params, lr=lr,
betas=(args.beta1, args.beta2),
eps=args.epsilon)
if loadOptimizer:
print("Loading optimizer " + args.load[0])
state_dict = torch.load(args.load[0], 'cpu')
if "optimizer" in state_dict:
optimizer.load_state_dict(state_dict["optimizer"])
# Checkpoint
if args.pathCheckpoint is not None:
if not os.path.isdir(args.pathCheckpoint):
os.mkdir(args.pathCheckpoint)
args.pathCheckpoint = os.path.join(args.pathCheckpoint, "checkpoint")
scheduler = None
if args.schedulerStep > 0:
scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
args.schedulerStep,
gamma=0.5)
if args.schedulerRamp is not None:
n_epoch = args.schedulerRamp
print(f"Ramp activated. n_e = {n_epoch}")
scheduler_ramp = torch.optim.lr_scheduler.LambdaLR(optimizer,
lr_lambda=lambda epoch: utils.ramp_scheduling_function(
n_epoch, epoch),
last_epoch=-1)
if scheduler is None:
scheduler = scheduler_ramp
else:
scheduler = utils.SchedulerCombiner([scheduler_ramp, scheduler],
[0, args.schedulerRamp])
if scheduler is not None:
for i in range(len(logs["epoch"])):
scheduler.step()
cpcModel = torch.nn.DataParallel(cpcModel,
device_ids=range(args.nGPU)).cuda()
cpcCriterion = torch.nn.DataParallel(cpcCriterion,
device_ids=range(args.nGPU)).cuda()
run(trainDataset,
valDataset,
batchSize,
args.samplingType,
cpcModel,
cpcCriterion,
args.nEpoch,
args.pathCheckpoint,
optimizer,
scheduler,
logs)
def parseArgs(argv):
# Run parameters
parser = argparse.ArgumentParser(description='Trainer')
# Default arguments:
parser = set_default_cpc_config(parser)
group_db = parser.add_argument_group('Dataset')
group_db.add_argument('--pathDB', type=str, default=None,
help='Path to the directory containing the '
'data.')
group_db.add_argument('--file_extension', type=str, default=".flac",
help="Extension of the audio files in the dataset.")
group_db.add_argument('--pathTrain', type=str, default=None,
help='Path to a .txt file containing the list of the '
'training sequences.')
group_db.add_argument('--pathVal', type=str, default=None,
help='Path to a .txt file containing the list of the '
'validation sequences.')
group_db.add_argument('--n_process_loader', type=int, default=8,
help='Number of processes to call to load the '
'dataset')
group_db.add_argument('--ignore_cache', action='store_true',
help='Activate if the dataset has been modified '
'since the last training session.')
group_db.add_argument('--max_size_loaded', type=int, default=4000000000,
help='Maximal amount of data (in byte) a dataset '
'can hold in memory at any given time')
group_supervised = parser.add_argument_group(
        'Supervised mode (deprecated)')
group_supervised.add_argument('--supervised', action='store_true',
                                  help='(Deprecated) Disable the CPC loss and activate '
'the supervised mode. By default, the supervised '
'training method is the speaker classification.')
group_supervised.add_argument('--pathPhone', type=str, default=None,
help='(Supervised mode only) Path to a .txt '
'containing the phone labels of the dataset. If given '
'and --supervised, will train the model using a '
'phone classification task.')
group_supervised.add_argument('--CTC', action='store_true')
group_save = parser.add_argument_group('Save')
group_save.add_argument('--pathCheckpoint', type=str, default=None,
help="Path of the output directory.")
group_save.add_argument('--logging_step', type=int, default=1000)
group_save.add_argument('--save_step', type=int, default=5,
help="Frequency (in epochs) at which a checkpoint "
"should be saved")
group_load = parser.add_argument_group('Load')
group_load.add_argument('--load', type=str, default=None, nargs='*',
help="Load an exsiting checkpoint. Should give a path "
"to a .pt file. The directory containing the file to "
"load should also have a 'checkpoint.logs' and a "
"'checkpoint.args'")
group_load.add_argument('--loadCriterion', action='store_true',
help="If --load is activated, load the state of the "
"training criterion as well as the state of the "
"feature network (encoder + AR)")
group_load.add_argument('--restart', action='store_true',
help="If any checkpoint is found, ignore it and "
"restart the training from scratch.")
group_gpu = parser.add_argument_group('GPUs')
group_gpu.add_argument('--nGPU', type=int, default=-1,
help="Number of GPU to use (default: use all "
"available GPUs)")
group_gpu.add_argument('--batchSizeGPU', type=int, default=8,
help='Number of batches per GPU.')
parser.add_argument('--debug', action='store_true',
help="Load only a very small amount of files for "
"debugging purposes.")
args = parser.parse_args(argv)
if args.pathDB is None and (args.pathCheckpoint is None or args.restart):
parser.print_help()
print("Either provides an input dataset or a checkpoint to load")
sys.exit()
if args.pathCheckpoint is not None:
args.pathCheckpoint = os.path.abspath(args.pathCheckpoint)
if args.load is not None:
args.load = [os.path.abspath(x) for x in args.load]
# set it up if needed, so that it is dumped along with other args
if args.random_seed is None:
args.random_seed = random.randint(0, 2**31)
if args.nGPU < 0:
args.nGPU = torch.cuda.device_count()
assert args.nGPU <= torch.cuda.device_count(),\
f"number of GPU asked: {args.nGPU}," \
f"number GPU detected: {torch.cuda.device_count()}"
print(f"Let's use {args.nGPU} GPUs!")
if args.arMode == 'no_ar':
args.hiddenGar = args.hiddenEncoder
return args
if __name__ == "__main__":
torch.multiprocessing.set_start_method('spawn')
args = sys.argv[1:]
main(args) | zerospeech-libriabx2 | /zerospeech-libriabx2-0.9.8.tar.gz/zerospeech-libriabx2-0.9.8/zrc_abx2/cpc/train.py | train.py |
import os
import random
import time
import tqdm
import torch
import soundfile as sf
from pathlib import Path
from copy import deepcopy
from torch.multiprocessing import Pool
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import Sampler, BatchSampler
import torchaudio
class AudioBatchData(Dataset):
def __init__(self,
path,
sizeWindow,
seqNames,
phoneLabelsDict,
nSpeakers,
nProcessLoader=50,
MAX_SIZE_LOADED=4000000000):
"""
Args:
- path (string): path to the training dataset
- sizeWindow (int): size of the sliding window
- seqNames (list): sequences to load
            - phoneLabelsDict (dictionary): if not None, a dictionary with the
                                            following entries
                                            "step": size of a labelled window
                                            "$SEQ_NAME": list of phoneme labels for
                                            the sequence $SEQ_NAME
- nSpeakers (int): number of speakers to expect.
- nProcessLoader (int): number of processes to call when loading the
data from the disk
- MAX_SIZE_LOADED (int): target maximal size of the floating array
containing all loaded data.
"""
self.MAX_SIZE_LOADED = MAX_SIZE_LOADED
self.nProcessLoader = nProcessLoader
self.dbPath = Path(path)
self.sizeWindow = sizeWindow
self.seqNames = [(s, self.dbPath / x) for s, x in seqNames]
self.reload_pool = Pool(nProcessLoader)
self.prepare()
self.speakers = list(range(nSpeakers))
self.data = []
self.phoneSize = 0 if phoneLabelsDict is None else \
phoneLabelsDict["step"]
self.phoneStep = 0 if phoneLabelsDict is None else \
self.sizeWindow // self.phoneSize
self.phoneLabelsDict = deepcopy(phoneLabelsDict)
self.loadNextPack(first=True)
self.loadNextPack()
self.doubleLabels = False
def resetPhoneLabels(self, newPhoneLabels, step):
self.phoneSize = step
self.phoneStep = self.sizeWindow // self.phoneSize
self.phoneLabelsDict = deepcopy(newPhoneLabels)
self.loadNextPack()
def splitSeqTags(seqName):
path = os.path.normpath(seqName)
return path.split(os.sep)
def getSeqNames(self):
return [str(x[1]) for x in self.seqNames]
def clear(self):
if 'data' in self.__dict__:
del self.data
if 'speakerLabel' in self.__dict__:
del self.speakerLabel
if 'phoneLabels' in self.__dict__:
del self.phoneLabels
if 'seqLabel' in self.__dict__:
del self.seqLabel
def prepare(self):
random.shuffle(self.seqNames)
start_time = time.time()
print("Checking length...")
allLength = self.reload_pool.map(extractLength, self.seqNames)
self.packageIndex, self.totSize = [], 0
start, packageSize = 0, 0
for index, length in tqdm.tqdm(enumerate(allLength)):
packageSize += length
if packageSize > self.MAX_SIZE_LOADED:
self.packageIndex.append([start, index])
self.totSize += packageSize
start, packageSize = index, 0
if packageSize > 0:
self.packageIndex.append([start, len(self.seqNames)])
self.totSize += packageSize
print(f"Done, elapsed: {time.time() - start_time:.3f} seconds")
print(f'Scanned {len(self.seqNames)} sequences '
f'in {time.time() - start_time:.2f} seconds')
print(f"{len(self.packageIndex)} chunks computed")
self.currentPack = -1
self.nextPack = 0
def getNPacks(self):
return len(self.packageIndex)
def loadNextPack(self, first=False):
self.clear()
if not first:
self.currentPack = self.nextPack
start_time = time.time()
print('Joining pool')
self.r.wait()
print(f'Joined process, elapsed={time.time()-start_time:.3f} secs')
self.nextData = self.r.get()
self.parseNextDataBlock()
del self.nextData
self.nextPack = (self.currentPack + 1) % len(self.packageIndex)
seqStart, seqEnd = self.packageIndex[self.nextPack]
if self.nextPack == 0 and len(self.packageIndex) > 1:
self.prepare()
self.r = self.reload_pool.map_async(loadFile,
self.seqNames[seqStart:seqEnd])
def parseNextDataBlock(self):
# Labels
self.speakerLabel = [0]
self.seqLabel = [0]
self.phoneLabels = []
speakerSize = 0
indexSpeaker = 0
# To accelerate the process a bit
self.nextData.sort(key=lambda x: (x[0], x[1]))
tmpData = []
for speaker, seqName, seq in self.nextData:
while self.speakers[indexSpeaker] < speaker:
indexSpeaker += 1
self.speakerLabel.append(speakerSize)
if self.speakers[indexSpeaker] != speaker:
raise ValueError(f'{speaker} invalid speaker')
if self.phoneLabelsDict is not None:
self.phoneLabels += self.phoneLabelsDict[seqName]
newSize = len(self.phoneLabelsDict[seqName]) * self.phoneSize
seq = seq[:newSize]
sizeSeq = seq.size(0)
tmpData.append(seq)
self.seqLabel.append(self.seqLabel[-1] + sizeSeq)
speakerSize += sizeSeq
del seq
self.speakerLabel.append(speakerSize)
self.data = torch.cat(tmpData, dim=0)
def getPhonem(self, idx):
idPhone = idx // self.phoneSize
return self.phoneLabels[idPhone:(idPhone + self.phoneStep)]
def getSpeakerLabel(self, idx):
idSpeaker = next(x[0] for x in enumerate(
self.speakerLabel) if x[1] > idx) - 1
return idSpeaker
def __len__(self):
return self.totSize // self.sizeWindow
def __getitem__(self, idx):
if idx < 0 or idx >= len(self.data) - self.sizeWindow - 1:
print(idx)
outData = self.data[idx:(self.sizeWindow + idx)].view(1, -1)
label = torch.tensor(self.getSpeakerLabel(idx), dtype=torch.long)
if self.phoneSize > 0:
label_phone = torch.tensor(self.getPhonem(idx), dtype=torch.long)
if not self.doubleLabels:
label = label_phone
else:
label_phone = torch.zeros(1)
if self.doubleLabels:
return outData, label, label_phone
return outData, label
def getNSpeakers(self):
return len(self.speakers)
def getNSeqs(self):
return len(self.seqLabel) - 1
def getNLoadsPerEpoch(self):
return len(self.packageIndex)
def getBaseSampler(self, type, batchSize, offset):
if type == "samespeaker":
return SameSpeakerSampler(batchSize, self.speakerLabel,
self.sizeWindow, offset)
if type == "samesequence":
return SameSpeakerSampler(batchSize, self.seqLabel,
self.sizeWindow, offset)
if type == "sequential":
return SequentialSampler(len(self.data), self.sizeWindow,
offset, batchSize)
sampler = UniformAudioSampler(len(self.data), self.sizeWindow,
offset)
return BatchSampler(sampler, batchSize, True)
def getDataLoader(self, batchSize, type, randomOffset, numWorkers=0,
onLoop=-1):
r"""
Get a batch sampler for the current dataset.
Args:
- batchSize (int): batch size
- groupSize (int): in the case of type in ["speaker", "sequence"]
number of items sharing a same label in the group
(see AudioBatchSampler)
- type (string):
type == "speaker": grouped sampler speaker-wise
type == "sequence": grouped sampler sequence-wise
type == "sequential": sequential sampling
else: uniform random sampling of the full audio
vector
- randomOffset (bool): if True add a random offset to the sampler
at the begining of each iteration
"""
nLoops = len(self.packageIndex)
totSize = self.totSize // (self.sizeWindow * batchSize)
if onLoop >= 0:
self.currentPack = onLoop - 1
self.loadNextPack()
nLoops = 1
def samplerCall():
offset = random.randint(0, self.sizeWindow // 2) \
if randomOffset else 0
return self.getBaseSampler(type, batchSize, offset)
return AudioLoader(self, samplerCall, nLoops, self.loadNextPack,
totSize, numWorkers)
def loadFile(data):
speaker, fullPath = data
seqName = fullPath.stem
# Due to some issues happening when combining torchaudio.load
# with torch.multiprocessing we use soundfile to load the data
seq = torch.tensor(sf.read(fullPath)[0]).float()
if len(seq.size()) == 2:
seq = seq.mean(dim=1)
return speaker, seqName, seq
class AudioLoader(object):
r"""
A DataLoader meant to handle an AudioBatchData object.
In order to handle big datasets AudioBatchData works with big chunks of
audio it loads sequentially in memory: once all batches have been sampled
on a chunk, the AudioBatchData loads the next one.
"""
def __init__(self,
dataset,
samplerCall,
nLoop,
updateCall,
size,
numWorkers):
r"""
Args:
- dataset (AudioBatchData): target dataset
- samplerCall (function): batch-sampler to call
- nLoop (int): number of chunks to load
- updateCall (function): function loading the next chunk
- size (int): total number of batches
- numWorkers (int): see torch.utils.data.DataLoader
"""
self.samplerCall = samplerCall
self.updateCall = updateCall
self.nLoop = nLoop
self.size = size
self.dataset = dataset
self.numWorkers = numWorkers
def __len__(self):
return self.size
def __iter__(self):
for i in range(self.nLoop):
sampler = self.samplerCall()
dataloader = DataLoader(self.dataset,
batch_sampler=sampler,
num_workers=self.numWorkers)
for x in dataloader:
yield x
if i < self.nLoop - 1:
self.updateCall()
class UniformAudioSampler(Sampler):
def __init__(self,
dataSize,
sizeWindow,
offset):
self.len = dataSize // sizeWindow
self.sizeWindow = sizeWindow
self.offset = offset
if self.offset > 0:
self.len -= 1
def __iter__(self):
return iter((self.offset
+ self.sizeWindow * torch.randperm(self.len)).tolist())
def __len__(self):
return self.len
class SequentialSampler(Sampler):
def __init__(self, dataSize, sizeWindow, offset, batchSize):
self.len = (dataSize // sizeWindow) // batchSize
self.sizeWindow = sizeWindow
self.offset = offset
self.startBatches = [x * (dataSize // batchSize)
for x in range(batchSize)]
self.batchSize = batchSize
if self.offset > 0:
self.len -= 1
def __iter__(self):
for idx in range(self.len):
yield [self.offset + self.sizeWindow * idx
+ start for start in self.startBatches]
def __len__(self):
return self.len
class SameSpeakerSampler(Sampler):
def __init__(self,
batchSize,
samplingIntervals,
sizeWindow,
offset):
self.samplingIntervals = samplingIntervals
self.sizeWindow = sizeWindow
self.batchSize = batchSize
self.offset = offset
if self.samplingIntervals[0] != 0:
raise AttributeError("Sampling intervals should start at zero")
nWindows = len(self.samplingIntervals) - 1
self.sizeSamplers = [(self.samplingIntervals[i+1] -
self.samplingIntervals[i]) // self.sizeWindow
for i in range(nWindows)]
if self.offset > 0:
self.sizeSamplers = [max(0, x - 1) for x in self.sizeSamplers]
order = [(x, torch.randperm(val).tolist())
for x, val in enumerate(self.sizeSamplers) if val > 0]
# Build Batches
self.batches = []
for indexSampler, randperm in order:
indexStart, sizeSampler = 0, self.sizeSamplers[indexSampler]
while indexStart < sizeSampler:
indexEnd = min(sizeSampler, indexStart + self.batchSize)
locBatch = [self.getIndex(x, indexSampler)
for x in randperm[indexStart:indexEnd]]
indexStart = indexEnd
self.batches.append(locBatch)
def __len__(self):
return len(self.batches)
def getIndex(self, x, iInterval):
return self.offset + x * self.sizeWindow \
+ self.samplingIntervals[iInterval]
def __iter__(self):
random.shuffle(self.batches)
return iter(self.batches)
def extractLength(couple):
speaker, locPath = couple
info = torchaudio.info(str(locPath))[0]
return info.length
def findAllSeqs(dirName,
extension='.flac',
loadCache=False,
speaker_level=1):
r"""
Lists all the sequences with the given extension in the dirName directory.
Output:
outSequences, speakers
        outSequences
        A list of tuples (speaker, seq_path) where:
            - speaker is the corresponding speaker index
            - seq_path is the path of each sequence, relative to the
            parent directory
outSpeakers
The speaker labels (in order)
The speaker labels are organized the following way
\dirName
\speaker_label
\..
...
seqName.extension
Adjust the value of speaker_level if you want to choose which level of
directory defines the speaker label. Ex if speaker_level == 2 then the
dataset should be organized in the following fashion
\dirName
\crappy_label
\speaker_label
\..
...
seqName.extension
    Set speaker_level == 0 if no speaker label should be retrieved, regardless of
    the organization of the dataset.
"""
cache_path = os.path.join(dirName, '_seqs_cache.txt')
if loadCache:
try:
outSequences, speakers = torch.load(cache_path)
print(f'Loaded from cache {cache_path} successfully')
return outSequences, speakers
except OSError as err:
print(f'Ran in an error while loading {cache_path}: {err}')
print('Could not load cache, rebuilding')
if dirName[-1] != os.sep:
dirName += os.sep
prefixSize = len(dirName)
speakersTarget = {}
outSequences = []
for root, dirs, filenames in tqdm.tqdm(os.walk(dirName)):
filtered_files = [f for f in filenames if f.endswith(extension)]
if len(filtered_files) > 0:
speakerStr = (os.sep).join(
root[prefixSize:].split(os.sep)[:speaker_level])
if speakerStr not in speakersTarget:
speakersTarget[speakerStr] = len(speakersTarget)
speaker = speakersTarget[speakerStr]
for filename in filtered_files:
full_path = os.path.join(root[prefixSize:], filename)
outSequences.append((speaker, full_path))
outSpeakers = [None for x in speakersTarget]
for key, index in speakersTarget.items():
outSpeakers[index] = key
try:
torch.save((outSequences, outSpeakers), cache_path)
print(f'Saved cache file at {cache_path}')
except OSError as err:
print(f'Ran in an error while saving {cache_path}: {err}')
return outSequences, outSpeakers
def parseSeqLabels(pathLabels):
with open(pathLabels, 'r') as f:
lines = f.readlines()
output = {"step": 160} # Step in librispeech dataset is 160bits
maxPhone = 0
for line in lines:
data = line.split()
output[data[0]] = [int(x) for x in data[1:]]
maxPhone = max(maxPhone, max(output[data[0]]))
return output, maxPhone + 1
def filterSeqs(pathTxt, seqCouples):
with open(pathTxt, 'r') as f:
inSeqs = [p.replace('\n', '') for p in f.readlines()]
inSeqs.sort()
seqCouples.sort(key=lambda x: os.path.basename(os.path.splitext(x[1])[0]))
output, index = [], 0
for x in seqCouples:
seq = os.path.basename(os.path.splitext(x[1])[0])
while index < len(inSeqs) and seq > inSeqs[index]:
index += 1
if index == len(inSeqs):
break
if seq == inSeqs[index]:
output.append(x)
return output | zerospeech-libriabx2 | /zerospeech-libriabx2-0.9.8.tar.gz/zerospeech-libriabx2-0.9.8/zrc_abx2/cpc/dataset.py | dataset.py |