---
license: mit
language:
- en
tags:
- NLP
pipeline_tag: feature-extraction
---
# Usage
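The snippet below tokenizes your strings with the Bio_ClinicalBERT tokenizer, loads the four contrastive-learning variants from their checkpoints, and extracts CLS embeddings. `data` and `ckpt1`–`ckpt4` are placeholders for your own DataFrame and checkpoint paths.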
```python
import torch
from transformers import AutoTokenizer

from model import (
    BERTContrastiveLearning_simcse,
    BERTContrastiveLearning_simcse_w,
    BERTContrastiveLearning_samp,
    BERTContrastiveLearning_samp_w,
)

# Tokenize your strings
str_list = data["string"].tolist()  # your list of strings here
tokenizer = AutoTokenizer.from_pretrained("emilyalsentzer/Bio_ClinicalBERT")
tokenized_inputs = tokenizer(
    str_list, padding=True, max_length=50, truncation=True, return_tensors="pt"
)
input_ids = tokenized_inputs["input_ids"]
attention_mask = tokenized_inputs["attention_mask"]

# Load the four variants (ckpt1-ckpt4 are your checkpoint paths)
model1 = BERTContrastiveLearning_simcse.load_from_checkpoint(ckpt1).eval()
model2 = BERTContrastiveLearning_simcse_w.load_from_checkpoint(ckpt2).eval()
model3 = BERTContrastiveLearning_samp.load_from_checkpoint(ckpt3).eval()
model4 = BERTContrastiveLearning_samp_w.load_from_checkpoint(ckpt4).eval()

# Extract CLS embeddings with the variant of your choice (model1 shown here)
with torch.no_grad():
    cls, _ = model1(input_ids, attention_mask)  # embeddings
```
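
Once extracted, the embeddings can be compared directly. A minimal sketch, assuming `cls` is the `(batch_size, hidden_dim)` tensor returned above; the similarity computation is illustrative and not part of the model code:

```python
import torch.nn.functional as F

# Unit-normalize each embedding, then take pairwise dot products:
# entry [i, j] is the cosine similarity between strings i and j.
normed = F.normalize(cls, dim=1)
similarity = normed @ normed.T
print(similarity)
```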