# import subprocess
import coremltools as ct
from transformers import AutoTokenizer
import numpy as np
# Load the compiled Core ML model and the matching Hugging Face tokenizer
model = ct.models.CompiledMLModel('./msmarco_distilbert_base_tas_b_512_single_quantized.mlmodelc')
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/msmarco-distilbert-base-tas-b")
def tokenize(text):
    return tokenizer(
        text,
        add_special_tokens=True,  # adds [CLS] and [SEP]
        max_length=512,
        padding='max_length',
        truncation=True,
        return_attention_mask=True,
        return_tensors='np'
    )
def embed(text):
    result = tokenize(text)
    # The Core ML model expects float32 inputs of shape (1, 512)
    token_ids = result['input_ids'].astype(np.float32)
    mask = result['attention_mask'].astype(np.float32)
    print(f"Tokens: {token_ids}")
    print(f"Mask: {mask}")
    predictions = model.predict({"input_ids": token_ids, "attention_mask": mask})
    return predictions['embeddings'][0]
string = "test: hello, world! calling swift executable from python, what will we think of next?"
print(f"🔮 Embedding string: {string}")
embeddings = embed(string)
print(f"🔮 Embeddings (0-10): {embeddings[:10]}")
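# A minimal follow-on sketch (not part of the original script): TAS-B embeddings
# are typically compared with a dot product, so a query vector can be scored
# against a passage vector as below. The two example strings are illustrative only.
query_vec = embed("what is the capital of france?")
passage_vec = embed("Paris is the capital and most populous city of France.")
score = np.dot(query_vec, passage_vec)
print(f"🔮 Dot-product similarity: {score}")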