# Datasets:
# import subprocess
import coremltools as ct
from transformers import AutoTokenizer
import numpy as np

# Load the compiled Core ML embedding model and the matching HuggingFace
# tokenizer (must be the same checkpoint the model was converted from).
model = ct.models.CompiledMLModel('./msmarco_distilbert_base_tas_b_512_single_quantized.mlmodelc')
tokenizer = AutoTokenizer.from_pretrained("sentence-transformers/msmarco-distilbert-base-tas-b")
def tokenize(text):
    """Tokenize *text* into fixed-length (512) numpy tensors for the model.

    Returns the tokenizer's BatchEncoding containing 'input_ids' and
    'attention_mask', each padded/truncated to exactly 512 tokens.
    """
    return tokenizer(
        text,
        add_special_tokens=True,  # Adds [CLS] and [SEP]
        max_length=512,
        padding='max_length',     # pad up to the model's fixed 512-token input
        truncation=True,
        return_attention_mask=True,
        return_tensors='np'       # numpy arrays, ready for Core ML
    )
def embed(text):
    """Embed *text* with the Core ML model and return its embedding vector.

    Tokenizes the input, casts ids/mask to float32 (the compiled model's
    expected input dtype), runs prediction, and returns the first (only)
    row of the 'embeddings' output.
    """
    result = tokenize(text)
    # Core ML inputs are float32; tokenizer returns int arrays of shape (1, 512).
    token_ids = result['input_ids'].astype(np.float32)
    mask = result['attention_mask'].astype(np.float32)
    print(f"Tokens: {token_ids}")
    print(f"Mask: {mask}")
    predictions = model.predict({"input_ids": token_ids, "attention_mask": mask})
    return predictions['embeddings'][0]
# Demo: embed a sample string and show the first ten dimensions.
string = "test: hello, world! calling swift executable from python, what will we think of next?"
print(f"🔮 Embedding string: {string}")
embeddings = embed(string)
print(f"🔮 Embeddings (0-10): {embeddings[:10]}")