docs(readme): update python examples to reference correct model name
README.md (changed)
````diff
@@ -64,8 +64,8 @@ input_texts = [
     "sorting algorithms"
 ]
 
-tokenizer = AutoTokenizer.from_pretrained("
-model = AutoModel.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("Supabase/gte-small")
+model = AutoModel.from_pretrained("Supabase/gte-small")
 
 # Tokenize the input texts
 batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')
````
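For context, the updated `from_pretrained` calls slot into the README's Transformers example roughly as sketched below. This is a minimal sketch, not the README's exact code: the first input text is an illustrative placeholder, and the mean-pooling and normalization steps are assumptions based on the usual recipe for GTE-style embedding models, since that part of the README lies outside the hunk above.

```python
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

input_texts = [
    "how do I order a list of numbers",  # illustrative placeholder entry
    "sorting algorithms",                # last entry shown in the diff context
]

tokenizer = AutoTokenizer.from_pretrained("Supabase/gte-small")
model = AutoModel.from_pretrained("Supabase/gte-small")

# Tokenize the input texts
batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')

with torch.no_grad():
    outputs = model(**batch_dict)

# Mean-pool token embeddings over non-padding positions (assumption: the
# README's own pooling helper is outside the hunk shown above), then
# L2-normalize so dot products are cosine similarities.
mask = batch_dict["attention_mask"].unsqueeze(-1).float()
embeddings = (outputs.last_hidden_state * mask).sum(dim=1) / mask.sum(dim=1)
embeddings = F.normalize(embeddings, p=2, dim=1)

# Cosine similarity between the two inputs
print(embeddings[0] @ embeddings[1])
```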
````diff
@@ -86,7 +86,7 @@ from sentence_transformers.util import cos_sim
 
 sentences = ['That is a happy person', 'That is a very happy person']
 
-model = SentenceTransformer('
+model = SentenceTransformer('Supabase/gte-small')
 embeddings = model.encode(sentences)
 print(cos_sim(embeddings[0], embeddings[1]))
 ```
````
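Assembled with its imports, the second example would look roughly like the snippet below. The `cos_sim` import comes from the hunk header; the `SentenceTransformer` import is outside the hunk and is added here as an assumption.

```python
from sentence_transformers import SentenceTransformer
from sentence_transformers.util import cos_sim

sentences = ['That is a happy person', 'That is a very happy person']

# The one-line change: load the checkpoint under the Supabase namespace.
model = SentenceTransformer('Supabase/gte-small')
embeddings = model.encode(sentences)
print(cos_sim(embeddings[0], embeddings[1]))
```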