Update README.md
README.md CHANGED

@@ -46,7 +46,7 @@ Then you can use the model like this:
 from sentence_transformers import SentenceTransformer
 sentences = ["Apprendre le python", "Devenir expert en comptabilité"]
 
-model = SentenceTransformer('inokufu/
+model = SentenceTransformer('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')
 embeddings = model.encode(sentences)
 print(embeddings)
 ```
@@ -72,8 +72,8 @@ def mean_pooling(model_output, attention_mask):
 sentences = ["Apprendre le python", "Devenir expert en comptabilité"]
 
 # Load model from HuggingFace Hub
-tokenizer = AutoTokenizer.from_pretrained('inokufu/
-model = AutoModel.from_pretrained('inokufu/
+tokenizer = AutoTokenizer.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')
+model = AutoModel.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')
 
 # Tokenize sentences
 encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
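The second hunk sits inside the README's plain Transformers usage snippet; the `mean_pooling` helper named in the hunk header is not shown in the diff. Below is a minimal sketch of how that snippet typically reads end to end with the model id introduced by this commit, assuming the standard mean-pooling pattern used in sentence-transformers READMEs (the helper body and the final pooling step are assumptions, not part of this diff):

```python
import torch
from transformers import AutoTokenizer, AutoModel


# Mean pooling: average token embeddings, weighted by the attention mask
# (standard sentence-transformers README pattern; assumed, not shown in this diff).
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element holds all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


sentences = ["Apprendre le python", "Devenir expert en comptabilité"]

# Load model from HuggingFace Hub with the model id introduced by this commit
tokenizer = AutoTokenizer.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')
model = AutoModel.from_pretrained('inokufu/flaubert-base-uncased-xnli-sts-finetuned-education')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Pool token embeddings into fixed-size sentence embeddings
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
print(sentence_embeddings)
```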