adding tokenizer; new README
- README copy.md +23 -0
- added_tokens.json +1 -0
- config copy.json +26 -0
- config_sentence_transformers.json +7 -0
- modules.json +14 -0
- pytorch_model copy.bin +3 -0
- sentence_bert_config.json +4 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- vocab.txt +0 -0
README copy.md
ADDED
@@ -0,0 +1,23 @@
---
pipeline_tag: sentence-similarity
language: english
tags:
- sentence-transformers
- sentence-similarity
- transformers
---
# recobo/agri-sentence-transformer
This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 512-dimensional dense vector space and can be used for tasks like clustering or semantic search.
## Usage (Sentence-Transformers)
Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:
```
pip install -U sentence-transformers
```
Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["A man is eating food.", "A man is eating a piece of bread"]

model = SentenceTransformer('recobo/agri-sentence-transformer')
embeddings = model.encode(sentences)
print(embeddings)
```
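The `encode` call above returns one NumPy embedding per input sentence, so similarity scoring needs nothing beyond the library itself. The following is a minimal follow-up sketch, not part of the original card: it reuses the model name and example sentences from the README and scores the pair with `sentence_transformers.util.cos_sim`.

```python
from sentence_transformers import SentenceTransformer, util

# Model and sentences as in the README above.
model = SentenceTransformer('recobo/agri-sentence-transformer')
sentences = ["A man is eating food.", "A man is eating a piece of bread"]

embeddings = model.encode(sentences)                     # shape: (2, embedding_dim)
similarity = util.cos_sim(embeddings[0], embeddings[1])  # 1x1 tensor of cosine scores
print(float(similarity))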
added_tokens.json
ADDED
@@ -0,0 +1 @@
{"mycoplasma": 31185, "##ocarpus": 31144, "##iformis": 31173, "##ipes": 31126, "##lad": 31166, "##eus": 31170, "solanum": 31137, "##ophora": 31139, "##ospora": 31156, "##emat": 31099, "##eles": 31172, "##rys": 31167, "##ifolia": 31121, "##eud": 31175, "##ifol": 31138, "##ifl": 31162, "##aet": 31115, "##erp": 31110, "##aem": 31186, "gla": 31182, "brev": 31131, "sacchar": 31124, "##iaceae": 31108, "##chus": 31100, "##ylla": 31200, "##ifolium": 31195, "##ymn": 31104, "##roch": 31189, "##nus": 31194, "##iidae": 31136, "##elia": 31154, "##iella": 31197, "##aen": 31095, "elymus": 31168, "mult": 31161, "##obacillus": 31096, "##iflora": 31192, "##ogl": 31169, "lyc": 31132, "##ophyllum": 31141, "##ylus": 31112, "phyll": 31177, "##erb": 31187, "##apon": 31181, "##ichth": 31130, "dia": 31183, "pter": 31188, "##omys": 31118, "##otrich": 31113, "##occus": 31158, "##iridae": 31147, "##tus": 31128, "desulf": 31098, "##ichthys": 31106, "##acch": 31174, "##yph": 31103, "##ellus": 31129, "japon": 31184, "##oplasma": 31105, "##ocer": 31107, "##mun": 31111, "##ilis": 31203, "##esia": 31176, "##acillus": 31191, "##iat": 31109, "##ulata": 31149, "##eat": 31134, "##othr": 31190, "##iales": 31178, "prun": 31122, "##inensis": 31153, "##cul": 31143, "##esc": 31133, "##olyt": 31196, "##ephal": 31180, "##ucc": 31146, "##eria": 31142, "##ivirus": 31201, "echin": 31163, "##ilus": 31092, "##ocarp": 31164, "festuca": 31116, "##eaf": 31093, "##erus": 31155, "##onii": 31140, "##ibrio": 31120, "##icola": 31094, "##arv": 31090, "##escens": 31157, "##ymus": 31127, "##iformes": 31125, "##icult": 31199, "##anthus": 31114, "##ansfer": 31179, "##erpes": 31101, "##ectr": 31152, "##eph": 31198, "streptomyces": 31119, "brassica": 31102, "##ellia": 31165, "##unct": 31145, "##ioides": 31097, "##iensis": 31193, "##eae": 31117, "##phaer": 31123, "##icum": 31151, "[pad": 31159, "##eum": 31150, "##inia": 31160, "##omus": 31091, "##ocephal": 31148, "##erma": 31135, "##otus": 31202, "##alus": 31171}
config copy.json
ADDED
@@ -0,0 +1,26 @@
{
  "_name_or_path": "output/models/final",
  "architectures": [
    "BertModel"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "torch_dtype": "float32",
  "transformers_version": "4.13.0.dev0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 31204
}
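The config describes a standard BERT-base encoder: 12 layers, 12 attention heads, hidden size 768, positions up to 512, and a 31,204-entry vocabulary (the base vocabulary plus the tokens added above). A brief sketch, assuming the repo serves this same configuration as its `config.json`, that reads back the fields most relevant to embedding width and input length:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("recobo/agri-sentence-transformer")

print(config.model_type)               # "bert"
print(config.hidden_size)              # 768 -> width of the token embeddings
print(config.num_hidden_layers)        # 12
print(config.max_position_embeddings)  # 512 -> hard limit on input positions
print(config.vocab_size)               # 31204, matches the extended tokenizer
```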
config_sentence_transformers.json
ADDED
@@ -0,0 +1,7 @@
{
  "__version__": {
    "sentence_transformers": "2.1.0",
    "transformers": "4.13.0.dev0",
    "pytorch": "1.10.0"
  }
}
modules.json
ADDED
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
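modules.json declares the sentence-transformers pipeline as two modules: a `Transformer` wrapper around the BERT encoder at the repo root, followed by a `Pooling` module stored under `1_Pooling`. The sketch below rebuilds the same pipeline by hand; the mean pooling mode is an assumption (it is the library default), since the `1_Pooling` config file is not part of this commit:

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the BERT encoder, loaded from the repo root.
word_embedding_model = models.Transformer(
    "recobo/agri-sentence-transformer",
    max_seq_length=300,  # from sentence_bert_config.json
)

# Module 1: pooling over token embeddings. "mean" is assumed here; the
# actual 1_Pooling/config.json is not included in this commit.
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),
    pooling_mode="mean",
)

model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
```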
pytorch_model copy.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f6e02e18394e0f2dcfcee9b9772b4ff67f78d94a0dcf41b707e922844f7ad793
size 440105393
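The weights file is tracked with Git LFS, so the repository stores only this pointer: the spec version, the SHA-256 of the real `pytorch_model.bin`, and its size in bytes (about 440 MB). Purely as an illustration, a downloaded copy can be checked against the pointer like this (the local path is hypothetical):

```python
import hashlib
import os

# Values copied from the LFS pointer above.
EXPECTED_SHA256 = "f6e02e18394e0f2dcfcee9b9772b4ff67f78d94a0dcf41b707e922844f7ad793"
EXPECTED_SIZE = 440105393

def matches_pointer(path: str) -> bool:
    """True if the file at `path` has the size and SHA-256 from the pointer."""
    if os.path.getsize(path) != EXPECTED_SIZE:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == EXPECTED_SHA256

# print(matches_pointer("pytorch_model.bin"))  # hypothetical local file
```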
sentence_bert_config.json
ADDED
@@ -0,0 +1,4 @@
{
  "max_seq_length": 300,
  "do_lower_case": false
}
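This caps inputs at 300 tokens, below the encoder's 512 position embeddings; longer inputs are truncated at encode time. The limit is visible (and adjustable, up to 512) on the loaded model through the standard `max_seq_length` attribute, as in this short sketch:

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("recobo/agri-sentence-transformer")

print(model.max_seq_length)  # 300, from sentence_bert_config.json

# Can be raised at runtime, but not past max_position_embeddings (512).
model.max_seq_length = 400
```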
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "do_basic_tokenize": true, "never_split": null, "special_tokens_map_file": null, "name_or_path": "output/models/final", "tokenizer_class": "BertTokenizer"}
vocab.txt
ADDED
The diff for this file is too large to render.