saraleivam committed
Commit 0f16d34
Parent: b27d183

Add new SentenceTransformer model.

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
+unigram.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "word_embedding_dimension": 384,
+  "pooling_mode_cls_token": false,
+  "pooling_mode_mean_tokens": true,
+  "pooling_mode_max_tokens": false,
+  "pooling_mode_mean_sqrt_len_tokens": false,
+  "pooling_mode_weightedmean_tokens": false,
+  "pooling_mode_lasttoken": false,
+  "include_prompt": true
+}
README.md ADDED
@@ -0,0 +1,393 @@
+---
+base_model: sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
+datasets: []
+language: []
+library_name: sentence-transformers
+pipeline_tag: sentence-similarity
+tags:
+- sentence-transformers
+- sentence-similarity
+- feature-extraction
+- generated_from_trainer
+- dataset_size:500
+- loss:SoftmaxLoss
+widget:
+- source_sentence: Reportando a Mánager ventasLograr un crecimiento sostenible de
+    los ingresos mediante la negociación, cierre, implementación y cumplimiento de
+    acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura
+    de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de
+    la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea
+    de flotas y Camiones.
+  sentences:
+  - Modernize Infrastructure and Applications with Google Cloud.Data Science.Business
+    Strategy.Understand the role that cloud modernization and migration plays in an
+    organization's digital transformation.. Examine available options to run compute
+    workloads in the cloud.. Explore the advantages of using containers, serverless
+    computing, and APIs in application modernization.. Learn about the business reasons
+    to choose hybrid or multi-cloud strategies, and how GKE Enterprise can help support
+    these strategies.
+  - Microsoft 365 Copilot for Leaders.Data Science.Machine Learning.Risk Management
+  - 'Decoding AI: A Deep Dive into AI Models and Predictions.Data Science.Machine
+    Learning.Learn key concepts and terminology in artificial intelligence (AI), including
+    machine learning, generative AI, and deep learning . Learn the core components
+    of machine learning systems, including data, models, and evaluation techniques.
+    Recognize why AI systems can fail and identify the kinds of work required to make
+    useful technology. Identify common pitfalls in conversations about AI and recognize
+    conflicts of interest when interpreting claims about AI systems'
+- source_sentence: Reportando a Mánager ventasLograr un crecimiento sostenible de
+    los ingresos mediante la negociación, cierre, implementación y cumplimiento de
+    acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura
+    de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de
+    la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea
+    de flotas y Camiones.
+  sentences:
+  - 'Getting Started with BigQuery Machine Learning.Data Science.Cloud Computing.How
+    to create, evaluate and use machine learning models in BigQuery. '
+  - Convolutional Neural Networks.Data Science.Machine Learning.Artificial Neural
+    Networks, Computer Vision, Machine Learning, Applied Machine Learning, Deep Learning,
+    Machine Learning Software, Machine Learning Algorithms, Network Model, Tensorflow,
+    Network Architecture, Human Learning
+  - 'Understanding Plants - Part II: Fundamentals of Plant Biology.Data Science.Basic
+    Science.Understanding Plants - Part II: Fundamentals of Plant Biology'
+- source_sentence: Reportando a Mánager ventasLograr un crecimiento sostenible de
+    los ingresos mediante la negociación, cierre, implementación y cumplimiento de
+    acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura
+    de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de
+    la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea
+    de flotas y Camiones.
+  sentences:
+  - Introduction to Computer Science and Programming.Data Science.Software Development.1.
+    Use the Javascript language to create interactive programs in the browser with
+    2D graphics.. 2. Convert between number bases, work with modular arithmetic, sequences
+    and series and plot graphs.. 3. Develop and use mental models to describe the
+    workings of a range of computer systems.
+  - Programming Languages, Part A.Data Science.Software Development.Computer Programming,
+    Programming Principles, Algorithms, Critical Thinking
+  - 'Global Health Innovations.Data Science.Public Health.Describe the principles
+    and key types of innovation in order to characterise the fundamental features
+    of new models of care and technologies. Compare and contrast systems that support
+    the development, investment, and protection of healthcare innovation to navigate
+    the innovation journey. Evaluate key factors influencing the adoption and scaling
+    of different healthcare innovations, and examine the reasons why some innovations
+    fail . Critique a particular innovation, using a given framework, in order to
+    make a recommendation to a panel of decision makers. '
+- source_sentence: Reportando a Mánager ventasLograr un crecimiento sostenible de
+    los ingresos mediante la negociación, cierre, implementación y cumplimiento de
+    acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura
+    de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de
+    la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea
+    de flotas y Camiones.
+  sentences:
+  - Development of Secure Embedded Systems.Data Science.Computer Security and Networks.Operating
+    Systems, Systems Design, Computer Programming, System Software, Computer Architecture,
+    Computer Networking, C Programming Language Family, Computer Programming Tools,
+    Hardware Design, Networking Hardware, System Programming, Theoretical Computer
+    Science, Algorithms
+  - GST - Genesis and imposition!.Data Science.Finance.Explain the genesis of GST,
+    the need for its introduction and the Constitutional and legal framework under
+    which it was introduced. . Identify and describe different forms of supplies
+    of goods and services, deemed supplies and transactions excluded from the scope
+    of supply.. Differentiate various types of supplies and identify whether a supply
+    is inter-State or intra-State, exempt or composite supply.. Critically analyse
+    whether a given transaction is a supply and define the nature of supply.
+  - 'AI for Project Managers and Scrum Masters.Data Science.Business Essentials.Identify
+    key elements of AI for Project Management . Evaluate AI Tools and Techniques for
+    Projects . Integrate AI into Project Lifecycles '
+- source_sentence: Reportando a Mánager ventasLograr un crecimiento sostenible de
+    los ingresos mediante la negociación, cierre, implementación y cumplimiento de
+    acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura
+    de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de
+    la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea
+    de flotas y Camiones.
+  sentences:
+  - Introduction to Data Science and scikit-learn in Python.Data Science.Data Analysis.Employ
+    artificial intelligence techniques to test hypothesis in Python. Apply a machine
+    learning model combining Numpy, Pandas, and Scikit-Learn
+  - 'Planejamento de projetos: Como reunir tudo.Data Science.Leadership and Management.Descrever
+    os componentes da fase de planejamento e a significância deles.. Identificar ferramentas
+    e práticas recomendadas para criar um plano de projeto e um plano de gestão de
+    riscos. . Descrever como estimar, acompanhar e manter um orçamento.. Elaborar
+    um plano de comunicação e explicar como gerenciá-lo.'
+  - Microsoft 365 Copilot for Leaders.Data Science.Machine Learning.Risk Management
+---
+
+# SentenceTransformer based on sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2
+
+This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.
+
+## Model Details
+
+### Model Description
+- **Model Type:** Sentence Transformer
+- **Base model:** [sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2) <!-- at revision bf3bf13ab40c3157080a7ab344c831b9ad18b5eb -->
+- **Maximum Sequence Length:** 128 tokens
+- **Output Dimensionality:** 384 dimensions
+- **Similarity Function:** Cosine Similarity
+<!-- - **Training Dataset:** Unknown -->
+<!-- - **Language:** Unknown -->
+<!-- - **License:** Unknown -->
+
+### Model Sources
+
+- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+
+### Full Model Architecture
+
+```
+SentenceTransformer(
+  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
+  (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
+)
+```
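With `pooling_mode_mean_tokens` enabled and every other mode disabled, the `Pooling` module above is plain masked mean pooling over the 384-dimensional token embeddings. For reference only (this is not part of the commit, and the `mean_pool` helper and example sentence are illustrative), a minimal sketch of the equivalent computation with `transformers` and `torch`:

```python
import torch
from transformers import AutoModel, AutoTokenizer

repo = "saraleivam/GURU-paraphrase-multilingual-MiniLM-L12-v2"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModel.from_pretrained(repo)

def mean_pool(last_hidden_state, attention_mask):
    # Zero out padding positions, then average the remaining token embeddings.
    mask = attention_mask.unsqueeze(-1).float()
    summed = (last_hidden_state * mask).sum(dim=1)
    counts = mask.sum(dim=1).clamp(min=1e-9)
    return summed / counts

batch = tokenizer(["ejemplo de perfil profesional"], padding=True, truncation=True,
                  max_length=128, return_tensors="pt")
with torch.no_grad():
    output = model(**batch)
embedding = mean_pool(output.last_hidden_state, batch["attention_mask"])
print(embedding.shape)  # torch.Size([1, 384])
```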
+
+## Usage
+
+### Direct Usage (Sentence Transformers)
+
+First install the Sentence Transformers library:
+
+```bash
+pip install -U sentence-transformers
+```
+
+Then you can load this model and run inference.
+```python
+from sentence_transformers import SentenceTransformer
+
+# Download from the 🤗 Hub
+model = SentenceTransformer("saraleivam/GURU-paraphrase-multilingual-MiniLM-L12-v2")
+# Run inference
+sentences = [
+    'Reportando a Mánager ventasLograr un crecimiento sostenible de los ingresos mediante la negociación, cierre, implementación y cumplimiento de acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea de flotas y Camiones.',
+    'Introduction to Data Science and scikit-learn in Python.Data Science.Data Analysis.Employ artificial intelligence techniques to test hypothesis in Python. Apply a machine learning model combining Numpy, Pandas, and Scikit-Learn',
+    'Planejamento de projetos: Como reunir tudo.Data Science.Leadership and Management.Descrever os componentes da fase de planejamento e a significância deles.. Identificar ferramentas e práticas recomendadas para criar um plano de projeto e um plano de gestão de riscos. . Descrever como estimar, acompanhar e manter um orçamento.. Elaborar um plano de comunicação e explicar como gerenciá-lo.',
+]
+embeddings = model.encode(sentences)
+print(embeddings.shape)
+# [3, 384]
+
+# Get the similarity scores for the embeddings
+similarities = model.similarity(embeddings, embeddings)
+print(similarities.shape)
+# [3, 3]
+```
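The card lists semantic search among the intended uses, and the training pairs match a professional profile against course descriptions. A hedged follow-up sketch of ranking candidates with the same API (the profile and course strings below are made-up placeholders, not taken from the dataset):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("saraleivam/GURU-paraphrase-multilingual-MiniLM-L12-v2")

profile = "Sales manager focused on opening new B2B clients across LATAM."  # placeholder query
courses = [  # placeholder corpus
    "Getting Started with BigQuery Machine Learning",
    "AI for Project Managers and Scrum Masters",
    "Convolutional Neural Networks",
]

profile_embedding = model.encode(profile)
course_embeddings = model.encode(courses)

# similarity() returns a (1, len(courses)) tensor of cosine similarities
scores = model.similarity(profile_embedding, course_embeddings)[0]
for score, course in sorted(zip(scores.tolist(), courses), reverse=True):
    print(f"{score:.3f}  {course}")
```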
+
+<!--
+### Direct Usage (Transformers)
+
+<details><summary>Click to see the direct usage in Transformers</summary>
+
+</details>
+-->
+
+<!--
+### Downstream Usage (Sentence Transformers)
+
+You can finetune this model on your own dataset.
+
+<details><summary>Click to expand</summary>
+
+</details>
+-->
+
+<!--
+### Out-of-Scope Use
+
+*List how the model may foreseeably be misused and address what users ought not to do with the model.*
+-->
+
+<!--
+## Bias, Risks and Limitations
+
+*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
+-->
+
+<!--
+### Recommendations
+
+*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
+-->
+
+## Training Details
+
+### Training Dataset
+
+#### Unnamed Dataset
+
+
+* Size: 500 training samples
+* Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>label</code>
+* Approximate statistics based on the first 1000 samples:
+  |         | sentence1 | sentence2 | label |
+  |:--------|:----------|:----------|:------|
+  | type    | string    | string    | int   |
+  | details | <ul><li>min: 85 tokens</li><li>mean: 85.0 tokens</li><li>max: 85 tokens</li></ul> | <ul><li>min: 13 tokens</li><li>mean: 65.22 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>0: ~10.80%</li><li>1: ~13.20%</li><li>2: ~76.00%</li></ul> |
+* Samples:
+  | sentence1 | sentence2 | label |
+  |:----------|:----------|:------|
+  | <code>Reportando a Mánager ventasLograr un crecimiento sostenible de los ingresos mediante la negociación, cierre, implementación y cumplimiento de acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea de flotas y Camiones.</code> | <code>Launching Your Music Career.Data Science.Music and Art.Articulate your Unique Selling Proposition.. Use the Business Model Canvas to determine the core functions required to effectively manage your portfolio career.. Complete a comprehensive growth and recruitment plan for your teaching studio and identify the competitive landscape.. Seek out and book performance opportunities in a variety of settings.</code> | <code>2</code> |
+  | <code>Reportando a Mánager ventasLograr un crecimiento sostenible de los ingresos mediante la negociación, cierre, implementación y cumplimiento de acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea de flotas y Camiones.</code> | <code>Robotics.Data Science.Electrical Engineering.Motion Planning. Matlab. Estimation</code> | <code>2</code> |
+  | <code>Reportando a Mánager ventasLograr un crecimiento sostenible de los ingresos mediante la negociación, cierre, implementación y cumplimiento de acuerdos con los diferentes clientes.Encargado de realizar la búsqueda y apertura de nuevos clientes a nivel LATAM . Entender requerimientos y saber asesorar de la mejor manera para un buen cierre de negocio. Alto conocimiento en la línea de flotas y Camiones.</code> | <code>Core Java.Data Science.Software Development.Learn the basic syntax and functions of the Java programming language. Apply object-oriented programming techniques to building classes, creating objects, and understanding how solutions are packaged in Java.. Learn how to implement inheritance and polymorphism in Java.. Use selected parts of the vast Java SE class library to enhance your Java programming techniques.</code> | <code>2</code> |
+* Loss: [<code>SoftmaxLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#softmaxloss)
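The training script itself is not included in this commit, but given the columns (`sentence1`, `sentence2`, `label`) and `SoftmaxLoss` reported above, a plausible reconstruction with sentence-transformers 3.x would look roughly like the sketch below; the placeholder rows and output path are assumptions, not taken from the repository:

```python
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses

model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")

# Column names mirror the dataset description above: sentence1, sentence2, label (0/1/2).
train_dataset = Dataset.from_dict({
    "sentence1": ["placeholder profile text"],
    "sentence2": ["placeholder course description"],
    "label": [2],
})

loss = losses.SoftmaxLoss(
    model=model,
    sentence_embedding_dimension=model.get_sentence_embedding_dimension(),  # 384
    num_labels=3,
)

trainer = SentenceTransformerTrainer(model=model, train_dataset=train_dataset, loss=loss)
trainer.train()
model.save("GURU-paraphrase-multilingual-MiniLM-L12-v2")  # hypothetical output path
```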
+
+### Training Hyperparameters
+
+#### All Hyperparameters
+<details><summary>Click to expand</summary>
+
+- `overwrite_output_dir`: False
+- `do_predict`: False
+- `eval_strategy`: no
+- `prediction_loss_only`: True
+- `per_device_train_batch_size`: 8
+- `per_device_eval_batch_size`: 8
+- `per_gpu_train_batch_size`: None
+- `per_gpu_eval_batch_size`: None
+- `gradient_accumulation_steps`: 1
+- `eval_accumulation_steps`: None
+- `learning_rate`: 5e-05
+- `weight_decay`: 0.0
+- `adam_beta1`: 0.9
+- `adam_beta2`: 0.999
+- `adam_epsilon`: 1e-08
+- `max_grad_norm`: 1.0
+- `num_train_epochs`: 3.0
+- `max_steps`: -1
+- `lr_scheduler_type`: linear
+- `lr_scheduler_kwargs`: {}
+- `warmup_ratio`: 0.0
+- `warmup_steps`: 0
+- `log_level`: passive
+- `log_level_replica`: warning
+- `log_on_each_node`: True
+- `logging_nan_inf_filter`: True
+- `save_safetensors`: True
+- `save_on_each_node`: False
+- `save_only_model`: False
+- `restore_callback_states_from_checkpoint`: False
+- `no_cuda`: False
+- `use_cpu`: False
+- `use_mps_device`: False
+- `seed`: 42
+- `data_seed`: None
+- `jit_mode_eval`: False
+- `use_ipex`: False
+- `bf16`: False
+- `fp16`: False
+- `fp16_opt_level`: O1
+- `half_precision_backend`: auto
+- `bf16_full_eval`: False
+- `fp16_full_eval`: False
+- `tf32`: None
+- `local_rank`: 0
+- `ddp_backend`: None
+- `tpu_num_cores`: None
+- `tpu_metrics_debug`: False
+- `debug`: []
+- `dataloader_drop_last`: False
+- `dataloader_num_workers`: 0
+- `dataloader_prefetch_factor`: None
+- `past_index`: -1
+- `disable_tqdm`: False
+- `remove_unused_columns`: True
+- `label_names`: None
+- `load_best_model_at_end`: False
+- `ignore_data_skip`: False
+- `fsdp`: []
+- `fsdp_min_num_params`: 0
+- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+- `fsdp_transformer_layer_cls_to_wrap`: None
+- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+- `deepspeed`: None
+- `label_smoothing_factor`: 0.0
+- `optim`: adamw_torch
+- `optim_args`: None
+- `adafactor`: False
+- `group_by_length`: False
+- `length_column_name`: length
+- `ddp_find_unused_parameters`: None
+- `ddp_bucket_cap_mb`: None
+- `ddp_broadcast_buffers`: False
+- `dataloader_pin_memory`: True
+- `dataloader_persistent_workers`: False
+- `skip_memory_metrics`: True
+- `use_legacy_prediction_loop`: False
+- `push_to_hub`: False
+- `resume_from_checkpoint`: None
+- `hub_model_id`: None
+- `hub_strategy`: every_save
+- `hub_private_repo`: False
+- `hub_always_push`: False
+- `gradient_checkpointing`: False
+- `gradient_checkpointing_kwargs`: None
+- `include_inputs_for_metrics`: False
+- `eval_do_concat_batches`: True
+- `fp16_backend`: auto
+- `push_to_hub_model_id`: None
+- `push_to_hub_organization`: None
+- `mp_parameters`: 
+- `auto_find_batch_size`: False
+- `full_determinism`: False
+- `torchdynamo`: None
+- `ray_scope`: last
+- `ddp_timeout`: 1800
+- `torch_compile`: False
+- `torch_compile_backend`: None
+- `torch_compile_mode`: None
+- `dispatch_batches`: None
+- `split_batches`: None
+- `include_tokens_per_second`: False
+- `include_num_input_tokens_seen`: False
+- `neftune_noise_alpha`: None
+- `optim_target_modules`: None
+- `batch_eval_metrics`: False
+- `batch_sampler`: batch_sampler
+- `multi_dataset_batch_sampler`: proportional
+
+</details>
+
+### Framework Versions
+- Python: 3.10.12
+- Sentence Transformers: 3.0.1
+- Transformers: 4.41.2
+- PyTorch: 2.3.1+cu121
+- Accelerate: 0.31.0
+- Datasets: 2.20.0
+- Tokenizers: 0.19.1
+
+## Citation
+
+### BibTeX
+
+#### Sentence Transformers and SoftmaxLoss
+```bibtex
+@inproceedings{reimers-2019-sentence-bert,
+    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+    author = "Reimers, Nils and Gurevych, Iryna",
+    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+    month = "11",
+    year = "2019",
+    publisher = "Association for Computational Linguistics",
+    url = "https://arxiv.org/abs/1908.10084",
+}
+```
+
+<!--
+## Glossary
+
+*Clearly define terms in order to be accessible across audiences.*
+-->
+
+<!--
+## Model Card Authors
+
+*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
+-->
+
+<!--
+## Model Card Contact
+
+*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
+-->
config.json ADDED
@@ -0,0 +1,26 @@
+{
+  "_name_or_path": "sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2",
+  "architectures": [
+    "BertModel"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 384,
+  "initializer_range": 0.02,
+  "intermediate_size": 1536,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.41.2",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 250037
+}
config_sentence_transformers.json ADDED
@@ -0,0 +1,10 @@
+{
+  "__version__": {
+    "sentence_transformers": "3.0.1",
+    "transformers": "4.41.2",
+    "pytorch": "2.3.1+cu121"
+  },
+  "prompts": {},
+  "default_prompt_name": null,
+  "similarity_fn_name": null
+}
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7cbd1427fc0396967b0b66afacc8b5f1d8f40ccef785dd2429563f721fc98ade
+size 470637416
modules.json ADDED
@@ -0,0 +1,14 @@
+[
+  {
+    "idx": 0,
+    "name": "0",
+    "path": "",
+    "type": "sentence_transformers.models.Transformer"
+  },
+  {
+    "idx": 1,
+    "name": "1",
+    "path": "1_Pooling",
+    "type": "sentence_transformers.models.Pooling"
+  }
+]
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "max_seq_length": 128,
+  "do_lower_case": false
+}
special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "cls_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "mask_token": {
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "sep_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cad551d5600a84242d0973327029452a1e3672ba6313c2a3c3d69c4310e12719
+size 17082987
tokenizer_config.json ADDED
@@ -0,0 +1,64 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<pad>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "250001": {
+      "content": "<mask>",
+      "lstrip": true,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "<s>",
+  "do_lower_case": true,
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "max_length": 128,
+  "model_max_length": 128,
+  "pad_to_multiple_of": null,
+  "pad_token": "<pad>",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
+  "sep_token": "</s>",
+  "stride": 0,
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "longest_first",
+  "unk_token": "<unk>"
+}
unigram.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:da145b5e7700ae40f16691ec32a0b1fdc1ee3298db22a31ea55f57a966c4a65d
+size 14763260