Update README.md
README.md CHANGED
@@ -12,7 +12,7 @@ metrics:
 - accuracy
 ---
 
-# Model Card for
+# Model Card for ReactionT5v2-forward
 
 This is a ReactionT5 pre-trained to predict the products of reactions. You can use the demo [here](https://huggingface.co/spaces/sagawa/ReactionT5_task_forward).
 
@@ -38,8 +38,8 @@ Use the code below to get started with the model.
 ```python
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
-tokenizer = AutoTokenizer.from_pretrained("sagawa/
-model = AutoModelForSeq2SeqLM.from_pretrained("sagawa/
+tokenizer = AutoTokenizer.from_pretrained("sagawa/ReactionT5v2-forward", return_tensors="pt")
+model = AutoModelForSeq2SeqLM.from_pretrained("sagawa/ReactionT5v2-forward")
 
 inp = tokenizer('REACTANT:COC(=O)C1=CCCN(C)C1.O.[Al+3].[H-].[Li+].[Na+].[OH-]REAGENT:C1CCOC1', return_tensors='pt')
 output = model.generate(**inp, num_beams=1, num_return_sequences=1, return_dict_in_generate=True, output_scores=True)
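The hunk ends at the `model.generate` call, so the diff does not show how the generated token IDs are turned back into a product SMILES string. Below is a minimal decoding sketch, not part of the README change itself; it assumes the `tokenizer` and `output` objects from the snippet above and uses standard `transformers` decoding. The space-stripping at the end is an assumption about the tokenizer emitting space-separated tokens, not something stated in the diff.

```python
# Minimal decoding sketch (not part of the README diff above).
# With return_dict_in_generate=True, model.generate returns an output object
# whose `sequences` field holds the generated token IDs.
product = tokenizer.decode(
    output.sequences[0],       # the single returned sequence (num_return_sequences=1)
    skip_special_tokens=True,  # drop pad/eos special tokens
)
# Assumption: the tokenizer emits space-separated tokens, so remove spaces
# to recover a plain SMILES string for the predicted product.
product = product.replace(" ", "")
print(product)
```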