Update README.md
README.md CHANGED
@@ -75,7 +75,7 @@ If you want to use flash attention or increase sequence length, please, check th
 from gliner import GLiNER
 import torch
 
-model = GLiNER.from_pretrained("knowledgator/gliner-
+model = GLiNER.from_pretrained("knowledgator/gliner-bi-llama-v1.0",
                                _attn_implementation = 'flash_attention_2',
                                max_len = 2048).to('cuda:0', dtype=torch.float16)
 ```
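
For context, this change completes the previously truncated model identifier so the README snippet actually runs: it loads the bi-encoder LLaMA-based GLiNER model with flash attention and a 2048-token sequence length. A minimal end-to-end sketch of the corrected snippet follows; the `predict_entities` call, sample text, and labels are illustrative additions, not part of this commit.

```python
from gliner import GLiNER
import torch

# Load the bi-encoder model with flash attention and an extended context,
# as in the updated README (requires a CUDA GPU and flash-attn installed).
model = GLiNER.from_pretrained("knowledgator/gliner-bi-llama-v1.0",
                               _attn_implementation='flash_attention_2',
                               max_len=2048).to('cuda:0', dtype=torch.float16)

# Illustrative usage: extract entities for a set of user-defined labels.
text = "Albert Einstein developed the theory of relativity at ETH Zurich."
labels = ["person", "organization", "theory"]
entities = model.predict_entities(text, labels, threshold=0.5)
for ent in entities:
    print(ent["text"], "=>", ent["label"])
```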