Update README.md
README.md (changed)
## Evaluation

```python
import torch
from transformers import AutoTokenizer, AutoModelForTokenClassification, pipeline
import pandas as pd

model_checkpoint = "MassMin/xlm-roberta-base-finetuned-panx-de"  # Replace with your Hugging Face model name
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model = AutoModelForTokenClassification.from_pretrained(model_checkpoint).to(device)

ner_pipeline = pipeline("ner", model=model, tokenizer=tokenizer, framework="pt")


def tag_text_with_pipeline(text, ner_pipeline):
    # Use the NER pipeline to get predictions
    results = ner_pipeline(text)
    # Assumed completion: return the token-level predictions as a DataFrame (pandas is imported above)
    return pd.DataFrame(results)


text = "Jeff Dean works at Google in California."
result = tag_text_with_pipeline(text, ner_pipeline)
print(result)
```
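The call above returns one prediction per sub-word token. If you want whole entity spans instead, the same pipeline can be built with an aggregation strategy. This is a minimal sketch, not part of the original snippet: it reuses `model`, `tokenizer`, `text`, and `pd` from the block above, and `aggregation_strategy="simple"` plus the `grouped_pipeline` name are illustrative assumptions.

```python
# Sketch: group sub-word predictions into whole entity spans.
# aggregation_strategy="simple" is an assumed setting, not taken from the original snippet.
grouped_pipeline = pipeline(
    "ner",
    model=model,
    tokenizer=tokenizer,
    framework="pt",
    aggregation_strategy="simple",
)

grouped = grouped_pipeline(text)
print(pd.DataFrame(grouped))  # one row per entity span (entity_group, score, word, start, end)
```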
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
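The model card evaluates performance with the F1 score for NER. As a rough, non-authoritative sketch of how an entity-level F1 of this kind can be computed, the snippet below uses the `seqeval` library on placeholder tag sequences; both `seqeval` and the example sequences are assumptions for illustration, not the exact evaluation pipeline used for this model.

```python
# Minimal sketch: entity-level F1 with seqeval (assumed library, not confirmed by the model card).
# The tag sequences are illustrative placeholders, not real model predictions.
from seqeval.metrics import f1_score

y_true = [["B-PER", "I-PER", "O", "B-ORG", "O", "O", "B-LOC", "O"]]
y_pred = [["B-PER", "I-PER", "O", "B-ORG", "O", "O", "O", "O"]]

print(f"Entity-level F1: {f1_score(y_true, y_pred):.2f}")
```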