Update README.md
Browse files
README.md
CHANGED
@@ -5,9 +5,11 @@ language:
|
|
5 |
---
|
6 |
# Structure Extraction Model by NuMind 🔥
|
7 |
|
8 |
-
NuExtract_tiny is a
|
9 |
|
10 |
-
Note: This model
|
|
|
|
|
11 |
|
12 |
We also provide a base (3.8B) and large (7B) version of this model: [NuExtract](https://huggingface.co/numind/NuExtract) and [NuExtract-large](https://huggingface.co/numind/NuExtract-large)
|
13 |
|
@@ -26,7 +28,7 @@ import json
|
|
26 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
27 |
|
28 |
|
29 |
-
def predict_NuExtract(model,tokenizer,text, schema,example
|
30 |
schema = json.dumps(json.loads(schema), indent=4)
|
31 |
input_llm = "<|input|>\n### Template:\n" + schema + "\n"
|
32 |
for i in example:
|
@@ -34,7 +36,7 @@ def predict_NuExtract(model,tokenizer,text, schema,example = ["","",""]):
|
|
34 |
input_llm += "### Example:\n"+ json.dumps(json.loads(i), indent=4)+"\n"
|
35 |
|
36 |
input_llm += "### Text:\n"+text +"\n<|output|>\n"
|
37 |
-
input_ids = tokenizer(input_llm, return_tensors="pt",truncation
|
38 |
|
39 |
output = tokenizer.decode(model.generate(**input_ids)[0], skip_special_tokens=True)
|
40 |
return output.split("<|output|>")[1].split("<|end-output|>")[0]
|
@@ -72,7 +74,7 @@ schema = """{
|
|
72 |
}
|
73 |
}"""
|
74 |
|
75 |
-
prediction = predict_NuExtract(model,tokenizer,text, schema,example
|
76 |
print(prediction)
|
77 |
|
78 |
```
|
|
|
5 |
---
|
6 |
# Structure Extraction Model by NuMind 🔥
|
7 |
|
8 |
+
NuExtract_tiny is a version of [Qwen1.5-0.5B](https://huggingface.co/Qwen/Qwen1.5-0.5B), fine-tuned on a private high-quality synthetic dataset for information extraction. To use the model, provide an input text (less than 2000 tokens) and a JSON template describing the information you need to extract.
|
9 |
|
10 |
+
Note: This model is purely extractive, so all text output by the model is present as is in the original text. You can also provide an example of output formatting to help the model understand your task more precisely.
|
11 |
+
|
12 |
+
Note: While this model provides good zero-shot performance, it is intended to be fine-tuned on a specific task (>=30 examples).
|
13 |
|
14 |
We also provide a base (3.8B) and large (7B) version of this model: [NuExtract](https://huggingface.co/numind/NuExtract) and [NuExtract-large](https://huggingface.co/numind/NuExtract-large)
|
15 |
|
|
|
28 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
29 |
|
30 |
|
31 |
+
def predict_NuExtract(model, tokenizer, text, schema, example=["","",""]):
|
32 |
schema = json.dumps(json.loads(schema), indent=4)
|
33 |
input_llm = "<|input|>\n### Template:\n" + schema + "\n"
|
34 |
for i in example:
|
|
|
36 |
input_llm += "### Example:\n"+ json.dumps(json.loads(i), indent=4)+"\n"
|
37 |
|
38 |
input_llm += "### Text:\n"+text +"\n<|output|>\n"
|
39 |
+
input_ids = tokenizer(input_llm, return_tensors="pt", truncation=True, max_length=4000).to("cuda")
|
40 |
|
41 |
output = tokenizer.decode(model.generate(**input_ids)[0], skip_special_tokens=True)
|
42 |
return output.split("<|output|>")[1].split("<|end-output|>")[0]
|
|
|
74 |
}
|
75 |
}"""
|
76 |
|
77 |
+
prediction = predict_NuExtract(model, tokenizer, text, schema, example=["","",""])
|
78 |
print(prediction)
|
79 |
|
80 |
```
|