Commit c9beefa · Parent: 302a390 · test cv extraction
app.py CHANGED
@@ -97,10 +97,10 @@ def LLM_Inference(cv_text):
 
 Do not explain, comment or make up any more information that is not relative to the list of Information extraction. Respond in Vietnamese. Let's work this out in a step by step way to ensure the correct answer. [END].
 '''
-    inputs = tokenizer(text, return_tensors='pt').to(device)
+    inputs = tokenizer(text, return_tensors='pt', max_length=2048,truncation=True).to(device)
     with torch.no_grad():
         outputs = model.generate(
-            **inputs, max_new_tokens=
+            **inputs, max_new_tokens=1024, pad_token_id = tokenizer.eos_token_id
        )
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
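For context, the changed hunk leaves LLM_Inference looking roughly like the sketch below. This is a reconstruction for readability, not the repository's full file: only the tokenizer() and model.generate() arguments come from this commit, while the checkpoint name, the loading code, and the prompt body are illustrative placeholders.

# Sketch of the inference path after this commit. Only the tokenizer() and
# model.generate() arguments are taken from the diff; the model name, prompt
# body, and loading code are placeholder assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model_name = 'some-causal-lm'  # hypothetical; the Space's actual checkpoint is not shown in this hunk
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

def LLM_Inference(cv_text):
    # Prompt body elided; the commit does not touch it.
    text = f'''Extract the requested fields from the CV below and respond in Vietnamese.
{cv_text}
'''
    # max_length + truncation keep long CVs within the model's context window.
    inputs = tokenizer(text, return_tensors='pt', max_length=2048, truncation=True).to(device)
    with torch.no_grad():
        outputs = model.generate(
            # Cap the answer length and set pad_token_id explicitly so models
            # without a pad token do not emit a warning during generation.
            **inputs, max_new_tokens=1024, pad_token_id=tokenizer.eos_token_id
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)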