Update app.py
app.py CHANGED
@@ -2,22 +2,29 @@ import gradio as gr
 import pdfplumber
 from transformers import pipeline
 
-extractor = pipeline("ner", model="
+# Lighter NER model
+extractor = pipeline("ner", model="xlm-roberta-large-finetuned-conll03", aggregation_strategy="simple")
 
 def extract_seller(pdf_file):
     with pdfplumber.open(pdf_file) as pdf:
+        # Pull the text from all pages
+        full_text = "\n".join(page.extract_text() for page in pdf.pages if page.extract_text())
+
+        # Split into short chunks (max 512 characters so the model runs faster)
+        chunks = [full_text[i:i+512] for i in range(0, len(full_text), 512)]
 
     seller_name = None
 
+    for chunk in chunks:
+        entities = extractor(chunk)
+
+        for entity in entities:
+            if "ORG" in entity["entity_group"]:  # we are looking for organization names
+                seller_name = entity["word"]
+                break  # take the first detected company as the seller
+
+        if seller_name:  # if a seller was found, stop the loop
+            break
 
     return {"Sprzedawca": seller_name if seller_name else "Nie znaleziono"}
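For reference, the loop above relies on the output format of the token-classification pipeline: with aggregation_strategy="simple", each call returns a list of dicts with entity_group, word, score, start and end keys. A minimal sketch of that shape, using a stand-in public checkpoint (dslim/bert-base-NER) and made-up invoice text rather than the model from this commit:

from transformers import pipeline

# Stand-in checkpoint and sample sentence, used only to illustrate the output shape.
ner = pipeline("ner", model="dslim/bert-base-NER", aggregation_strategy="simple")

for entity in ner("Invoice issued by ACME Ltd. to John Smith."):
    # Each entry looks roughly like:
    # {"entity_group": "ORG", "word": "ACME Ltd", "score": 0.99, "start": 18, "end": 26}
    print(entity["entity_group"], entity["word"])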
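The hunk's context line shows that app.py also imports gradio, but the interface definition sits outside the changed region. A minimal sketch of how extract_seller could be exposed in the Space, assuming a file-upload input and a JSON output (the component choices and labels here are guesses, not part of the commit):

import gradio as gr

# Hypothetical wiring; the real interface code is not shown in this diff.
demo = gr.Interface(
    fn=extract_seller,
    inputs=gr.File(label="Invoice PDF"),
    outputs=gr.JSON(label="Result"),
    title="Invoice seller extraction",
)

if __name__ == "__main__":
    demo.launch()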