Update app.py
app.py CHANGED
@@ -3,59 +3,19 @@ import requests
 from bs4 import BeautifulSoup
 from urllib.parse import urljoin

-def parse_links_and_content(ort):
-    base_url = "https://vereine-in-deutschland.net"
-    all_links = []
+def parse_links(ort):

     # Construct the full URL
-    initial_url = f"{
+    initial_url = f"http://specialist-it.de:3000?q={ort}"

     try:
         # Send the request to the initial URL
         response = requests.get(initial_url)
         response.raise_for_status()  # Check that the request succeeded
-
+        print(response)
         # Parse the HTML content using BeautifulSoup
         soup = BeautifulSoup(response.content, 'html.parser')

-        # Determine the last page
-        link_element = soup.select_one('li.page-item:nth-child(8) > a:nth-child(1)')
-
-        if link_element and 'href' in link_element.attrs:
-            href = link_element['href']
-            # Extract the last two characters of the URL
-            last_two_chars = href[-2:]
-
-            # Convert the last two characters to an integer
-            last_two_chars_int = int(last_two_chars)
-        else:
-            last_two_chars_int = 1  # If the last page is not found, assume there is only one page
-
-        # Loop through all pages and collect links
-        for page_number in range(1, last_two_chars_int + 1):
-            page_url = f"{base_url}/vereine/Bayern/{ort}/p/{page_number}"
-            response = requests.get(page_url)
-            response.raise_for_status()
-
-            soup = BeautifulSoup(response.content, 'html.parser')
-            target_div = soup.select_one('div.row-cols-1:nth-child(4)')
-
-            if target_div:
-                #links = [urljoin(base_url, a['href']) for a in target_div.find_all('a', href=True)]
-                texts = [a.text for a in target_div.find_all('a', href=True)]
-                #all_texts.extend(texts)
-                all_links.extend(texts)
-            else:
-                print(f"Target div not found on page {page_number}")
-
-        #all_links = {key: value for key, value in data.items() if value != ort}
-
-    except Exception as e:
-        return str(e), []
-
-    all_links = all_links[0::2]
-    return all_links
-    #return filtered_data

 def scrape_links(links):
     results = []
@@ -75,7 +35,7 @@ def scrape_links(links):
     except Exception as e:
         results.append((link, str(e)))

-    return
+    return response

 # Create the Gradio interface
 with gr.Blocks() as demo:
@@ -83,18 +43,12 @@ with gr.Blocks() as demo:

     ort_input = gr.Textbox(label="Ort", placeholder="Gib den Namen des Ortes ein")
     links_output = gr.JSON(label="Gefundene Vereine")
-    #content_output = gr.JSON(label="Inhalt der Links")
-
-    def process_ort(ort):
-        links = parse_links_and_content(ort)
-        #scraped_content = scrape_links(links)
-        return links

     # Button to start parsing
     button = gr.Button("Parse und Scrape")

     # Connect the button to the function
-    button.click(fn=process_ort, inputs=ort_input, outputs=links_output)
+    button.click(fn=parse_links, inputs=ort_input, outputs=links_output)

     # Launch the Gradio application
     demo.launch()
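For orientation, here is a minimal sketch of how app.py plausibly reads after this commit. The diff never shows how the new parse_links ends, so the return value below is an assumption modeled on the removed parse_links_and_content, not the author's code; scrape_links is left out because nothing in this revision calls it.

import requests
import gradio as gr
from bs4 import BeautifulSoup

def parse_links(ort):
    # Construct the full URL: the commit swaps the multi-page crawl of
    # vereine-in-deutschland.net for a single query to an external service.
    initial_url = f"http://specialist-it.de:3000?q={ort}"
    try:
        # Send the request to the initial URL
        response = requests.get(initial_url)
        response.raise_for_status()  # Check that the request succeeded
        print(response)
        # Parse the HTML content using BeautifulSoup
        soup = BeautifulSoup(response.content, 'html.parser')
        # Assumption: collect anchor texts the way the removed
        # parse_links_and_content did; the diff elides this part.
        return [a.text for a in soup.find_all('a', href=True)]
    except Exception as e:
        return [str(e)]

# Create the Gradio interface
with gr.Blocks() as demo:
    ort_input = gr.Textbox(label="Ort", placeholder="Gib den Namen des Ortes ein")
    links_output = gr.JSON(label="Gefundene Vereine")

    # Button to start parsing
    button = gr.Button("Parse und Scrape")

    # Connect the button to the function
    button.click(fn=parse_links, inputs=ort_input, outputs=links_output)

# Launch the Gradio application
demo.launch()

Note that the commit leaves scrape_links in place but unwired: the button now feeds parse_links straight into links_output, and the new `return response` at the end of scrape_links would hand Gradio a requests.Response rather than JSON-serializable data, so that path looks like dead code in this revision.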