Update app.py
app.py CHANGED
@@ -6,15 +6,13 @@ from urllib.parse import urljoin
 def parse_links(ort):
 
     # Construct the full URL
-    initial_url = f"http://specialist-it.de:3000?q={ort}"
-
-
-
-
-
-
-    # Parse the HTML content using BeautifulSoup
-    soup = BeautifulSoup(response.content, 'html.parser')
+    initial_url = f"http://specialist-it.de:3000?q={ort}"
+    # Send the request to the initial URL
+    response = requests.get(initial_url)
+    response.raise_for_status()  # Check whether the request was successful
+    print(response)
+    # Parse the HTML content using BeautifulSoup
+    soup = BeautifulSoup(response.content, 'html.parser')
 
 
 def scrape_links(links):
@@ -28,12 +26,6 @@ def scrape_links(links):
             # Parse the HTML content using BeautifulSoup
             soup = BeautifulSoup(response.content, 'html.parser')
 
-            # Extract the desired content (here, the page title as an example)
-            content = soup.title.string if soup.title else "No title found"
-
-            results.append((link, content))
-        except Exception as e:
-            results.append((link, str(e)))
 
     return response
 
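For context, a minimal, self-contained sketch of how the updated parse_links could be exercised after this commit. This is a reconstruction under stated assumptions, not the Space's actual file: the imports are inferred from the hunk context (requests, BeautifulSoup, urljoin), the "Berlin" query value is a placeholder, and the final return of absolute link URLs is a guess, since the diff does not show how the function ends.

# Sketch only: imports inferred from the hunk context; the return value is assumed.
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin


def parse_links(ort):
    # Construct the full URL
    initial_url = f"http://specialist-it.de:3000?q={ort}"
    # Send the request to the initial URL
    response = requests.get(initial_url)
    response.raise_for_status()  # Raises requests.exceptions.HTTPError on 4xx/5xx
    # Parse the HTML content using BeautifulSoup
    soup = BeautifulSoup(response.content, 'html.parser')
    # Assumed purpose: collect absolute link targets from the result page
    return [urljoin(initial_url, a['href']) for a in soup.find_all('a', href=True)]


if __name__ == "__main__":
    try:
        links = parse_links("Berlin")  # placeholder query value
        print(links)
    except requests.exceptions.RequestException as err:
        # raise_for_status() errors and network failures both surface here
        print(f"Request failed: {err}")

If scrape_links is meant to consume this list, keeping a per-link try/except like the one this commit removes (or an equivalent) would prevent a single bad URL from aborting the whole run; that is a design observation, not something the diff itself settles.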