fpurl / app.py
joermd's picture
Update app.py
da60d2c verified
raw
history blame
6.81 kB
import socket
import requests
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import streamlit as st
import matplotlib.pyplot as plt
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
import geoip2.database
def analyze_ip_free(url):
    """
    Resolve a URL's host to an IP address and look up its geolocation.

    Reads the local 'GeoLite2-City.mmdb' database file for the lookup.

    Args:
        url (str): Website URL to analyze

    Returns:
        dict: IP and location details, or {"error": ...} on any failure
        (DNS resolution, missing database file, address not found).
    """
    try:
        host = urlparse(url).netloc
        address = socket.gethostbyname(host)
        with geoip2.database.Reader('GeoLite2-City.mmdb') as geo_reader:
            record = geo_reader.city(address)
        location = {
            "city": record.city.name,
            "region": record.subdivisions.most_specific.name,
            "country": record.country.name,
            "latitude": record.location.latitude,
            "longitude": record.location.longitude,
        }
        # Normalize missing (falsy) fields to the literal "Unknown".
        result = {"ip": address}
        result.update({key: value or "Unknown" for key, value in location.items()})
        return result
    except Exception as e:
        return {"error": str(e)}
def analyze_uptime_free(url):
    """
    Check website availability and response status.

    Args:
        url (str): Website URL to check

    Returns:
        dict: Uptime status and status code; on a network failure the
        dict contains "status": "Down" plus an "error" message instead.
    """
    try:
        response = requests.get(url, timeout=5)
        # Bug fix: any successful final status (< 400) counts as "Up".
        # The old `== 200` check wrongly reported 201/204/304 etc. as "Down".
        return {
            "status": "Up" if response.ok else "Down",
            "status_code": response.status_code,
        }
    except requests.exceptions.RequestException as e:
        return {"status": "Down", "error": str(e)}
def analyze_seo_free(url):
    """
    Extract basic SEO information (title, meta description, keywords).

    Args:
        url (str): Website URL to analyze

    Returns:
        dict: SEO-related metadata, or {"error": ...} on failure.
    """
    try:
        # Bug fix: add a timeout so a stalled server cannot hang the app
        # indefinitely (consistent with analyze_uptime_free).
        response = requests.get(url, timeout=5)
        soup = BeautifulSoup(response.text, 'html.parser')
        title = soup.title.string if soup.title else "No Title"
        meta_description = soup.find("meta", attrs={"name": "description"})
        keywords = soup.find("meta", attrs={"name": "keywords"})
        return {
            "title": title,
            # Bug fix: .get() tolerates a <meta> tag without a content
            # attribute, which would otherwise raise KeyError.
            "meta_description": meta_description.get("content", "No Description") if meta_description else "No Description",
            "keywords": keywords.get("content", "No Keywords") if keywords else "No Keywords",
        }
    except Exception as e:
        return {"error": str(e)}
def analyze_carbon_free(url):
    """
    Estimate a website's carbon footprint from its downloaded page size.

    Args:
        url (str): Website URL to analyze

    Returns:
        dict: Page size in KB and estimated CO2 emissions in grams,
        or {"error": ...} on failure.
    """
    try:
        # Bug fix: timeout added so the request cannot block indefinitely
        # (consistent with the other analyzers).
        response = requests.get(url, timeout=5)
        page_size = len(response.content) / 1024  # size in kilobytes
        co2_estimation = page_size * 0.02  # rough heuristic: ~0.02 g CO2 per KB
        return {
            "page_size_kb": round(page_size, 2),
            "estimated_co2_g": round(co2_estimation, 2),
        }
    except Exception as e:
        return {"error": str(e)}
def draw_bar_chart(data, title, xlabel, ylabel):
    """
    Render *data* as a bar chart and save it to 'chart.png'.

    Args:
        data (dict): Mapping of category label -> numeric value
        title (str): Chart title
        xlabel (str): X-axis label
        ylabel (str): Y-axis label
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    ax.bar(list(data.keys()), list(data.values()), color='skyblue')
    ax.set_title(title)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    fig.tight_layout()
    fig.savefig('chart.png')
    # Bug fix: close the figure instead of calling plt.show(). show() is a
    # no-op (or blocks) in a headless Streamlit server, and figures left
    # open accumulate in memory across app reruns.
    plt.close(fig)
def export_to_pdf_free(results, file_path):
    """
    Export analysis results to a simple one-column PDF report.

    Args:
        results (dict): Section name -> dict of key/value findings
        file_path (str): Destination path for the PDF
    """
    c = canvas.Canvas(file_path, pagesize=letter)
    c.drawString(30, 750, "Website Analysis Report")
    c.drawString(30, 730, "=" * 50)
    y = 700

    def _advance(step=20):
        # Bug fix: start a new page before text runs off the bottom edge.
        # The old code kept decrementing y past 0 and drew invisible lines
        # for long reports.
        nonlocal y
        y -= step
        if y < 40:
            c.showPage()
            y = 750

    for section, content in results.items():
        c.drawString(30, y, f"{section}:")
        _advance()
        for key, value in content.items():
            c.drawString(50, y, f"- {key}: {value}")
            _advance()
        _advance()  # blank gap between sections
    c.save()
def main():
    """
    Streamlit entry point: run all free analyses for a user-supplied URL,
    show each result, draw the carbon chart, and offer a PDF export.
    """
    st.title("أداة تحليل المواقع")
    st.write("تحليل شامل للمواقع باستخدام أدوات مجانية")

    # URL input
    url = st.text_input("أدخل رابط الموقع:", "https://example.com")

    if url:
        # IP Analysis
        st.subheader("1. تحليل عنوان IP والموقع الجغرافي")
        ip_data = analyze_ip_free(url)
        if "error" in ip_data:
            st.error(ip_data["error"])
        else:
            st.json(ip_data)

        # Uptime Analysis
        st.subheader("2. تحليل توافر الموقع")
        uptime_data = analyze_uptime_free(url)
        if "error" in uptime_data:
            st.error(uptime_data["error"])
        else:
            st.json(uptime_data)

        # SEO Analysis
        st.subheader("3. تحليل تحسين محركات البحث (SEO)")
        seo_data = analyze_seo_free(url)
        if "error" in seo_data:
            st.error(seo_data["error"])
        else:
            st.json(seo_data)

        # Carbon Analysis
        st.subheader("4. تحليل الأثر البيئي")
        carbon_data = analyze_carbon_free(url)
        if "error" in carbon_data:
            st.error(carbon_data["error"])
        else:
            st.json(carbon_data)

            # Carbon Analysis Chart — bug fix: only drawn when the analysis
            # succeeded. The old code indexed carbon_data["page_size_kb"]
            # unconditionally and raised KeyError after a fetch failure.
            st.subheader("رسم بياني لتحليل الأثر البيئي")
            co2_data = {
                "Page Size (KB)": carbon_data["page_size_kb"],
                "CO2 Emission (g)": carbon_data["estimated_co2_g"]
            }
            draw_bar_chart(co2_data, "Carbon Analysis", "Category", "Value")
            st.image("chart.png")

        # PDF Export
        st.subheader("5. تصدير التقرير إلى PDF")
        if st.button("تصدير التقرير"):
            results = {
                "IP Analysis": ip_data,
                "Uptime Analysis": uptime_data,
                "SEO Analysis": seo_data,
                "Carbon Analysis": carbon_data,
            }
            file_path = "website_analysis_report.pdf"
            export_to_pdf_free(results, file_path)
            st.success(f"تم تصدير التقرير إلى {file_path}")
            with open(file_path, "rb") as pdf_file:
                st.download_button("تحميل التقرير", data=pdf_file, file_name="website_analysis_report.pdf")
# Run the Streamlit app when this file is executed directly.
if __name__ == "__main__":
    main()