import streamlit as st
import pandas as pd
import requests
from urllib.parse import urlparse, quote
import re
from bs4 import BeautifulSoup
from joblib import Parallel, delayed
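
# Streamlit app: look up a street address on Google and extract listing
# details (square footage, beds, baths, year built) from the result snippets.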
@st.cache_data
def convert_df(df):
    # Cache the CSV serialization so repeated downloads don't recompute it.
    return df.to_csv()
def extract_website_domain(url):
    parsed_url = urlparse(url)
    return parsed_url.netloc
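
# Core scraper: fetch a Google search results page for the address and parse
# result titles, links, and description snippets out of the HTML.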
def google_address(address):
    # The street number and the first two digits of the ZIP are used later
    # to confirm that a parsed result actually refers to the queried address.
    address_number = re.findall(r'\b\d+\b', address)[0]
    address_zip = re.search(r'(\d{5})$', address).group()[:2]

    search_query = quote(address)
    url = f'https://www.google.com/search?q={search_query}'
    response = requests.get(url)
    soup = BeautifulSoup(response.content, "html.parser")

    # Collect (title, href) pairs for organic result links.
    texts_links = []
    for link in soup.find_all("a"):
        t, l = link.get_text(), link.get("href")
        if l[:11] == '/url?q=http' and len(t) > 20:
            texts_links.append((t, l))

    # Slice the page text between consecutive result titles to recover
    # each result's description snippet.
    text = soup.get_text()
    texts_links_des = []
    for i, t_l in enumerate(texts_links):
        start = text.find(t_l[0][:50])
        try:
            end = text.find(texts_links[i + 1][0][:50])
        except IndexError:
            end = text.find('Related searches')
        description = text[start:end]
        texts_links_des.append((t_l[0], t_l[1], description))

    df = pd.DataFrame(texts_links_des, columns=['Title', 'Link', 'Description'])
    df['Description'] = df['Description'].bfill()
    df['Address'] = df['Title'].str.extract(r'(.+? \d{5})')
    # Strip Google's '/url?q=' prefix and '&sa=' tracking suffix.
    df['Link'] = [i[7:i.find('&sa=')] for i in df['Link']]
    df['Website'] = df['Link'].apply(extract_website_domain)

    # Pull structured fields out of the description snippets.
    df['Square Footage'] = df['Description'].str.extract(
        r"((\d+) Square Feet|(\d+) sq. ft.|(\d+) sqft|(\d+) Sq. Ft.|(\d+) sq|(\d+(?:,\d+)?) Sq\. Ft\.|(\d+(?:,\d+)?) sq)")[0]
    df['Square Footage'] = df['Square Footage'].replace({',': ''}, regex=True).str.replace(r'\D', '', regex=True)
    df['Beds'] = df['Description'].replace({'-': ' ', 'total': ''}, regex=True).str.extract(r"(\d+) bed")
    df['Baths'] = df['Description'].replace({'-': ' ', 'total': ''}, regex=True).str.extract(
        r"((\d+) bath|(\d+(?:\.\d+)?) bath)")[0]
    df['Baths'] = df['Baths'].str.extract(r'([\d.]+)').astype(float)
    df['Year Built'] = df['Description'].str.extract(r"built in (\d{4})")

    # Keep only rows whose parsed address matches the street number and
    # ZIP prefix of the input address.
    df_final = df[df['Address'].notnull()]
    df_final = df_final[df_final['Address'].str.contains(str(address_number))
                        & df_final['Address'].str.contains(str(address_zip))]
    df_final.insert(0, 'Address Input', address)
    return df_final
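
# Error-tolerant wrapper and thread-pool fan-out for batch lookups.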
def catch_errors(address):
    try:
        return google_address(address)
    except Exception:
        # Scrape failed: return a one-row placeholder so the batch keeps going.
        return pd.DataFrame({'Address Input': [address]})
def process_multiple_address(addresses):
    results = Parallel(n_jobs=32, prefer="threads")(delayed(catch_errors)(i) for i in addresses)
    return results
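
# Streamlit UI: sidebar inputs, scraping, and results display.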
st.set_page_config(layout="wide")
# col1, col2 = st.columns((2))
address_file = st.sidebar.radio('Choose',('Single Address', 'File'))
address = st.sidebar.text_input("Address", "190 Pebble Creek Dr Etna, OH 43062")
uploaded_file = st.sidebar.file_uploader("Choose a file")
# uploaded_file='C:/Users/mritchey/Documents/addresses 100 generated.xlsx'
return_sq = st.sidebar.radio('Return Only Results with Square Footage',('No', 'Yes'))
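
# File mode: the first four columns of the upload are treated as street,
# city, state, and ZIP, and are joined into a single search string.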
if address_file == 'File' and uploaded_file is not None:
    try:
        df = pd.read_csv(uploaded_file)
    except Exception:
        df = pd.read_excel(uploaded_file)
    address_cols = list(df.columns[:4])
    # Normalize the ZIP column to five zero-padded digits.
    df[address_cols[-1]] = df[address_cols[-1]].astype(str).str[:5].astype(int).astype(str)
    df[address_cols[-1]] = df[address_cols[-1]].apply(lambda x: x.zfill(5))
    df['Address All'] = (df[address_cols[0]] + ', ' + df[address_cols[1]] + ', '
                         + df[address_cols[2]] + ' ' + df[address_cols[3]])
    results = process_multiple_address(df['Address All'].values)
    results = pd.concat(results).reset_index(drop=True)
    results.index = results.index + 1
else:
    results = google_address(address).reset_index(drop=True)
    results.index = results.index + 1

results = results[['Address Input', 'Address', 'Website', 'Square Footage',
                   'Beds', 'Baths', 'Year Built', 'Link', 'Description']]
if return_sq == 'Yes':
    # NaN != NaN, so this self-comparison keeps only rows where a square
    # footage was actually parsed.
    results = results.query("`Square Footage` == `Square Footage`").reset_index(drop=True)
    results.index = results.index + 1
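
# Render the results with clickable links and offer a CSV download.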
st.dataframe(
    results,
    column_config={
        "Link": st.column_config.LinkColumn("Link"),
    },
    hide_index=True,
)
csv2 = convert_df(results)
st.download_button(
    label="Download data as CSV",
    data=csv2,
    file_name=f'{address}.csv',
    mime='text/csv',
)
st.markdown(""" <style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style> """, unsafe_allow_html=True) |