mattritchey committed
Commit e802586 · 1 Parent(s): db19bb6

Update app.py

Files changed (1): app.py (+29 -13)
app.py CHANGED
@@ -9,6 +9,7 @@ import time
 from joblib import Parallel, delayed
 from nltk import ngrams
 
+
 @st.cache_data
 def convert_df(df):
     return df.to_csv()
@@ -19,6 +20,7 @@ def normalize_string(string):
 
     return normalized_string
 
+
 def jaccard_similarity(string1, string2,n = 2, normalize=True):
     try:
         if normalize:
@@ -30,18 +32,30 @@ def jaccard_similarity(string1, string2,n = 2, normalize=True):
     except:
         similarity=0
 
+    if string2=='did not extract address':
+        similarity=0
+
     return similarity
 
+def jaccard_sim_split_word_number(string1,string2):
+    numbers1 = ' '.join(re.findall(r'\d+', string1))
+    words1 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string1))
+
+    numbers2 = ' '.join(re.findall(r'\d+', string2))
+    words2 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string2))
+
+    number_similarity=jaccard_similarity(numbers1,numbers2)
+    words_similarity=jaccard_similarity(words1,words2)
+    return (number_similarity+words_similarity)/2
 
 def extract_website_domain(url):
     parsed_url = urlparse(url)
     return parsed_url.netloc
 
 
-def google_address(address):
-
-    address_number = re.findall(r'\b\d+\b', address)[0]
-    address_zip =re.search(r'(\d{5})$', address).group()[:2]
+def google_address(address):
+    # address_number = re.findall(r'\b\d+\b', address)[0]
+    # address_zip =re.search(r'(\d{5})$', address).group()[:2]
 
     search_query = quote(address)
     url=f'https://www.google.com/search?q={search_query}'
@@ -54,7 +68,6 @@ def google_address(address):
         if (l[:11]=='/url?q=http') and (len(t)>20 ):
             texts_links.append((t,l))
 
-
     text = soup.get_text()
 
     texts_links_des=[]
@@ -70,13 +83,16 @@ def google_address(address):
 
     df=pd.DataFrame(texts_links_des,columns=['Title','Link','Description'])
     df['Description']=df['Description'].bfill()
-    df['Address Output']=df['Title'].str.extract(r'(.+? \d{5})')
+    df['Address Output']=df['Title'].str.extract(r'(.+? \d{5})').fillna("**DID NOT EXTRACT ADDRESS**")
+
     df['Link']=[i[7:i.find('&sa=')] for i in df['Link']]
     df['Website'] = df['Link'].apply(extract_website_domain)
 
     df['Square Footage']=df['Description'].str.extract(r"((\d+) Square Feet|(\d+) sq. ft.|(\d+) sqft|(\d+) Sq. Ft.|(\d+) sq|(\d+(?:,\d+)?) Sq\. Ft\.|(\d+(?:,\d+)?) sq)")[0]
-    df['Square Footage']=df['Square Footage'].replace({',':''},regex=True).str.replace(r'\D', '')
-
+    try:
+        df['Square Footage']=df['Square Footage'].replace({',':''},regex=True).str.replace(r'\D', '')
+    except:
+        pass
     df['Beds']=df['Description'].replace({'-':' ','total':''},regex=True).str.extract(r"(\d+) bed")
 
 
@@ -84,7 +100,8 @@ def google_address(address):
     df['Baths']=df['Baths'].str.extract(r'([\d.]+)').astype(float)
 
     df['Year Built']=df['Description'].str.extract(r"built in (\d{4})")
-    df['Match Percent']=[jaccard_similarity(address,i)*100 for i in df['Address Output']]
+
+    df['Match Percent']=[jaccard_sim_split_word_number(address,i)*100 for i in df['Address Output']]
     df['Google Search Result']=[*range(1,df.shape[0]+1)]
 
     # df_final=df[df['Address Output'].notnull()]
@@ -93,7 +110,8 @@ def google_address(address):
     df.insert(0,'Address Input',address)
 
     return df
-
+
+
 def catch_errors(addresses):
     try:
         return google_address(addresses)
@@ -106,12 +124,10 @@ def process_multiple_address(addresses):
     return results
 
 
-
 st.set_page_config(layout="wide")
 
 address = st.sidebar.text_input("Single Address:", "190 Pebble Creek Dr Etna, OH 43062")
 uploaded_file = st.sidebar.file_uploader("Upload Multiple Addresses:")
-# address_file = st.sidebar.radio('Choose',('Single Address', 'File'))
 match_percent = st.sidebar.selectbox('Address Match Percentage At Least:',(70, 80, 90, 100, 0))
 return_sq = st.sidebar.radio('Return Only Results with Square Footage',('No', 'Yes'))
 
@@ -136,7 +152,7 @@ else:
     # results.index=results.index+1
 
 
-    results=results[['Address Input', 'Address Output','Match Percent', 'Website','Square Footage', 'Beds', 'Baths', 'Year Built',
+    results=results[['Address Input', 'Address Output','Match Percent','Website','Square Footage', 'Beds', 'Baths', 'Year Built',
                      'Link','Google Search Result', 'Description' ]]
    results=results.query(f"`Match Percent`>={match_percent}")
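Reviewer note on the scoring change: Match Percent now comes from jaccard_sim_split_word_number, which compares the digit tokens and the word tokens of an address pair separately and averages the two Jaccard scores, so the house number and ZIP weigh the same as the street and city words. The sketch below is a minimal, runnable illustration of that behavior; the body of jaccard_similarity sits outside the hunks above, so the character-bigram implementation here is an assumption, not the app's exact code.

import re
from nltk import ngrams

def jaccard_similarity(string1, string2, n=2, normalize=True):
    # Assumed body: Jaccard overlap of character n-gram sets (not shown in this diff).
    try:
        if normalize:
            string1, string2 = string1.lower().strip(), string2.lower().strip()
        grams1, grams2 = set(ngrams(string1, n)), set(ngrams(string2, n))
        similarity = len(grams1 & grams2) / len(grams1 | grams2)
    except:
        similarity = 0
    return similarity

def jaccard_sim_split_word_number(string1, string2):
    # Score digits and words separately, then average, as in the added function above.
    numbers1 = ' '.join(re.findall(r'\d+', string1))
    words1 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string1))
    numbers2 = ' '.join(re.findall(r'\d+', string2))
    words2 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string2))
    return (jaccard_similarity(numbers1, numbers2) + jaccard_similarity(words1, words2)) / 2

print(jaccard_sim_split_word_number("190 Pebble Creek Dr Etna, OH 43062",
                                    "190 Pebble Creek Dr, Etna OH 43062"))  # 1.0: identical digits and words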
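Related: Address Output is now filled with a sentinel instead of NaN when a result title has no "street ... ZIP" pattern, so every row still gets scored. A small pandas sketch of the extract-and-fill step, using made-up titles:

import pandas as pd

titles = pd.Series([
    "190 Pebble Creek Dr, Etna, OH 43062 | Zillow",  # hypothetical result title containing a ZIP
    "Etna Township real estate agents",              # no "street ... ZIP" pattern
])
address_output = titles.str.extract(r'(.+? \d{5})')[0].fillna("**DID NOT EXTRACT ADDRESS**")
print(address_output.tolist())
# ['190 Pebble Creek Dr, Etna, OH 43062', '**DID NOT EXTRACT ADDRESS**']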
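Finally, the Match Percent values feed the unchanged threshold filter at the bottom of the script, which uses DataFrame.query with a backtick-quoted column name. A tiny standalone example with made-up rows:

import pandas as pd

results = pd.DataFrame({
    'Address Output': ['190 Pebble Creek Dr Etna, OH 43062', '**DID NOT EXTRACT ADDRESS**'],
    'Match Percent': [100.0, 0.0],
})
match_percent = 70  # e.g. the sidebar selectbox value
print(results.query(f"`Match Percent`>={match_percent}"))
# only the first row survives a 70% threshold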