mattritchey committed
Commit eac0454 · 1 Parent(s): ae1104e

Update main.py

Files changed (1)
  1. main.py +124 -7
main.py CHANGED
@@ -1,9 +1,16 @@
-# https://medium.com/@qacheampong/building-and-deploying-a-fastapi-app-with-hugging-face-9210e9b4a713
-# https://huggingface.co/spaces/Queensly/FastAPI_in_Docker
-
 from fastapi import FastAPI
 import uvicorn
 
+import pandas as pd
+import numpy as np
+import requests
+from urllib.parse import urlparse, quote
+import re
+from bs4 import BeautifulSoup
+import time
+from joblib import Parallel, delayed
+from nltk import ngrams
+
 app = FastAPI()
 
 
@@ -11,13 +18,123 @@ app = FastAPI()
 #Root endpoints
 @app.get("/")
 def root():
-    return {"API": "Sum of 2 Squares"}
+    return {"API": "Google Address Scrap"}
+
+
+
+def normalize_string(string):
+    normalized_string = string.lower()
+    normalized_string = re.sub(r'[^\w\s]', '', normalized_string)
 
-@app.get('/Sum_Square')
-async def predict(number_1: int, number_2: int):
+    return normalized_string
+
+
+def jaccard_similarity(string1, string2, n=2, normalize=True):
+    try:
+        if normalize:
+            string1, string2 = normalize_string(string1), normalize_string(string2)
+
+        grams1 = set(ngrams(string1, n))
+        grams2 = set(ngrams(string2, n))
+        similarity = len(grams1.intersection(grams2)) / len(grams1.union(grams2))
+    except:
+        similarity = 0
+
+    if string2 == 'did not extract address':
+        similarity = 0
+
+    return similarity
+
+def jaccard_sim_split_word_number(string1, string2):
+    numbers1 = ' '.join(re.findall(r'\d+', string1))
+    words1 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string1))
+
+    numbers2 = ' '.join(re.findall(r'\d+', string2))
+    words2 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string2))
+
+    number_similarity = jaccard_similarity(numbers1, numbers2)
+    words_similarity = jaccard_similarity(words1, words2)
+    return (number_similarity + words_similarity) / 2
+
+def extract_website_domain(url):
+    parsed_url = urlparse(url)
+    return parsed_url.netloc
+
+
+def google_address(address):
+
+    search_query = quote(address)
+    url = f'https://www.google.com/search?q={search_query}'
+    response = requests.get(url)
+    soup = BeautifulSoup(response.content, "html.parser")
+
+    texts_links = []
+    for link in soup.find_all("a"):
+        t, l = link.get_text(), link.get("href")
+        if l and l[:11] == '/url?q=http' and len(t) > 20:
+            texts_links.append((t, l))
 
-    prediction = number_1**2 + number_2**2
+    text = soup.get_text()
+
+    texts_links_des = []
+    for i, t_l in enumerate(texts_links):
+        start = text.find(texts_links[i][0][:50])
+        try:
+            end = text.find(texts_links[i+1][0][:50])
+        except:
+            end = text.find('Related searches')
+
+        description = text[start:end]
+        texts_links_des.append((t_l[0], t_l[1], description))
+
+    df = pd.DataFrame(texts_links_des, columns=['Title', 'Link', 'Description'])
+    df['Description'] = df['Description'].bfill()
+    df['Address Output'] = df['Title'].str.extract(r'(.+? \d{5})').fillna("**DID NOT EXTRACT ADDRESS**")
+
+    df['Link'] = [i[7:i.find('&sa=')] for i in df['Link']]
+    df['Website'] = df['Link'].apply(extract_website_domain)
+
+    df['Square Footage'] = df['Description'].str.extract(r"((\d+) Square Feet|(\d+) sq. ft.|(\d+) sqft|(\d+) Sq. Ft.|(\d+) sq|(\d+(?:,\d+)?) Sq\. Ft\.|(\d+(?:,\d+)?) sq)")[0]
+    try:
+        df['Square Footage'] = df['Square Footage'].replace({',': ''}, regex=True).str.replace(r'\D', '', regex=True)
+    except:
+        pass
+    df['Beds'] = df['Description'].replace({'-': ' ', 'total': ''}, regex=True).str.extract(r"(\d+) bed")
+
+
+    df['Baths'] = df['Description'].replace({'-': ' ', 'total': ''}, regex=True).str.extract(r"((\d+) bath|(\d+(?:\.\d+)?) bath)")[0]
+    df['Baths'] = df['Baths'].str.extract(r'([\d.]+)').astype(float)
+
+    df['Year Built'] = df['Description'].str.extract(r"built in (\d{4})")
+
+    df['Match Percent'] = [jaccard_sim_split_word_number(address, i)*100 for i in df['Address Output']]
+    df['Google Search Result'] = [*range(1, df.shape[0]+1)]
+
+    df.insert(0, 'Address Input', address)
+
+    return df
+
+
+def catch_errors(addresses):
+    try:
+        return google_address(addresses)
+    except:
+        return pd.DataFrame({'Address Input': [addresses]})
+
 
+def process_multiple_address(addresses):
+    results = Parallel(n_jobs=32, prefer="threads")(delayed(catch_errors)(i) for i in addresses)
+    return results
+
+
+@app.get('/Sum_Square')
+async def predict(address_input: str):
+
+    address_input_split = address_input.split(';')
+    results = process_multiple_address(address_input_split)
+    results = pd.concat(results).reset_index(drop=True)
+    prediction = results[['Address Input', 'Address Output', 'Match Percent', 'Website', 'Square Footage', 'Beds', 'Baths', 'Year Built',
+                          'Link', 'Google Search Result', 'Description']]
     return prediction
 

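
The commit swaps the sum-of-squares demo for a Google address scraper; the Match Percent column comes from the new n-gram helpers. A minimal sketch of that scoring in isolation, assuming main.py and its dependencies (nltk in particular) are importable; the sample addresses are illustrative only:

    from main import jaccard_sim_split_word_number

    # Digits and words are pulled apart, each pair is scored by character-bigram
    # Jaccard overlap (nltk.ngrams over the raw strings), and the two scores are
    # averaged. normalize_string strips case and punctuation first.
    score = jaccard_sim_split_word_number(
        '123 Main St Springfield 62704',
        '123 Main Street, Springfield, IL 62704')
    print(round(score * 100))  # the same quantity the endpoint reports as Match Percent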
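
main.py imports uvicorn but never starts it, so the server is presumably launched by the Space's Docker entrypoint (e.g. uvicorn main:app). A hedged client sketch against a local run; the host, port, and addresses below are assumptions, and note the endpoint keeps its old /Sum_Square path even though it now scrapes addresses:

    import requests

    # address_input is a single query parameter; multiple addresses are
    # separated by semicolons and fanned out server-side.
    resp = requests.get(
        'http://127.0.0.1:8000/Sum_Square',
        params={'address_input': '123 Main St Springfield IL 62704; 456 Oak Ave Denver CO 80204'})
    print(resp.status_code)
    # Caveat: predict() returns a raw pandas DataFrame, which FastAPI may not be
    # able to JSON-encode; converting with prediction.to_dict('records') inside
    # predict() would be the usual fix if the call errors.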
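
The scraping path can also be exercised without the web layer. A sketch under the assumption that plain requests.get traffic reaches Google; it may be throttled or served a consent page instead, in which case catch_errors returns a one-column frame and most columns will be missing:

    from main import process_multiple_address
    import pandas as pd

    # One thread per address via joblib (n_jobs=32, prefer='threads').
    frames = process_multiple_address(['123 Main St Springfield IL 62704'])
    df = pd.concat(frames).reset_index(drop=True)
    print(df.head())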