Update main.py
main.py
CHANGED
@@ -1,14 +1,19 @@
 from fastapi import FastAPI
 import uvicorn
 
 import pandas as pd
 import numpy as np
-import
-import
-import
-from
-
-from
 
 app = FastAPI()
 
@@ -17,118 +22,92 @@ app = FastAPI()
 #Root endpoints
 @app.get("/")
 def root():
-    return {"API": "
-
-def geocode_address(address):
 
     try:
-
-
-
-
-
     except:
-
-        geocode = RateLimiter(geolocator.geocode, min_delay_seconds=2)
-        location = geolocator.geocode(address)
-        lat, lon = location.latitude, location.longitude
 
-
-
-def get_hail_data(address, start_date, end_date, radius_miles, get_max):
-
-    resolution = 1  # mrms 1 and hrrr is 3
-    radius = int(np.ceil(radius_miles*1.6/resolution))
-
-
-    start_date = pd.Timestamp(str(start_date)).strftime('%Y%m%d')
-    end_date = pd.Timestamp(str(end_date)).strftime('%Y%m%d')
-    date_years = pd.date_range(start=start_date, end=end_date, freq='M')
-    date_range_days = pd.date_range(start_date, end_date)
-    years = list(set([d.year for d in date_years]))
-
-    if len(years) == 0:
-        years = [pd.Timestamp(start_date).year]
 
-
-    lat, lon = geocode_address(address)
 
-
-
-
 
-
-
-
-
-
-
-        'Data/2022_hail.h5',
-        'Data/2021_hail.h5',
-        'Data/2020_hail.h5'
-    ]
-
-    files_choosen = [i for i in files if any(i for j in years if str(j) in i)]
-
-    # Query and Collect H5 Data
-    all_data = []
-    all_dates = []
-    for file in files_choosen:
-        with h5py.File(file, 'r') as f:
-            # Get Dates from H5
-            dates = f['dates'][:]
-            date_idx = np.where((dates >= int(start_date))
-                                & (dates <= int(end_date)))[0]
-
-            # Select Data by Date and Radius
-            dates = dates[date_idx]
-            data = f['hail'][date_idx, row-radius_miles:row +
-                             radius_miles+1, col-radius_miles:col+radius_miles+1]
-
-            all_data.append(data)
-            all_dates.append(dates)
-
-    data_all = np.vstack(all_data)
-    dates_all = np.concatenate(all_dates)
-
-    # Convert to Inches
-    data_mat = np.where(data_all < 0, 0, data_all)*0.0393701
 
-
-
-
 
-    # Process to DataFrame
-    # Find Max of Data
-    if get_max == True:
-        data_max = np.max(data_mat, axis=(1, 2))
-        df_data = pd.DataFrame({'Date': dates_all,
-                                'Hail_max': data_max})
-    # Get all Data
-    else:
-        data_all = list(data_mat)
-        df_data = pd.DataFrame({'Date': dates_all,
-                                'Hail_all': data_all})
 
-
-
 
-
-
-
 
-    return df_data
 
 
 @app.get('/Hail_Docker_Data')
-async def predict(address: str
-
-
-        results
-
     except:
-        results
-
-    return results
 
 
 from fastapi import FastAPI
 import uvicorn
 
+
+import streamlit as st
 import pandas as pd
 import numpy as np
+import requests
+from urllib.parse import urlparse, quote
+import re
+from bs4 import BeautifulSoup
+import time
+from joblib import Parallel, delayed
+from nltk import ngrams
+from googlesearch import search
+
 
 app = FastAPI()
 
 #Root endpoints
 @app.get("/")
 def root():
+    return {"API": "AdressScrap"}
 
+def jaccard_similarity(string1, string2, n=2, normalize=True):
     try:
+        if normalize:
+            string1, string2 = normalize_string(string1), normalize_string(string2)
+
+        grams1 = set(ngrams(string1, n))
+        grams2 = set(ngrams(string2, n))
+        similarity = len(grams1.intersection(grams2)) / len(grams1.union(grams2))
     except:
+        similarity = 0
 
+    if string2 == 'did not extract address':
+        similarity = 0
 
+    return similarity
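
Note on the new similarity helper: `jaccard_similarity` calls `normalize_string`, which this commit neither defines nor imports, so with the default `normalize=True` the try block raises `NameError` and the bare `except` silently forces `similarity` to 0. A minimal sketch of what such a helper might look like, assuming the intent is case- and punctuation-insensitive n-gram matching (lowercasing would also let the `string2 == 'did not extract address'` guard match the `**DID NOT EXTRACT ADDRESS**` placeholder set further down):

    import re

    def normalize_string(s):
        # Hypothetical helper, not part of this commit: lowercase, strip
        # punctuation, and collapse whitespace so character n-grams compare
        # cleanly across formatting variants.
        s = re.sub(r'[^a-z0-9 ]', ' ', str(s).lower())
        return re.sub(r'\s+', ' ', s).strip()
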
 
+def jaccard_sim_split_word_number(string1, string2):
+    numbers1 = ' '.join(re.findall(r'\d+', string1))
+    words1 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string1))
 
+    numbers2 = ' '.join(re.findall(r'\d+', string2))
+    words2 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string2))
+
+    number_similarity = jaccard_similarity(numbers1, numbers2)
+    words_similarity = jaccard_similarity(words1, words2)
+    return (number_similarity + words_similarity) / 2
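
`jaccard_sim_split_word_number` scores the digit tokens and the alphabetic tokens of the two strings separately, then averages the two similarities, so a matching street number is not swamped by differences in the wording around it. A quick sketch with hypothetical addresses (assuming the `normalize_string` sketch above):

    same = jaccard_sim_split_word_number('123 Cypress Creek Ln, Katy, TX 77433',
                                         '123 Cypress Creek Lane Katy TX 77433')
    other = jaccard_sim_split_word_number('123 Cypress Creek Ln, Katy, TX 77433',
                                          '9800 Elm St, Dallas, TX 75201')
    # The first score lands near 1.0 (identical digits, near-identical words);
    # the second is far lower.
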
 
+def extract_website_domain(url):
+    parsed_url = urlparse(url)
+    return parsed_url.netloc
 
 
+def google_address(address):
+    all_data = [i for i in search(address, ssl_verify=False, advanced=True,
+                                  num_results=11)]
+
+    df = pd.DataFrame({'Title': [i.title for i in all_data],
+                       'Link': [i.url for i in all_data],
+                       'Description': [i.description for i in all_data]})
+
+    df = df.query("Title==Title")
+    df['Link'] = df['Link'].str.replace('/www.', 'https://www.')
+
+    # df['Description']=df['Description'].bfill()
+    df['Address Output'] = df['Title'].str.extract(r'(.+? \d{5})').fillna("**DID NOT EXTRACT ADDRESS**")
+
+    df['Link'] = [i[7:i.find('&sa=')] for i in df['Link']]
+    df['Website'] = df['Link'].apply(extract_website_domain)
+
+    df['Square Footage'] = df['Description'].str.extract(r"((\d+) Square Feet|(\d+) sq. ft.|(\d+) sqft|(\d+) Sq. Ft.|(\d+) sq|(\d+(?:,\d+)?) Sq\. Ft\.|(\d+(?:,\d+)?) sq)")[0]
+    try:
+        df['Square Footage'] = df['Square Footage'].replace({',': ''}, regex=True).str.replace(r'\D', '')
+    except:
+        pass
+    df['Beds'] = df['Description'].replace({'-': ' ', 'total': ''}, regex=True).str.extract(r"(\d+) bed")
+
+    df['Baths'] = df['Description'].replace({'-': ' ', 'total': ''}, regex=True).str.extract(r"((\d+) bath|(\d+(?:\.\d+)?) bath)")[0]
+    df['Baths'] = df['Baths'].str.extract(r'([\d.]+)').astype(float)
+
+    df['Year Built'] = df['Description'].str.extract(r"built in (\d{4})")
+
+    df['Match Percent'] = [jaccard_sim_split_word_number(address, i)*100 for i in df['Address Output']]
+    df['Google Search Result'] = [*range(1, df.shape[0]+1)]
+
+    # df_final=df[df['Address Output'].notnull()]
+    # df_final=df_final[(df_final['Address Output'].str.contains(str(address_number))) & (df_final['Address Output'].str.contains(str(address_zip)))]
 
+    df.insert(0, 'Address Input', address)
+
+    return df
 
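
The `i[7:i.find('&sa=')]` cleanup in `google_address` assumes every result link is a Google redirect of the form `/url?q=<target>&sa=...`: the slice starts at 7 because `/url?q=` is exactly seven characters, and it stops at the `&sa=` parameter. If `&sa=` is absent, `find` returns -1 and the slice silently drops the link's last character. A sketch of the same cleanup on one hypothetical redirect:

    # Hypothetical redirect URL in the shape google_address expects.
    link = '/url?q=https://www.zillow.com/homedetails/123-Main-St&sa=U&ved=2ah'
    target = link[7:link.find('&sa=')]
    print(target)                          # https://www.zillow.com/homedetails/123-Main-St
    print(extract_website_domain(target))  # www.zillow.com
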
 
 
 @app.get('/Hail_Docker_Data')
+async def predict(address: str):
+    try:
+        results = google_address(address)
+        results = results[['Address Input', 'Address Output', 'Match Percent', 'Website',
+                           'Square Footage', 'Beds', 'Baths', 'Year Built',
+                           'Link', 'Google Search Result', 'Description']]
     except:
+        results = pd.DataFrame({'Address Input': [address]})
+
+    return results
 
 
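With the Space running, the endpoint (still named `/Hail_Docker_Data` even though the handler now scrapes address data) takes the address as a query parameter. A hypothetical local smoke test, assuming the app is served with `uvicorn main:app --port 8000`:

    import requests

    # Host, port, and address are assumptions for a local run.
    resp = requests.get('http://localhost:8000/Hail_Docker_Data',
                        params={'address': '123 Main St, Houston, TX 77002'})
    print(resp.status_code)
    print(resp.json())

Because `predict` returns a pandas DataFrame rather than a plain dict, converting with `results.to_dict(orient='records')` before returning would make the JSON shape of the response explicit.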