Update main.py
main.py CHANGED
@@ -1,15 +1,14 @@
 from fastapi import FastAPI
 import uvicorn
 
+from geopy.extra.rate_limiter import RateLimiter
+from geopy.geocoders import Nominatim
 import pandas as pd
 import numpy as np
-import
-
-import
-from
-import time
-from joblib import Parallel, delayed
-from nltk import ngrams
+import pickle
+import rasterio
+import h5py
+from skimage.morphology import disk
 
 app = FastAPI()
 
@@ -18,124 +17,101 @@ app = FastAPI()
 #Root endpoints
 @app.get("/")
 def root():
-    return {"API": "
+    return {"API": "Hail Docker Data"}
 
 
+def geocode_address(address):
 
-def normalize_string(string):
-    normalized_string = string.lower()
-    normalized_string = re.sub(r'[^\w\s]', '', normalized_string)
-
-    return normalized_string
-
-
-def jaccard_similarity(string1, string2,n = 2, normalize=True):
     try:
-
-
-
-
-
-        similarity = len(grams1.intersection(grams2)) / len(grams1.union(grams2))
+        address2 = address.replace(' ', '+').replace(',', '%2C')
+        df = pd.read_json(
+            f'https://geocoding.geo.census.gov/geocoder/locations/onelineaddress?address={address2}&benchmark=2020&format=json')
+        results = df.iloc[:1, 0][0][0]['coordinates']
+        lat, lon = results['y'], results['x']
     except:
-
+        geolocator = Nominatim(user_agent='GTA Lookup')
+        geocode = RateLimiter(geolocator.geocode, min_delay_seconds=2)
+        location = geolocator.geocode(address)
+        lat, lon = location.latitude, location.longitude
 
-
-        similarity=0
-
-    return similarity
-
-def jaccard_sim_split_word_number(string1,string2):
-    numbers1 = ' '.join(re.findall(r'\d+', string1))
-    words1 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string1))
-
-    numbers2 = ' '.join(re.findall(r'\d+', string2))
-    words2 = ' '.join(re.findall(r'\b[A-Za-z]+\b', string2))
-
-    number_similarity=jaccard_similarity(numbers1,numbers2)
-    words_similarity=jaccard_similarity(words1,words2)
-    return (number_similarity+words_similarity)/2
-
-def extract_website_domain(url):
-    parsed_url = urlparse(url)
-    return parsed_url.netloc
+    return lat, lon
 
 
-def
+def get_hail_data(address, start_date,end_date,radius_miles):
 
-
-
-
-
-
-    texts_links = []
-    for link in soup.find_all("a"):
-        t,l=link.get_text(), link.get("href")
-        if (l[:11]=='/url?q=http') and (len(t)>20 ):
-            texts_links.append((t,l))
+    start_date = pd.Timestamp(str(start_date)).strftime('%Y%m%d')
+    end_date = pd.Timestamp(str(end_date)).strftime('%Y%m%d')
+    date_years = pd.date_range(start=start_date, end=end_date, freq='M')
+    years = list(set([d.year for d in date_years]))
 
-    text = soup.get_text()
 
-
-
-    start=text.find(texts_links[i][0][:50])
-    try:
-        end=text.find(texts_links[i+1][0][:50])
-    except:
-        end=text.find('Related searches')
-
-    description=text[start:end]
-    texts_links_des.append((t_l[0],t_l[1],description))
+    if len(years)==0:
+        years=[pd.Timestamp(start_date).year]
 
-
-
-    df['Address Output']=df['Title'].str.extract(r'(.+? \d{5})').fillna("**DID NOT EXTRACT ADDRESS**")
+    # Geocode Address
+    lat, lon= geocode_address(address)
 
-
-
+    # Convert Lat Lon to row & col on Array
+    transform = pickle.load(open('Data/transform_mrms.pkl', 'rb'))
+    row, col = rasterio.transform.rowcol(transform, lon, lat)
 
-
-
-
-
-
-
+    files = [
+        'Data/2023_hail.h5',
+        'Data/2022_hail.h5',
+        'Data/2021_hail.h5',
+        'Data/2020_hail.h5'
+    ]
 
+    files_choosen=[i for i in files if any(i for j in years if str(j) in i)]
 
-
-
+    # Query and Collect H5 Data
+    all_data=[]
+    all_dates=[]
+    for file in files_choosen:
+        with h5py.File(file, 'r') as f:
+            # Get Dates from H5
+            dates = f['dates'][:]
+            date_idx=np.where((dates>=int(start_date)) & (dates<=int(end_date)) )[0]
+
+            # Select Data by Date and Radius
+            dates=dates[date_idx]
+            data = f['hail'][date_idx, row-radius_miles:row +
+                             radius_miles+1, col-radius_miles:col+radius_miles+1]
+
+            all_data.append(data)
+            all_dates.append(dates)
 
-
+    data_all=np.vstack(all_data)
+    dates_all=np.concatenate(all_dates)
 
-
-
-
-    df.insert(0,'Address Input',address)
+    # Convert to Inches
+    data_mat = np.where(data_all < 0, 0, data_all)*0.0393701
 
-
-
+    # Get Radius of Data
+    disk_mask = np.where(disk(radius_miles)==1,True, False)
+    data_mat = np.where(disk_mask, data_mat, -1)
 
-
-
-        return google_address(addresses)
-    except:
-        return pd.DataFrame({'Address Input':[addresses]})
-
-
-def process_multiple_address(addresses):
-    results=Parallel(n_jobs=32, prefer="threads")(delayed(catch_errors)(i) for i in addresses)
-    return results
+    # Find Max of Data
+    data_max = np.max(data_mat, axis=(1, 2)).round(3)
 
+    # Process to DataFrame
+    date_range_days = pd.date_range(start_date,end_date)
+    df_data=pd.DataFrame({'Date':dates_all,
+                          'Hail_max':data_max})
+    df_data['Date']=pd.to_datetime(df_data['Date'],format='%Y%m%d')
+    df_data=df_data.set_index('Date')
 
-
-
+    df_data = df_data.reindex(date_range_days, fill_value=0).reset_index().rename(columns={'index': 'Date'})
+    df_data['Date']=df_data['Date'].dt.strftime('%Y-%m-%d')
+    return df_data
+
 
-
-
+@app.get('/Hail_Docker_Data')
+async def predict(address, start_date,end_date, radius_miles):
 
+    try:
+        results = get_hail_data(address, start_date,end_date, radius_miles)
+    except:
+        results=pd.DataFrame({'Date':['error'],'df_data':['error']})
+
+    return results.to_json()
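A few notes on the added code. The pickled transform maps geographic coordinates onto the hail grid; a minimal sketch of that lookup, with a made-up north-up grid standing in for Data/transform_mrms.pkl (the origin and 0.01-degree cell size are assumptions, not the real MRMS grid):

from affine import Affine
import rasterio.transform

# Hypothetical grid: 0.01-degree cells, upper-left corner at lon -130, lat 55.
transform = Affine.translation(-130.0, 55.0) * Affine.scale(0.01, -0.01)

# rowcol takes x (lon) then y (lat) and returns array indices, as in main.py.
row, col = rasterio.transform.rowcol(transform, -97.5, 35.2)
print(row, col)  # (1980, 3250) on this toy grid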
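The yearly H5 files are read with one index array on the date axis plus two slices for the spatial window, so only the needed block comes off disk. A sketch under the layout the code implies (a 'dates' dataset of YYYYMMDD integers alongside a 3-D 'hail' dataset; the window bounds here are made up):

import h5py
import numpy as np

with h5py.File('Data/2021_hail.h5', 'r') as f:
    dates = f['dates'][:]  # YYYYMMDD integers, one per time step
    idx = np.where((dates >= 20210601) & (dates <= 20210630))[0]
    # Sorted index array on axis 0 plus plain slices on axes 1 and 2.
    block = f['hail'][idx, 98:103, 198:203]
print(block.shape)  # (n_selected_dates, 5, 5)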
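Because that slice is a square window, the disk footprint from skimage keeps only cells inside the circular radius before the per-date maximum is taken; note the radius is applied in grid cells, so the code implicitly treats one cell as roughly one mile. The masking step in miniature:

import numpy as np
from skimage.morphology import disk

radius = 2
window = np.arange(25, dtype=float).reshape(1, 5, 5)  # one date, 5x5 window

disk_mask = disk(radius).astype(bool)     # circular footprint in a square array
masked = np.where(disk_mask, window, -1)  # corners drop out, as in main.py
print(np.max(masked, axis=(1, 2)))        # per-date max inside the circle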
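Reindexing against the full daily range is what turns the sparse event dates into a continuous series with explicit zeros; the same step in miniature:

import pandas as pd

df = pd.DataFrame({'Date': pd.to_datetime(['20210603'], format='%Y%m%d'),
                   'Hail_max': [0.75]}).set_index('Date')
full = df.reindex(pd.date_range('2021-06-01', '2021-06-05'), fill_value=0)
full = full.reset_index().rename(columns={'index': 'Date'})
print(full)  # five rows; zero Hail_max on the four days without data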
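Finally, a sketch of calling the new endpoint once the app is served (this assumes uvicorn main:app listening on port 8000; the address and dates are placeholders):

import requests

params = {
    'address': '1600 Pennsylvania Ave NW, Washington, DC 20500',
    'start_date': '2021-01-01',
    'end_date': '2021-12-31',
    'radius_miles': 2,
}
resp = requests.get('http://127.0.0.1:8000/Hail_Docker_Data', params=params)
print(resp.json())  # JSON-encoded DataFrame of Date vs Hail_max

Because the endpoint's parameters are unannotated, FastAPI passes them through as strings: the date strings survive pd.Timestamp, but a string radius_miles makes the window arithmetic raise, which the bare except in predict converts into the error frame. Annotating radius_miles: int would be the safer signature.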