import gradio as gr
import pandas as pd
import numpy as np
import json
from io import StringIO
from collections import OrderedDict
import os

# ---------------------- Accessing data from Notion ---------------------- #


from notion_client import Client as client_notion

from config import (
    landuseDatabaseId,
    subdomainAttributesDatabaseId,
    landuseColumnName,
    subdomainColumnName,
    sqmPerEmployeeColumnName,
    thresholdsColumnName,
    maxPointsColumnName,
    domainColumnName,
)
from imports_utils import (
    fetch_all_database_pages,
    get_property_value,
    notion,
    notionToken,
    fetchDomainMapper,
    fetchSubdomainMapper,
)

if notionToken is None:
    raise Exception("Notion token not found. Please check the environment variables.")
else:
    print("Notion token found successfully!")
    landuse_attributes = fetch_all_database_pages(notion, landuseDatabaseId)
    livability_attributes = fetch_all_database_pages(notion, subdomainAttributesDatabaseId)
    landuseMapperDict = fetchDomainMapper(landuse_attributes)
    livabilityMapperDict = fetchSubdomainMapper(livability_attributes)
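
# NOTE: the exact mapper shapes are defined in imports_utils; judging from how
# they are consumed below (e.g. the "subdomain livability" and "domain" fields),
# they are assumed to look roughly like this (illustrative keys only):
#
#   landuseMapperDict    = {"<landuse>":   {"subdomain livability": "<subdomain>", ...}}
#   livabilityMapperDict = {"<subdomain>": {"domain": ["<domain>", ...], ...}}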




# ---------------------- Accessing data from Speckle ---------------------- #


from specklepy.api.client import SpeckleClient
from specklepy.api.credentials import get_default_account, get_local_accounts
from specklepy.transports.server import ServerTransport
from specklepy.api import operations
from specklepy.objects.geometry import Polyline, Point
from specklepy.objects import Base

import imports_utils
import speckle_utils
import data_utils

from config import streamId, dmBranchName, dmCommitId, luBranchName, luCommitId
from imports_utils import speckleToken
from imports_utils import fetchDistanceMatrices
from config import distanceMatrixActivityNodes
from config import distanceMatrixTransportStops


if speckleToken is None:
    raise Exception("Speckle token not found")
else:
    print("Speckle token found successfully!")

    CLIENT = SpeckleClient(host="https://speckle.xyz/")
    account = get_default_account()
    CLIENT.authenticate_with_token(token=speckleToken)

    streamDistanceMatrices = speckle_utils.getSpeckleStream(streamId, dmBranchName, CLIENT, dmCommitId)
    matrices = fetchDistanceMatrices(streamDistanceMatrices)
    streamLanduses = speckle_utils.getSpeckleStream(streamId, luBranchName, CLIENT, luCommitId)
    streamData = streamLanduses["@Data"]["@{0}"]

    df_speckle_lu = speckle_utils.get_dataframe(streamData, return_original_df=False)
    df_lu = df_speckle_lu.copy()
    df_lu = df_lu.astype(str)
    df_lu = df_lu.set_index("ids", drop=False)

    df_dm = matrices[distanceMatrixActivityNodes]
    df_dm_transport = matrices[distanceMatrixTransportStops]
    dm_dictionary = df_dm.to_dict('index')
    df_dm_transport_dictionary = df_dm_transport.to_dict('index')

    # keep only the land-use columns ("lu+...") for the nodes present in the distance matrix
    mask_connected = df_dm.index.tolist()
    lu_columns = [name for name in df_lu.columns if name.startswith("lu+")]

    df_lu_filtered = df_lu[lu_columns].loc[mask_connected]
    df_lu_filtered.columns = [col.replace('lu+', '') for col in df_lu_filtered.columns]
    df_lu_filtered.columns = [col.replace('ASSETS+', '') for col in df_lu_filtered.columns]
    df_lu_filtered = df_lu_filtered.astype(int)
    # merge duplicate land-use columns (same name after prefix stripping) by summing them
    df_lu_filtered = df_lu_filtered.T.groupby(level=0).sum().T

    df_lu_filtered_dict = df_lu_filtered.to_dict('index')

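# Column naming convention, inferred from the prefix stripping above: a raw
# Speckle column such as "lu+ASSETS+Residential" (hypothetical name) becomes
# "Residential"; columns that collapse to the same cleaned name are summed by
# the groupby above.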



def test(input_json):
    print("Received input")
    # Parse the input JSON string; fall back to swapping single quotes for
    # double quotes, since Grasshopper may send Python-style dict literals.
    # Note that this fallback breaks on strings containing apostrophes.
    try:
        inputs = json.loads(input_json)
    except json.JSONDecodeError:
        inputs = json.loads(input_json.replace("'", '"'))

    
    # ------------------------- Accessing input data from Grasshopper ------------------------- #
    

    matrix = inputs['input']["matrix"]
    matrix_transport = inputs['input']["transportMatrix"]
    landuses = inputs['input']["landuse_areas"]

    # Prefer the land uses fetched from Speckle; fall back to the land-use
    # areas sent from Grasshopper if the Speckle data is unavailable.
    if df_lu_filtered is None or df_lu_filtered.empty:
        df_landuses = pd.DataFrame(landuses).T
    else:
        df_landuses = df_lu_filtered
    df_landuses = df_landuses.round(0).astype(int)
    
    attributeMapperDict_gh = inputs['input']["attributeMapperDict"]
    landuseMapperDict_gh = inputs['input']["landuseMapperDict"]

    alpha = float(inputs['input']["alpha"])
    threshold = float(inputs['input']["threshold"])

    df_matrix = pd.DataFrame(matrix).T
    df_matrix = df_matrix.round(0).astype(int)
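
    # For reference, the expected shape of the input payload, assembled from
    # the keys read above (values are placeholders, not a confirmed schema):
    #
    # {
    #     "input": {
    #         "matrix": {...},               # activity-node distance matrix
    #         "transportMatrix": {...},      # transport-stop distance matrix
    #         "landuse_areas": {...},        # land-use areas per node
    #         "attributeMapperDict": {...},
    #         "landuseMapperDict": {...},
    #         "alpha": 0.0038,               # passed to computeAccessibility
    #         "threshold": 600               # passed to computeAccessibility
    #     }
    # }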



    from imports_utils import splitDictByStrFragmentInColumnName

    # Substrings identifying the transport modes in the matrix column names
    transportModes = ["DRT", "GMT", "HSR"]

    result_dicts = splitDictByStrFragmentInColumnName(df_dm_transport_dictionary, transportModes)

    # Accessing each mode's dictionary
    art_dict = result_dicts["DRT"]
    gmt_dict = result_dicts["GMT"]

    df_art_matrix = pd.DataFrame(art_dict).T
    df_art_matrix = df_art_matrix.round(0).astype(int)
    df_gmt_matrix = pd.DataFrame(gmt_dict).T
    df_gmt_matrix = df_gmt_matrix.round(0).astype(int)
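
    # splitDictByStrFragmentInColumnName is project code (imports_utils); from
    # its use here it is assumed to bucket matrix columns by substring match,
    # along the lines of this sketch:
    #
    #   def splitDictByStrFragmentInColumnName(d, fragments):
    #       return {frag: {row: {col: v for col, v in cols.items() if frag in col}
    #                      for row, cols in d.items()}
    #               for frag in fragments}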
    

    # create a mask based on the matrix size and ids, crop activity nodes to the mask
    mask_connected = df_dm.index.tolist()

    valid_indexes = [idx for idx in mask_connected if idx in df_landuses.index]
    # Identify and report missing indexes
    missing_indexes = set(mask_connected) - set(valid_indexes)
    if missing_indexes:
        print(f"Error: The following indexes were not found in the DataFrame: {missing_indexes}, length: {len(missing_indexes)}")
    
    # Apply the filtered mask
    df_landuses_filtered = df_landuses.loc[valid_indexes]

    """
    # find a set of unique domains, to which subdomains are aggregated    
    temp = []
    for key, values in livabilityMapperDict.items():
      domain = livabilityMapperDict[key]['domain']
      for item in domain:
        if ',' in item:
          domain_list = item.split(',')
          livabilityMapperDict[key]['domain'] = domain_list
          for domain in domain_list:
            temp.append(domain) 
        else:
          if item != 0: 
              temp.append(item)  
    
    domainsUnique = list(set(temp))

    
    # find a list of unique subdomains, to which land uses are aggregated
    temp = []    
    for key, values in landuseMapperDict.items():
      subdomain = str(landuseMapperDict[key]["subdomain livability"])
      if subdomain != 0: 
        temp.append(subdomain) 
        
    subdomainsUnique = list(set(temp))

    """
    
    # findUniqueDomains / findUniqueSubdomains are assumed to live in
    # imports_utils alongside the other helpers used below.
    from imports_utils import findUniqueDomains
    from imports_utils import findUniqueSubdomains
    from imports_utils import landusesToSubdomains
    from imports_utils import FindWorkplacesNumber
    from imports_utils import computeAccessibility
    from imports_utils import computeAccessibility_pointOfInterest
    from imports_utils import remap
    from imports_utils import accessibilityToLivability

    domainsUnique = findUniqueDomains(livabilityMapperDict)
    subdomainsUnique = findUniqueSubdomains(landuseMapperDict)
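
    # landusesToSubdomains and FindWorkplacesNumber are project helpers; from
    # the call sites below they are assumed to return DataFrames indexed like
    # df_dm, with one column per livability subdomain plus a workplace count
    # used as the 'jobs' column later on.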


    
    
    # df_landuses_filtered (masked above) is used here so that the Grasshopper
    # fallback land uses are honoured as well as the Speckle ones.
    LivabilitySubdomainsWeights = landusesToSubdomains(df_dm, df_landuses_filtered, landuseMapperDict, subdomainsUnique)

    WorkplacesNumber = FindWorkplacesNumber(df_dm, livabilityMapperDict, LivabilitySubdomainsWeights, subdomainsUnique)

    # prepare an input weights dataframe for the parameter LivabilitySubdomainsInputs
    LivabilitySubdomainsInputs = pd.concat([LivabilitySubdomainsWeights, WorkplacesNumber], axis=1)

    subdomainsAccessibility = computeAccessibility(df_dm, LivabilitySubdomainsInputs, alpha, threshold)
    artAccessibility = computeAccessibility_pointOfInterest(df_art_matrix, 'ART', alpha, threshold)
    gmtAccessibility = computeAccessibility_pointOfInterest(df_gmt_matrix, 'GMT+HSR', alpha, threshold)

    AccessibilityInputs = pd.concat([subdomainsAccessibility, artAccessibility, gmtAccessibility], axis=1)
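
    # computeAccessibility itself is not shown here; given the (alpha, threshold)
    # pair it is assumed to apply a distance-decay weighting, e.g. something
    # like exp(-alpha * d) for distances d below threshold, summed per origin
    # node. This is an assumption about project code, not a confirmed formula.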
        

    if 'jobs' not in subdomainsAccessibility.columns:
        print("Error: Column 'jobs' does not exist in subdomainsAccessibility.")

    livability = accessibilityToLivability(df_dm,AccessibilityInputs,livabilityMapperDict,domainsUnique)
    

    livability_dictionary = livability.to_dict('index')
    LivabilitySubdomainsInputs_dictionary = LivabilitySubdomainsInputs.to_dict('index')
    subdomainsAccessibility_dictionary = AccessibilityInputs.to_dict('index')
    artmatrix = df_art_matrix.to_dict('index')

    LivabilitySubdomainsWeights_dictionary = LivabilitySubdomainsWeights.to_dict('index')
    
    
    # Prepare the output
    output = {
        "subdomainsAccessibility_dictionary": subdomainsAccessibility_dictionary,
        "livability_dictionary": livability_dictionary,
        "subdomainsWeights_dictionary": LivabilitySubdomainsInputs_dictionary,
        "luDomainMapper": landuseMapperDict,
        "attributeMapper": livabilityMapperDict,
        "fetchDm": dm_dictionary,
        "landuses": df_lu_filtered_dict
    }


    
    return json.dumps(output)


# Define the Gradio interface with a single JSON input
iface = gr.Interface(
    fn=test,
    inputs=gr.Textbox(label="Input JSON", lines=20, placeholder="Enter JSON with all parameters here..."),
    outputs=gr.JSON(label="Output JSON"),
    title="testspace"
)

iface.launch()
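
# A sketch of how a client might call this app once it is running, using the
# gradio_client package. The payload keys match those read in test(); the
# values are placeholders and the URL assumes the default local launch port.
#
#   from gradio_client import Client
#
#   payload = {"input": {"matrix": {}, "transportMatrix": {}, "landuse_areas": {},
#                        "attributeMapperDict": {}, "landuseMapperDict": {},
#                        "alpha": 0.0038, "threshold": 600}}
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict(json.dumps(payload), api_name="/predict")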