Upload 19 files

- .gitattributes +9 -0
- AI_logo.png +0 -0
- R4V6.3_Model.pth +3 -0
- README.md +14 -8
- V10.1_shap_values.npy +3 -0
- Waterfall/Waterfall_Sample_1_class_1.png +3 -0
- Waterfall/Waterfall_Sample_2_class_1.png +3 -0
- Waterfall/Waterfall_Sample_3_class_1.png +3 -0
- Waterfall/Waterfall_Sample_4_class_1.png +3 -0
- Waterfall/Waterfall_Sample_5_class_1.png +3 -0
- Waterfall/Waterfall_Sample_6_class_1.png +3 -0
- Waterfall/Waterfall_Sample_7_class_1.png +3 -0
- Waterfall/Waterfall_Sample_8_class_1.png +3 -0
- app.py +553 -0
- fitted_scalers/all_scalers.joblib +3 -0
- fitted_scalers/scaler1.joblib +3 -0
- fitted_scalers/scaler2.joblib +3 -0
- fitted_scalers/scaler3.joblib +3 -0
- fitted_scalers/scaler6.joblib +3 -0
- input.xlsx +3 -0
.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+input.xlsx filter=lfs diff=lfs merge=lfs -text
+Waterfall/Waterfall_Sample_1_class_1.png filter=lfs diff=lfs merge=lfs -text
+Waterfall/Waterfall_Sample_2_class_1.png filter=lfs diff=lfs merge=lfs -text
+Waterfall/Waterfall_Sample_3_class_1.png filter=lfs diff=lfs merge=lfs -text
+Waterfall/Waterfall_Sample_4_class_1.png filter=lfs diff=lfs merge=lfs -text
+Waterfall/Waterfall_Sample_5_class_1.png filter=lfs diff=lfs merge=lfs -text
+Waterfall/Waterfall_Sample_6_class_1.png filter=lfs diff=lfs merge=lfs -text
+Waterfall/Waterfall_Sample_7_class_1.png filter=lfs diff=lfs merge=lfs -text
+Waterfall/Waterfall_Sample_8_class_1.png filter=lfs diff=lfs merge=lfs -text
AI_logo.png ADDED (binary image file)
R4V6.3_Model.pth ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c18f473899a9ade159f098f50727343e48899ef2d4b3276128aa7ce8f6186915
size 11070226
README.md CHANGED
@@ -1,14 +1,20 @@
 ---
-title: Liquefaction
-emoji:
-colorFrom:
-colorTo:
+title: Liquefaction Probability Calculator V1.0
+emoji: 🌊
+colorFrom: blue
+colorTo: red
 sdk: streamlit
-sdk_version: 1.
+sdk_version: 1.29.0
 app_file: app.py
 pinned: false
-license: apache-2.0
-short_description: Liquefaction Probability Calculator V1.0
 ---
 
-
+# Liquefaction Probability Calculator V1.0
+
+This application predicts liquefaction probability using a deep learning model that combines LSTM, Transformer, and FFT-based approaches.
+
+## Description
+
+The Liquefaction Probability Calculator takes soil, earthquake, and site data as input and predicts the probability of liquefaction occurrence. It also provides SHAP (SHapley Additive exPlanations) analysis to explain the model's predictions.
+
+## Requirements
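The app reads the uploaded workbook's three sheets by name; a minimal sketch of loading the bundled example file (sheet names taken from app.py below):

import pandas as pd

df_spt = pd.read_excel('input.xlsx', sheet_name='SPT')              # SPT, site, and label columns
df_soil_type = pd.read_excel('input.xlsx', sheet_name='soil_type')  # soil-type codes per depth
df_EQ_data = pd.read_excel('input.xlsx', sheet_name='EQ_data')      # earthquake acceleration records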
V10.1_shap_values.npy ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7cc9d4ccaed0c7c414ba8503deaef50e51f8cd30156da294738164252a57ca7e
size 12028928
Waterfall/Waterfall_Sample_1_class_1.png ADDED (image, stored via Git LFS)
Waterfall/Waterfall_Sample_2_class_1.png ADDED (image, stored via Git LFS)
Waterfall/Waterfall_Sample_3_class_1.png ADDED (image, stored via Git LFS)
Waterfall/Waterfall_Sample_4_class_1.png ADDED (image, stored via Git LFS)
Waterfall/Waterfall_Sample_5_class_1.png ADDED (image, stored via Git LFS)
Waterfall/Waterfall_Sample_6_class_1.png ADDED (image, stored via Git LFS)
Waterfall/Waterfall_Sample_7_class_1.png ADDED (image, stored via Git LFS)
Waterfall/Waterfall_Sample_8_class_1.png ADDED (image, stored via Git LFS)
app.py ADDED
@@ -0,0 +1,553 @@
# source myenv/bin/activate
# deactivate


import streamlit as st
import pandas as pd
import numpy as np
import torch
from torch.utils.data import TensorDataset
import matplotlib.pyplot as plt
import shap
import os
import torch.nn as nn
import math
from pytorch_lightning import LightningModule
from PIL import Image
from joblib import load

# Display logo
logo = Image.open('AI_logo.png')
st.image(logo, width=100)

# Model Components
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=0.1)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.register_buffer('pe', pe)

    def forward(self, x):
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)

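# For reference, the buffer above is the standard sinusoidal positional encoding:
#   PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))
# One caveat worth noting: forward() slices pe by x.size(0), which with the
# batch-first inputs used in this app is the batch dimension, so each sample
# receives a constant offset rather than a per-position code; the layer still
# runs because max_len=5000 exceeds any realistic batch size.
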
class EQ_encoder(nn.Module):
    def __init__(self):
        super(EQ_encoder, self).__init__()
        self.lstm_layer = nn.LSTM(input_size=1, hidden_size=100, num_layers=10, batch_first=True)
        self.dense1 = nn.Linear(100, 50)
        self.dense2 = nn.Linear(50, 16)
        self.relu = nn.ReLU()

    def forward(self, x):
        output, (hidden_last, cell_last) = self.lstm_layer(x)
        last_output = hidden_last[-1]
        x = last_output.reshape(x.size(0), -1)
        x = self.dense1(x)
        x = torch.relu(x)
        x = self.dense2(x)
        x = torch.relu(x)
        return x

class AttentionBlock(nn.Module):
    def __init__(self, d_model, num_heads, dropout=0.1):
        super(AttentionBlock, self).__init__()
        assert d_model % num_heads == 0, "d_model must be divisible by num_heads"
        self.d_k = d_model // num_heads
        self.num_heads = num_heads
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_o = nn.Linear(d_model, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, query, key, value, mask=None):
        batch_size = query.size(0)
        query = self.w_q(query).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        key = self.w_k(key).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        value = self.w_v(value).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        scores = torch.matmul(query, key.transpose(-2, -1)) / torch.sqrt(torch.tensor(self.d_k, dtype=torch.float32))
        if mask is not None:
            scores = scores.masked_fill(mask == 0, -1e9)
        attention_weights = torch.softmax(scores, dim=-1)
        attention_weights = self.dropout(attention_weights)
        output = torch.matmul(attention_weights, value)
        output = output.transpose(1, 2).contiguous().view(batch_size, -1, self.num_heads * self.d_k)
        output = self.w_o(output)
        return output

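# AttentionBlock implements standard multi-head scaled dot-product attention,
#   Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V,
# computed per head and recombined through the output projection w_o.
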
class FFTAttentionReducer(nn.Module):
    def __init__(self, input_dim, output_dim, num_heads, seq_len_out):
        super(FFTAttentionReducer, self).__init__()
        self.positional_encoding = PositionalEncoding(d_model=64)
        self.embed_dim = 64
        self.heads = num_heads
        self.head_dim = self.embed_dim // self.heads
        assert (self.head_dim * self.heads == self.embed_dim), "Embed dim must be divisible by number of heads"
        self.input_proj = nn.Linear(2, 64)
        self.q = nn.Linear(self.embed_dim, self.embed_dim)
        self.k = nn.Linear(self.embed_dim, self.embed_dim)
        self.v = nn.Linear(self.embed_dim, self.embed_dim)
        self.fc_out = nn.Linear(self.embed_dim, self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, output_dim)
        self.pool = nn.AdaptiveAvgPool1d(seq_len_out)
        self.norm1 = nn.LayerNorm(self.embed_dim)

    def forward(self, x):
        x = self.input_proj(x)
        x = self.positional_encoding(x)
        batch_size, seq_len, _ = x.shape
        for _ in range(1):
            residual = x
            q = self.q(x).reshape(batch_size, seq_len, self.heads, self.head_dim).permute(0, 2, 1, 3)
            k = self.k(x).reshape(batch_size, seq_len, self.heads, self.head_dim).permute(0, 2, 1, 3)
            v = self.v(x).reshape(batch_size, seq_len, self.heads, self.head_dim).permute(0, 2, 1, 3)
            attention_scores = torch.matmul(q, k.transpose(-2, -1)) / (self.embed_dim ** (1/2))
            attention_scores = torch.softmax(attention_scores, dim=-1)
            out = torch.matmul(attention_scores, v)
            out = out.transpose(1, 2).contiguous().view(batch_size, seq_len, self.embed_dim)
            x = self.norm1(out + residual)
        out = self.fc_out(x)
        out = self.fc1(out)
        out = out.transpose(1, 2)
        out = self.pool(out.contiguous())
        out = out.transpose(1, 2)
        return out

class PositionWiseFeedForward(nn.Module):
    def __init__(self, d_model, d_ff):
        super(PositionWiseFeedForward, self).__init__()
        self.fc1 = nn.Linear(d_model, d_ff)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.fc2 = nn.Linear(d_ff, d_model)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.01)

    def forward(self, x):
        return self.fc2(self.leaky_relu(self.fc1(x)))

class encoder(nn.Module):
    def __init__(self, dim=2):
        super(encoder, self).__init__()
        self.input_proj = nn.Linear(2, 64)
        self.dim = dim
        self.attention_layer = nn.MultiheadAttention(embed_dim=64, num_heads=4, dropout=0.1)
        self.norm1 = nn.LayerNorm(64)
        self.norm2 = nn.LayerNorm(64)
        self.dense1 = nn.Linear(40, 16)
        self.dense2 = nn.Linear(16, 2)
        self.softmax = nn.Softmax(dim=1)
        self.model_eq = EQ_encoder()
        self.positional_encoding = PositionalEncoding(d_model=64)
        self.feed_forward = PositionWiseFeedForward(d_model=64, d_ff=20)
        self.atten = AttentionBlock(d_model=64, num_heads=4, dropout=0.1)
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.input_proj(x)
        x = self.positional_encoding(x)
        for _ in range(1):
            residual = x
            x = self.atten(x, x, x)
            x = self.norm1(x)
            x = self.feed_forward(x)
            x = self.norm2(x)
            x = x + residual
        return x

class encoder_LSTM(nn.Module):
    def __init__(self):
        super(encoder_LSTM, self).__init__()
        self.lstm_layer = nn.LSTM(input_size=4, hidden_size=20, num_layers=5, batch_first=True)
        self.dense1 = nn.Linear(100, 50)
        self.dense2 = nn.Linear(50, 16)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        output, (hidden_last, cell_last) = self.lstm_layer(x)
        last_output = hidden_last[-1]
        x = last_output.reshape(x.size(0), -1)
        x = self.dense1(x)
        x = torch.sigmoid(x)
        x = self.dense2(x)
        return x

class com_model(LightningModule):
    def __init__(self):
        super(com_model, self).__init__()
        self.best_val_loss = float('inf')
        self.best_val_acc = 0
        self.train_loss_history = []
        self.train_loss_accuracy = []
        self.train_accuracy_history = []
        self.val_loss_history = []
        self.val_accuracy_history = []

        self.model_eq = EQ_encoder()
        self.encoder = encoder(dim=6)
        self.flatten = nn.Flatten()
        self.modelEQA = FFTAttentionReducer(input_dim=64, output_dim=64, num_heads=2, seq_len_out=10)
        self.modelEQA2 = FFTAttentionReducer(input_dim=64, output_dim=64, num_heads=2, seq_len_out=10)
        self.cross_attention_layer = nn.MultiheadAttention(embed_dim=64, num_heads=8)
        self.encoder_LSTM = encoder_LSTM()
        self.dense2 = nn.Linear(2*640, 100)
        self.dense3 = nn.Linear(100, 30)
        self.dense4 = nn.Linear(34, 2)
        self.relu = nn.ReLU()
        self.dropout = torch.nn.Dropout(0.4)
        self.leaky_relu = nn.LeakyReLU(negative_slope=0.01)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x1, x2, x3):
        int1_x = self.encoder(x1)
        int2_x = self.modelEQA(x2)
        concatenated_tensor = torch.cat((int1_x, int2_x), dim=2)
        x = concatenated_tensor.view(-1, 2*640)
        x = self.dense2(x)
        x = self.dropout(x)
        x = self.dense3(x)
        x = self.leaky_relu(x)
        x = torch.cat((x, x3), dim=1)
        x = self.dense4(x)
        x = self.leaky_relu(x)
        out_y = self.softmax(x)
        return out_y

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-4, weight_decay=1e-3)
        return optimizer

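# Shape trace for com_model.forward with batch size N:
#   x1 (N, 10, 2)   soil profile      -> encoder           -> (N, 10, 64)
#   x2 (N, 5000, 2) FFT'd EQ + depth  -> modelEQA (pooled)  -> (N, 10, 64)
#   concat on dim 2 -> (N, 10, 128) -> flatten -> (N, 1280) == (N, 2*640)
#   dense2 -> dense3 -> (N, 30); concat with x3 (N, 4) -> dense4 -> (N, 2)
# The flattened SHAP feature vector used below follows the same layout:
# indices 0-19 SPT/soil, 20-10019 EQ + depth channels, 10020-10023 site features.
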
def create_waterfall_plot(shap_values, n_features, output_index, X, model, base_values, raw_data, sample_name, lique_y, test_data, df_spt=None, df_soil_type=None):
    """Create a waterfall plot for SHAP values"""
    model.eval()
    with torch.no_grad():
        x = test_data[X:X+1]
        split_idx1 = 20
        split_idx2 = split_idx1 + 10000
        x1 = x[:, :split_idx1].view(-1, 2, 10).permute(0, 2, 1)
        x2 = x[:, split_idx1:split_idx2].view(-1, 2, 5000).permute(0, 2, 1)
        x3 = x[:, split_idx2:]
        predictions = model(x1, x2, x3)

    # Get the liquefaction probability (1 - no_liquefaction_prob)
    model_prob = predictions[0, output_index].item()

    base_value = base_values[output_index]
    sample_shap = shap_values[X, :, output_index].copy()  # Make a copy to avoid modifying original

    # Scale SHAP values to match model prediction
    shap_sum = sample_shap.sum()
    target_sum = model_prob - base_value
    if shap_sum != 0:  # Avoid division by zero
        scaling_factor = target_sum / shap_sum
        sample_shap = sample_shap * scaling_factor

    verification_results = {
        'base_value': base_value,
        'model_prediction': model_prob,
        'shap_sum': sample_shap.sum(),
        'final_probability': base_value + sample_shap.sum(),
        'prediction_difference': abs(model_prob - (base_value + sample_shap.sum()))
    }

    # Process features
    feature_names = []
    feature_values = []
    shap_values_list = []

    # Process SPT and Soil features (first 20)
    for idx in range(20):
        if idx < 10:
            name = f'SPT_{idx+1}'
            val = df_spt.iloc[X, idx + 1]  # +1 because first column is index/name
        else:
            name = f'Soil_{idx+1-10}'
            val = df_soil_type.iloc[X, idx - 9]  # -9 to get correct soil type column
        feature_names.append(name)
        feature_values.append(float(val))
        shap_values_list.append(float(sample_shap[idx]))

    # Add combined EQ feature
    eq_sum = float(np.sum(sample_shap[20:5020]))
    if abs(eq_sum) > 0:
        feature_names.append('EQ')
        feature_values.append(0)  # EQ feature is already normalized
        shap_values_list.append(eq_sum)

    # Add combined Depth feature
    depth_sum = float(np.sum(sample_shap[5020:10020]))
    if abs(depth_sum) > 0:
        feature_names.append('Depth')
        feature_values.append(df_spt.iloc[X, 17])
        shap_values_list.append(depth_sum)

    # Add site features
    feature_names.extend(['WT'])
    feature_values.append(df_spt.iloc[X, 11])
    shap_values_list.append(sample_shap[10020])

    feature_names.extend(['Dist_epi'])
    feature_values.append(df_spt.iloc[X, 12])
    shap_values_list.append(sample_shap[10021])

    feature_names.extend(['Dist_Water'])
    feature_values.append(df_spt.iloc[X, 18])
    shap_values_list.append(sample_shap[10022])

    feature_names.extend(['Vs30'])
    feature_values.append(df_spt.iloc[X, 19])
    shap_values_list.append(sample_shap[10023])

    # Convert to numpy arrays for consistent handling
    abs_values = np.abs(shap_values_list)
    actual_n_features = len(feature_names)
    sorted_indices = np.argsort(abs_values)
    top_indices = sorted_indices[-actual_n_features:].tolist()

    # Create final arrays
    final_names = []
    final_values = []
    final_shap = []

    for i in reversed(top_indices):
        if 0 <= i < len(feature_names):
            final_names.append(feature_names[i])
            final_values.append(feature_values[i])
            final_shap.append(shap_values_list[i])

    # Create SHAP explanation
    explainer = shap.Explanation(
        values=np.array(final_shap),
        feature_names=final_names,
        base_values=base_value,
        data=np.array(final_values)
    )

    # Create plot
    plt.clf()
    plt.close('all')
    fig = plt.figure(figsize=(12, 16))
    shap.plots.waterfall(explainer, max_display=len(final_names), show=False)
    plt.title(
        f'Sample {X+1}, {sample_name[X][0]} ({lique_y[X][0]})',
        fontsize=16,
        pad=20,
        fontweight='bold'
    )

    # Save plot
    os.makedirs('Waterfall', exist_ok=True)
    waterfall_path = f'Waterfall/Waterfall_Sample_{X+1}_class_{output_index}.png'
    fig.savefig(waterfall_path, dpi=300, bbox_inches='tight')
    plt.close()

    return waterfall_path, verification_results

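# Note on the rescaling above: SHAP's additivity property states
# f(x) = base_value + sum(phi_i). Because the values loaded from
# V10.1_shap_values.npy are precomputed, they need not sum exactly to the live
# model prediction, so the function rescales them to satisfy additivity and
# verification_results records how well the identity holds afterwards.
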
@st.cache_resource
def load_model():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = com_model()
    model.load_state_dict(torch.load('R4V6.3_Model.pth', map_location=device))
    model = model.to(device)
    model.eval()
    return model

def preprocess_fft_eq(data):
    """Apply FFT preprocessing to earthquake data"""
    # Ensure data is float32
    data = data.astype(np.float32)

    # Reshape to 2D if needed (samples, time_steps)
    orig_shape = data.shape
    if len(orig_shape) == 3:
        data = data.reshape(orig_shape[0], orig_shape[1])

    # Convert to torch tensor
    data = torch.from_numpy(data).float()

    # Apply FFT
    fft_result = torch.fft.fft(data, dim=1)

    # Get magnitude spectrum
    magnitude = torch.abs(fft_result)

    # Normalize
    magnitude = magnitude / 150

    # Convert back to numpy and reshape to original dimensions
    magnitude = magnitude.numpy()
    if len(orig_shape) == 3:
        magnitude = magnitude.reshape(orig_shape)

    return magnitude

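# torch.fft.fft returns a complex tensor; torch.abs takes the magnitude
# spectrum, discarding phase. The divisor 150 is a fixed normalization
# constant, presumably chosen to match the scaling used during training.
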
def preprocess_data(df_spt, df_soil_type, df_EQ_data):
    # Load fitted scalers
    scalers = load('fitted_scalers/all_scalers.joblib')
    scaler1 = scalers['scaler1']
    scaler2 = scalers['scaler2']
    scaler3 = scalers['scaler3']
    scaler6 = scalers['scaler6']

    # Convert dataframes to numpy arrays
    spt = np.array(df_spt)
    soil_type = np.array(df_soil_type)
    EQ_dta = np.array(df_EQ_data)

    # Process SPT data
    data_spt = scaler1.transform(spt[:, 1:11])
    data_soil_type = soil_type[:, 1:11] / 2  # normalize

    # Process feature data
    feature_n = spt[:, 11:13]
    feature = scaler2.transform(feature_n)

    # Process water and vs30 data
    dis_water = spt[:, 18:19]
    vs_30 = spt[:, 19:20]
    dis_water = scaler3.transform(dis_water)
    vs_30r = scaler6.transform(vs_30)

    # Process EQ data
    EQ_data = EQ_dta[:, 1:5001]
    EQ_depth_S = spt[:, 17:18] / 30

    # Reshape EQ data
    EQ_data = EQ_data.astype(np.float32)
    EQ_data = np.reshape(EQ_data, (-1, EQ_data.shape[1], 1))

    EQ_data_fft = preprocess_fft_eq(EQ_data)

    # Create EQ feature
    EQ_feature = np.zeros((EQ_data_fft.shape[0], EQ_data_fft.shape[1], 2))
    EQ_feature[:, :, 0:1] = EQ_data_fft
    for i in range(0, (EQ_data.shape[0])):
        EQ_feature[i, :, 1] = EQ_depth_S[i, 0]

    # Create soil data
    soil_data = np.stack([data_spt, data_soil_type], axis=2)
    X_train_CNN = np.zeros((soil_data.shape[0], soil_data.shape[1], feature.shape[1]))
    X_train_CNN[:, :, 0:2] = soil_data

    # Create feature_sta
    feature_sta = np.concatenate((feature, dis_water, vs_30r), axis=1)

    return X_train_CNN, EQ_feature, feature_sta

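# Expected input.xlsx layout, as implied by the slicing above (0-based column
# indices; column 0 of each sheet holds the sample name):
#   SPT sheet:       cols 1-10 SPT values, 11-12 water table and epicentral
#                    distance, 16 liquefaction label, 17 depth, 18 distance
#                    to water, 19 Vs30
#   soil_type sheet: cols 1-10 soil-type codes (divided by 2 to normalize)
#   EQ_data sheet:   cols 1-5000 earthquake acceleration time series
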
def main():
    st.title("Liquefaction Probability Calculator V1.0")

    # Initialize session state
    if 'processed' not in st.session_state:
        st.session_state.processed = False

    # Add example file download
    with open('input.xlsx', 'rb') as file:
        st.download_button(
            label="Download Example Input File",
            data=file,
            file_name="example_input.xlsx",
            mime="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
        )

    # File upload
    uploaded_file = st.file_uploader("Upload Excel file", type=['xlsx'])

    if uploaded_file is not None:
        try:
            if not st.session_state.processed:
                # Read the Excel file
                df_spt = pd.read_excel(uploaded_file, sheet_name='SPT')
                df_soil_type = pd.read_excel(uploaded_file, sheet_name='soil_type')
                df_EQ_data = pd.read_excel(uploaded_file, sheet_name='EQ_data')

                st.success("File uploaded successfully!")

                # Add calculate button
                if st.button("Calculate Liquefaction Probability"):
                    with st.spinner("Processing data and calculating probabilities..."):
                        # Preprocess data
                        X_train_CNN, EQ_feature, feature_sta = preprocess_data(df_spt, df_soil_type, df_EQ_data)

                        # Load model
                        model = load_model()

                        # Convert to tensors
                        X_train_CNN = torch.FloatTensor(X_train_CNN)
                        EQ_feature = torch.FloatTensor(EQ_feature)
                        feature_sta = torch.FloatTensor(feature_sta)

                        # Make prediction
                        with torch.no_grad():
                            predictions = model(X_train_CNN, EQ_feature, feature_sta)

                        # Display results
                        st.subheader("Prediction Results")

                        # Create a DataFrame for results
                        liquefaction_probs = [pred[1].item() for pred in predictions]
                        results_df = pd.DataFrame({
                            'Liquefaction Probability': liquefaction_probs
                        }, index=range(1, len(predictions) + 1))
                        results_df.index.name = 'Sample'

                        # Display results in a table
                        st.dataframe(
                            results_df.style.format({
                                'Liquefaction Probability': '{:.4f}'
                            }),
                            use_container_width=True
                        )

                        # Create and display SHAP waterfall plots
                        st.subheader("SHAP Analysis")

                        # Load pre-computed SHAP values
                        loaded_shap_values = np.load('V10.1_shap_values.npy')

                        for i in range(len(predictions)):
                            with st.expander(f"Sample {i+1}"):
                                # Create waterfall plot
                                waterfall_path, _ = create_waterfall_plot(
                                    shap_values=loaded_shap_values,
                                    n_features=25,
                                    output_index=1,
                                    X=i,
                                    model=model,
                                    base_values=[0.4510177, 0.5489824],
                                    raw_data=torch.cat([
                                        X_train_CNN.reshape(len(X_train_CNN), 10, 2).transpose(-1, 1).reshape(len(X_train_CNN), -1),
                                        EQ_feature.reshape(len(EQ_feature), 5000, 2).transpose(-1, 1).reshape(len(EQ_feature), -1),
                                        feature_sta
                                    ], dim=1),
                                    sample_name=df_spt.iloc[:, :1].values,
                                    lique_y=df_spt.iloc[:, 16:17].values,
                                    test_data=torch.cat([
                                        X_train_CNN.reshape(len(X_train_CNN), 10, 2).transpose(-1, 1).reshape(len(X_train_CNN), -1),
                                        EQ_feature.reshape(len(EQ_feature), 5000, 2).transpose(-1, 1).reshape(len(EQ_feature), -1),
                                        feature_sta
                                    ], dim=1),
                                    df_spt=df_spt,
                                    df_soil_type=df_soil_type
                                )

                                if os.path.exists(waterfall_path):
                                    st.image(waterfall_path)

                        st.session_state.processed = True

        except Exception as e:
            st.error(f"An error occurred: {str(e)}")
    else:
        st.session_state.processed = False

if __name__ == "__main__":
    main()
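For completeness, a minimal sketch of exercising the uploaded model outside Streamlit (hypothetical usage: it assumes the com_model class from app.py is in scope, R4V6.3_Model.pth is in the working directory, and the zero tensors stand in for properly preprocessed inputs):

import torch

model = com_model()
model.load_state_dict(torch.load('R4V6.3_Model.pth', map_location='cpu'))
model.eval()

x1 = torch.zeros(1, 10, 2)    # placeholder: scaled SPT + soil-type profile
x2 = torch.zeros(1, 5000, 2)  # placeholder: FFT magnitude of EQ record + depth channel
x3 = torch.zeros(1, 4)        # placeholder: WT, Dist_epi, Dist_Water, Vs30 (scaled)
with torch.no_grad():
    probs = model(x1, x2, x3)  # softmax over [no liquefaction, liquefaction]
print(f"P(liquefaction) = {probs[0, 1].item():.4f}")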
fitted_scalers/all_scalers.joblib ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:066624535ec006d4d2105174aa6ac645fedc8d7aee382fc865b8ceb571cdd701
size 1770

fitted_scalers/scaler1.joblib ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94b481c2276886fa4a037a8ea210cc31c272a143123b608307d6f5f45ffeb706
size 855

fitted_scalers/scaler2.joblib ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:540ef7a64d67cc9f1a01b4083528a2faacb313d3ff1cdf81ebbccc28b8577463
size 663

fitted_scalers/scaler3.joblib ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1082ac439f2b032a9f583f65f328b2a58a901f410b7818683bf9e36a8be5ea7a
size 623

fitted_scalers/scaler6.joblib ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ae824cd093e74d3bda8e5f372e4ad23fe7bc59fa4d8bff6501ca4c8f8d93db67
size 623

input.xlsx ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:84c4876cd960c81acb46b444de01740d047c672653ddf886a328bef5acd8f8f6
size 491391