Spaces:
Running
Running
Romain Graux
committed on
Commit
·
0c74c50
1
Parent(s):
c20d7c1
Add logo, instructions and guidelines
Browse files- .gitmodules +1 -1
- app.py +138 -43
- examples/clean_data_example.xlsx +0 -0
- examples/noisy_data_example.xlsx +0 -0
- requirements.txt +3 -1
- res/spock_logo.png +0 -0
- spock +1 -1
.gitmodules
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
[submodule "spock"]
|
2 |
path = spock
|
3 |
-
url = https://github.com/rlaplaza/spock.git
|
|
|
1 |
[submodule "spock"]
|
2 |
path = spock
|
3 |
+
url = https://github.com/rlaplaza/spock.git
|
app.py
CHANGED
@@ -15,6 +15,9 @@ if spock_dir not in sys.path:
|
|
15 |
sys.path.append(spock_dir)
|
16 |
|
17 |
|
|
|
|
|
|
|
18 |
# Check if the dataframe contains a target column
|
19 |
def check_columns(df: pd.DataFrame) -> None:
|
20 |
if not any(["target" in col.lower() for col in df.columns]):
|
@@ -41,6 +44,8 @@ def cached_run_fn(df, wp, verb, imputer_strat, plotmode, seed, prefit, setcbms):
|
|
41 |
setcbms=setcbms,
|
42 |
fig=None,
|
43 |
ax=None,
|
|
|
|
|
44 |
)
|
45 |
return fig, stdout_io.getvalue()
|
46 |
|
@@ -58,11 +63,14 @@ def mock_fn(df, *args, **kwargs):
|
|
58 |
|
59 |
|
60 |
# Load data from the uploaded file
|
61 |
-
def load_data(
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
|
|
|
|
|
|
66 |
|
67 |
|
68 |
# Context manager to capture stdout with a timestamp
|
@@ -87,64 +95,149 @@ def capture_stdout_with_timestamp():
|
|
87 |
|
88 |
@st.experimental_dialog("Import Data")
|
89 |
def import_data():
|
|
|
|
|
|
|
90 |
st.write("Choose a dataset or upload your own file")
|
91 |
|
92 |
option = st.radio("Select an option:", ["Use example dataset", "Upload file"])
|
93 |
|
94 |
if option == "Use example dataset":
|
95 |
examples = {
|
96 |
-
"
|
97 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
98 |
}
|
|
|
99 |
selected_example = st.selectbox(
|
100 |
-
"Choose an example dataset",
|
|
|
|
|
101 |
)
|
102 |
-
|
103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
104 |
st.session_state.df = df
|
|
|
105 |
st.rerun()
|
106 |
-
|
107 |
-
|
108 |
-
|
109 |
-
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
|
|
|
|
117 |
|
118 |
|
119 |
def main():
|
120 |
-
st.
|
121 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
122 |
|
123 |
# Instructions
|
124 |
with st.expander("Instructions", expanded=False):
|
125 |
-
st.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
126 |
"""
|
127 |
-
|
128 |
-
|
129 |
-
|
130 |
-
|
131 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
132 |
"""
|
133 |
-
|
|
|
|
|
|
|
|
|
|
|
134 |
|
135 |
if "df" not in st.session_state:
|
136 |
-
if st.button("Import Data"):
|
137 |
import_data()
|
138 |
st.stop()
|
139 |
|
140 |
# Display the data
|
141 |
-
st.header("
|
142 |
st.dataframe(st.session_state.df, use_container_width=True)
|
143 |
|
144 |
-
# Option to import new data
|
145 |
-
if st.button("Import New Data"):
|
146 |
-
import_data()
|
147 |
-
|
148 |
# Settings
|
149 |
with st.sidebar:
|
150 |
st.header("Settings")
|
@@ -152,15 +245,15 @@ def main():
|
|
152 |
wp = st.number_input(
|
153 |
"Weighting Power",
|
154 |
min_value=0,
|
155 |
-
value=
|
156 |
-
help="
|
157 |
)
|
158 |
verb = st.number_input(
|
159 |
"Verbosity",
|
160 |
min_value=0,
|
161 |
max_value=7,
|
162 |
-
value=
|
163 |
-
help="
|
164 |
)
|
165 |
|
166 |
imputer_strat_dict = {
|
@@ -191,9 +284,11 @@ def main():
|
|
191 |
prefit = st.toggle("Prefit", value=False)
|
192 |
setcbms = st.toggle("CBMS", value=True)
|
193 |
|
194 |
-
#
|
195 |
-
st.
|
196 |
-
|
|
|
|
|
197 |
with st.spinner("Generating plot..."):
|
198 |
fig, logs = cached_run_fn(
|
199 |
st.session_state.df,
|
|
|
15 |
sys.path.append(spock_dir)
|
16 |
|
17 |
|
18 |
+
ACCEPTED_FILE_EXT = ["csv", "xlsx", "xls", "xlsm", "xlsb", "odf", "ods", "odt"]
|
19 |
+
|
20 |
+
|
21 |
# Check if the dataframe contains a target column
|
22 |
def check_columns(df: pd.DataFrame) -> None:
|
23 |
if not any(["target" in col.lower() for col in df.columns]):
|
|
|
44 |
setcbms=setcbms,
|
45 |
fig=None,
|
46 |
ax=None,
|
47 |
+
save_fig=False,
|
48 |
+
save_csv=False,
|
49 |
)
|
50 |
return fig, stdout_io.getvalue()
|
51 |
|
|
|
63 |
|
64 |
|
65 |
# Load data from the uploaded file
|
66 |
+
def load_data(filepath, filename):
|
67 |
+
if filename.split(".")[-1] not in ACCEPTED_FILE_EXT:
|
68 |
+
raise ValueError(
|
69 |
+
f"Invalid file type. Please upload a file with one of the following extensions: {ACCEPTED_FILE_EXT}"
|
70 |
+
)
|
71 |
+
return (
|
72 |
+
pd.read_csv(filepath) if filename.endswith(".csv") else pd.read_excel(filepath)
|
73 |
+
)
|
74 |
|
75 |
|
76 |
# Context manager to capture stdout with a timestamp
|
|
|
95 |
|
96 |
@st.experimental_dialog("Import Data")
|
97 |
def import_data():
|
98 |
+
filepath = None
|
99 |
+
filename = None
|
100 |
+
|
101 |
st.write("Choose a dataset or upload your own file")
|
102 |
|
103 |
option = st.radio("Select an option:", ["Use example dataset", "Upload file"])
|
104 |
|
105 |
if option == "Use example dataset":
|
106 |
examples = {
|
107 |
+
"Clean dataset": {
|
108 |
+
"filepath": "examples/clean_data_example.xlsx",
|
109 |
+
"description": "The clean dataset is a reference dataset that includes 1 target variable and 2 descriptors. This is a typical example, where the goal of the model is to find a single descriptor or a combined descriptor (mathematical function of descriptor 1 and 2) that gives volcano like correlation with the target variable.",
|
110 |
+
},
|
111 |
+
"Noisy dataset": {
|
112 |
+
"filepath": "examples/noisy_data_example.xlsx",
|
113 |
+
"description": "The noisy dataset is a reference dataset that includes 1 target variable and 1 descriptor. This is a specific example where the kinetic data was compiled from duplicate or triplicate experiments, and the performance metric (target variable) is represented by the average value and standard deviation. In such instances, a single experimental is populated over three rows, such the first, second, and third row contains information on the upper bound, mean, and lower bound data, respectively, of the performance metric. The descriptor is values corresponding to these observations remain the same. The model fits through the data and generates a volcano-like trend.",
|
114 |
+
},
|
115 |
}
|
116 |
+
|
117 |
selected_example = st.selectbox(
|
118 |
+
"Choose an example dataset",
|
119 |
+
list(examples.keys()),
|
120 |
+
# format_func=lambda x: examples[x]["description"],
|
121 |
)
|
122 |
+
|
123 |
+
st.info(examples[selected_example]["description"])
|
124 |
+
|
125 |
+
if st.button("Load Example", use_container_width=True):
|
126 |
+
filepath = examples[selected_example]["filepath"]
|
127 |
+
filename = filepath.split("/")[-1]
|
128 |
+
else:
|
129 |
+
file = st.file_uploader("Upload a file", type=ACCEPTED_FILE_EXT)
|
130 |
+
if file is not None:
|
131 |
+
filepath = file
|
132 |
+
filename = file.name
|
133 |
+
|
134 |
+
if filepath is not None and filename is not None:
|
135 |
+
try:
|
136 |
+
df = load_data(filepath, filename)
|
137 |
st.session_state.df = df
|
138 |
+
st.session_state.filename = filename
|
139 |
st.rerun()
|
140 |
+
except Exception as e:
|
141 |
+
st.error(f"Error loading file: {e}")
|
142 |
+
|
143 |
+
|
144 |
+
@st.experimental_dialog("Guidelines", width="large")
|
145 |
+
def guidelines():
|
146 |
+
st.write(
|
147 |
+
"""
|
148 |
+
To effectively populate an Excel sheet (.xlsx or .csv format) to upload on the SPOCK web-app, we recommend the following practices to ensure the data is curated and organized properly. Begin by placing the name of the catalyst in the first column, followed by performance metric (target variable) in the second column. The header of the second column must be labeled as Target Tr and ensure this column does not have any missing or erroneous entries. Next, place each descriptor of interest (input features) in the adjacent columns, one variable per column (see the example provided: “clean_data_example”). Label each column clearly and ensure all cells are filled correctly without missing values or placeholders like "NAN" or "NA". All variables, including the performance metric and descriptors, must be numerical values and follow consistent formatting styles and decimal points. Double-check for outliers or anomalies that could skew model training and remove or correct these entries if necessary.
|
149 |
+
|
150 |
+
In cases where the kinetic data was compiled from duplicate or triplicate experiments, the performance metric will be represented by the average value and standard deviation. In such instances, a single experimental observation needs to be populated over three rows, such the first, second, and third row contains information on the upper bound, mean, and lower bound data, respectively, of the performance metric. The descriptor values corresponding to these observations remain the same (see the example provided: “noisy_data_example”). Before proceeding with model training, validate the calculations for the mean and standard deviations by cross-checking with the original raw data and using statistical formulas to ensure accuracy. Document all essential transformations or preprocessing steps in a separate document linked to the main sheet. This documentation helps ensure transparency and reproducibility in subsequent steps of the project. Maintain version control to track changes and updates to the dataset, ensuring long term reproducibility of results.
|
151 |
+
"""
|
152 |
+
)
|
153 |
|
154 |
|
155 |
def main():
|
156 |
+
st.set_page_config(
|
157 |
+
page_title="Navicat Spock",
|
158 |
+
page_icon="🌋",
|
159 |
+
initial_sidebar_state="expanded",
|
160 |
+
)
|
161 |
+
_, center, _ = st.columns(spec=[0.2, 0.6, 0.2])
|
162 |
+
center.image("res/spock_logo.png")
|
163 |
+
st.subheader("Generate volcano plots from catalytic data")
|
164 |
|
165 |
# Instructions
|
166 |
with st.expander("Instructions", expanded=False):
|
167 |
+
known_tab, unknown_tab = st.tabs(["Descriptor Known", "Descriptor Unknown"])
|
168 |
+
|
169 |
+
with known_tab:
|
170 |
+
st.markdown(
|
171 |
+
"""
|
172 |
+
### When the Descriptor is Known
|
173 |
+
|
174 |
+
1. **Prepare Your Data**
|
175 |
+
- Organize data in a tabular format
|
176 |
+
- Column 1: Catalyst name
|
177 |
+
- Column 2: Performance metric
|
178 |
+
- Column 3: Descriptor
|
179 |
+
- Label columns according to guidelines
|
180 |
+
|
181 |
+
2. **Import Data**
|
182 |
+
- Click "Import Data" to upload your Excel or CSV file
|
183 |
+
|
184 |
+
3. **Review and Adjust**
|
185 |
+
- Check your data in the displayed table
|
186 |
+
- Modify plot settings in the sidebar if needed
|
187 |
+
|
188 |
+
4. **Generate and Analyze**
|
189 |
+
- Click "Generate plot"
|
190 |
+
- Examine the plot and logs in their respective tabs
|
191 |
+
|
192 |
+
5. **Refine Results**
|
193 |
+
- Adjust the weighting power parameter
|
194 |
+
- Repeat steps 4-5 until you achieve satisfactory results
|
195 |
"""
|
196 |
+
)
|
197 |
+
|
198 |
+
with unknown_tab:
|
199 |
+
st.markdown(
|
200 |
+
"""
|
201 |
+
### When the Descriptor is Unknown
|
202 |
+
|
203 |
+
1. **Prepare Your Data**
|
204 |
+
- Organize data in a tabular format
|
205 |
+
- Column 1: Catalyst name
|
206 |
+
- Column 2: Performance metric
|
207 |
+
- Columns 3+: Potential descriptors
|
208 |
+
- Label columns according to guidelines
|
209 |
+
|
210 |
+
2. **Import Data**
|
211 |
+
- Click "Import Data" to upload your Excel or CSV file
|
212 |
+
|
213 |
+
3. **Review and Adjust**
|
214 |
+
- Check your data in the displayed table
|
215 |
+
- Modify plot settings in the sidebar if needed
|
216 |
+
|
217 |
+
4. **Generate and Analyze**
|
218 |
+
- Click "Generate plot"
|
219 |
+
- Examine the plot and logs in their respective tabs
|
220 |
+
|
221 |
+
5. **Refine Results**
|
222 |
+
- Adjust the weighting power parameter
|
223 |
+
- Repeat steps 4-5 until you achieve satisfactory results
|
224 |
"""
|
225 |
+
)
|
226 |
+
|
227 |
+
if st.button(
|
228 |
+
"Click here for more information/guidelines", use_container_width=True
|
229 |
+
):
|
230 |
+
guidelines()
|
231 |
|
232 |
if "df" not in st.session_state:
|
233 |
+
if st.button("Import Data", type="primary", use_container_width=True):
|
234 |
import_data()
|
235 |
st.stop()
|
236 |
|
237 |
# Display the data
|
238 |
+
st.header(f"Dataset : {st.session_state.filename}")
|
239 |
st.dataframe(st.session_state.df, use_container_width=True)
|
240 |
|
|
|
|
|
|
|
|
|
241 |
# Settings
|
242 |
with st.sidebar:
|
243 |
st.header("Settings")
|
|
|
245 |
wp = st.number_input(
|
246 |
"Weighting Power",
|
247 |
min_value=0,
|
248 |
+
value=1,
|
249 |
+
help="The weighting power is the tuning parameter to fit the line segments on the data. Default value is set to 1. We recommend to vary this value between 0-3 for desired results.",
|
250 |
)
|
251 |
verb = st.number_input(
|
252 |
"Verbosity",
|
253 |
min_value=0,
|
254 |
max_value=7,
|
255 |
+
value=3,
|
256 |
+
help="This parameter is used to generate reports based on the outcome of the mode liftting. Default value is set to 3. We recommend to vary this value between 2-5 for desired level of report (log) generation.",
|
257 |
)
|
258 |
|
259 |
imputer_strat_dict = {
|
|
|
284 |
prefit = st.toggle("Prefit", value=False)
|
285 |
setcbms = st.toggle("CBMS", value=True)
|
286 |
|
287 |
+
# Option to import new data
|
288 |
+
if st.button("Import New Data", type="secondary", use_container_width=True):
|
289 |
+
import_data()
|
290 |
+
|
291 |
+
if st.button("Generate plot", type="primary", use_container_width=True):
|
292 |
with st.spinner("Generating plot..."):
|
293 |
fig, logs = cached_run_fn(
|
294 |
st.session_state.df,
|
examples/clean_data_example.xlsx
ADDED
Binary file (10.8 kB). View file
|
|
examples/noisy_data_example.xlsx
ADDED
Binary file (10.6 kB). View file
|
|
requirements.txt
CHANGED
@@ -1 +1,3 @@
|
|
1 |
-
streamlit==1.36.0
|
|
|
|
|
|
1 |
+
streamlit==1.36.0
|
2 |
+
openpyxl==3.1.5
|
3 |
+
pandas==2.2.2
|
res/spock_logo.png
ADDED
![]() |
spock
CHANGED
@@ -1 +1 @@
|
|
1 |
-
Subproject commit
|
|
|
1 |
+
Subproject commit c41043c35bdcc110e309842eb8976495cb386972
|