baakaani committed on
Commit 2c8af4e
Parent(s): 7eea572

adding download button and fixing bugs

Files changed (1):
  1. utils/config_fabric.py +27 -2
utils/config_fabric.py CHANGED
@@ -11,6 +11,8 @@ import streamlit as st
 import subprocess
 import time
 import shutil
+import zipfile
+import io
 
 st.set_page_config(layout='wide')
 INPUT_XES="output/inputlog_temp.xes"
@@ -272,6 +274,12 @@ def set_generator_experiments(generator_params):
 
     return generator_params
 
+def sort_key(val):
+    parts = val.split('_')
+    # Extract and convert the numeric part
+    part1 = int(parts[0][5:])  # e.g., from 'genEL1', extract '1'
+    return part1
+
 if __name__ == '__main__':
     play_header()
 
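Note on the new helper: sort_key assumes every log name begins with the 5-character prefix 'genEL' followed by its number, so a name like 'foo1_x' would mis-parse. A minimal sketch of the committed behavior, plus a hypothetical regex-based variant (not part of this commit) that does not hardcode the prefix length:

import re

def sort_key(val):
    # Committed behavior: 'genEL12_trace' -> 12; relies on the
    # name starting with the 5-character prefix 'genEL'.
    return int(val.split('_')[0][5:])

# Hypothetical alternative: take the first run of digits instead of
# hardcoding the prefix length, and fall back to 0 on odd names.
def sort_key_defensive(val):
    match = re.search(r'\d+', val)
    return int(match.group()) if match else 0

assert sort_key('genEL2_run') == 2
assert sort_key_defensive('genEL10_run') == 10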
@@ -347,11 +355,15 @@ if __name__ == '__main__':
 
     # Prepare output path for feature extraction
     directory = Path(step_config['output_path']).parts
-    path = os.path.join(directory[0], 'features', *directory[1:])
+    path = os.path.join(directory[0], 'features', *directory[1:])  # for feature storage
+    path_to_logs = os.path.join(*directory)  # for log storage
 
     # Clean existing output path if it exists
     if os.path.exists(path):
         shutil.rmtree(path)
+
+    if os.path.exists(path_to_logs):
+        shutil.rmtree(path_to_logs)
 
     # Simulate running the command with a loop and update progress
     with st.spinner("Generating logs.."):
@@ -364,14 +376,27 @@ if __name__ == '__main__':
     file_paths = [os.path.join(root, file)
                   for root, _, files in os.walk(path)
                   for file in files]
+
+    # Download the generated logs as a ZIP file
+    download_file_paths = [os.path.join(root, file)
+                           for root, _, files in os.walk(path_to_logs)
+                           for file in files]
 
+    zip_buffer = io.BytesIO()
+    with zipfile.ZipFile(zip_buffer, 'w') as zip_file:
+        for file in download_file_paths:
+            zip_file.write(file, os.path.basename(file))
+    zip_buffer.seek(0)
+    st.download_button(label="Download generated logs", data=zip_buffer, file_name='generated_logs.zip', mime='application/zip')
+
     # Read and concatenate all JSON files into a DataFrame
     dataframes = pd.concat([pd.read_json(file, lines=True) for file in file_paths], ignore_index=True)
 
     # Reorder columns with 'target_similarity' as the last column
     columns = [col for col in dataframes.columns if col != 'target_similarity'] + ['target_similarity']
     dataframes = dataframes[columns]
-
+    dataframes = dataframes.sort_values(by='log', key=lambda col: col.map(sort_key))
+
     # Set 'log' as the index
     dataframes.set_index('log', inplace=True)
 
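Note on the download hunk: the ZIP is built entirely in memory, so nothing extra is written to disk, and the buffer must be rewound before being handed to st.download_button. A self-contained sketch of the same pattern, with a made-up file list standing in for the os.walk result:

import io
import os
import zipfile

import streamlit as st

# Made-up file list standing in for the os.walk result above.
download_file_paths = ['output/run/genEL1.xes', 'output/run/genEL2.xes']

zip_buffer = io.BytesIO()                             # archive lives in memory only
with zipfile.ZipFile(zip_buffer, 'w') as zip_file:
    for file in download_file_paths:
        zip_file.write(file, os.path.basename(file))  # store under a flat name
zip_buffer.seek(0)                                    # rewind before Streamlit reads it

st.download_button(
    label="Download generated logs",
    data=zip_buffer,
    file_name='generated_logs.zip',
    mime='application/zip',
)

One caveat: archiving by os.path.basename flattens the layout, so two logs with the same file name in different subfolders would collide inside the ZIP.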
 
 
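And on the sorting fix at the end of the last hunk: plain string sorting would order 'genEL10' before 'genEL2', which is presumably the bug being fixed. sort_values hands the whole column to key, so the scalar helper is applied via map. A minimal sketch with made-up log names:

import pandas as pd

def sort_key(val):
    return int(val.split('_')[0][5:])  # 'genEL10_x' -> 10

# Made-up frame standing in for the concatenated feature JSONs.
df = pd.DataFrame({
    'log': ['genEL10_a', 'genEL2_a', 'genEL1_a'],
    'target_similarity': [0.7, 0.9, 0.8],
})

df = df.sort_values(by='log', key=lambda col: col.map(sort_key))
print(df['log'].tolist())  # ['genEL1_a', 'genEL2_a', 'genEL10_a']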