arcan3 committed on
Commit d77c82e · 1 Parent(s): f9e67d5

added revisions

Files changed (3)
  1. app.py +55 -9
  2. funcs/plot_func.py +3 -3
  3. funcs/som.py +61 -11
app.py CHANGED
@@ -93,13 +93,13 @@ def process_som_data(data, prediction):
 
     return processed_data
 
-# ml inference
-def get_som_mp4(file, slice_select, reducer=reducer10d, cluster=cluster_som):
+def get_som_mp4_v2(csv_file_box, slice_size_slider, sample_rate, window_size_slider, reducer=reducer10d, cluster=cluster_som):
+    processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box = process_data(csv_file_box, slice_size_slider, sample_rate, window_size_slider)
 
     try:
-        train_x, train_y = read_json_files(file)
+        train_x, train_y = read_json_files(json_file_box)
     except:
-        train_x, train_y = read_json_files(file.name)
+        train_x, train_y = read_json_files(json_file_box.name)
 
     # Convert tensors to numpy arrays if necessary
     if isinstance(train_x, torch.Tensor):
@@ -126,6 +126,44 @@ def get_som_mp4(file, slice_select, reducer=reducer10d, cluster=cluster_som):
 
     os.system('curl -X POST -F "csv_file=@animation_table.csv" https://metric-space.ngrok.io/generate --output animation.mp4')
 
+    # prediction = cluster_som.predict(embedding10d)
+    som_video = cluster.plot_activation(embedding10d)
+    som_video.write_videofile('som_sequence.mp4')
+
+    return processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box, 'som_sequence.mp4', 'animation.mp4'
+
+# ml inference
+def get_som_mp4(file, slice_select, reducer=reducer10d, cluster=cluster_som):
+    try:
+        train_x, train_y = read_json_files(file)
+    except:
+        train_x, train_y = read_json_files(file.name)
+
+    # Convert tensors to numpy arrays if necessary
+    if isinstance(train_x, torch.Tensor):
+        train_x = train_x.numpy()
+    if isinstance(train_y, torch.Tensor):
+        train_y = train_y.numpy()
+
+    # load the time series slices of the data 4*3*2*64 (feeds+axis*sensor*samples) + 5 for time diff
+    data = BaseDataset2(train_x.reshape(len(train_x), -1) / 32768, train_y)
+
+    # compute the 10 dimensional embedding vector
+    embedding10d = reducer.transform(data)
+
+    # retrieve the prediction and get the animation
+    prediction = cluster_som.predict(embedding10d)
+    processed_data = process_som_data(data, prediction)
+
+    # Write the processed data to a CSV file
+    # header = ['Gait', 'TS', 'State', 'Condition', 'Shape1', 'Shape2', 'Shape3', 'Shape4', 'Color1', 'Color2', 'Color3', 'Color4', 'Danger1', 'Danger2', 'Danger3', 'Danger4']
+    # with open('animation_table.csv', 'w', newline='') as csvfile:
+    #     csv_writer = csv.writer(csvfile)
+    #     csv_writer.writerow(header)
+    #     csv_writer.writerows(processed_data)
+
+    # os.system('curl -X POST -F "csv_file=@animation_table.csv" https://metric-space.ngrok.io/generate --output animation.mp4')
+
     # prediction = cluster_som.predict(embedding10d)
     fig = cluster.plot_activation_v2(embedding10d, slice_select)
 
@@ -157,6 +195,10 @@ with gr.Blocks(title='Cabasus') as cabasus_sensor:
     processed_file_box = gr.File(label='Processed CSV File')
     json_file_box = gr.File(label='Generated Json file')
 
+    with gr.Row():
+        animation = gr.Video(label='animation')
+        activation_video = gr.Video(label='real')
+
     plot_box_leg = gr.Plot(label="Filtered Signal Plot")
     slice_slider = gr.Slider(minimum=1, maximum=300, label='Slice select', step=1)
 
@@ -187,14 +229,12 @@ with gr.Blocks(title='Cabasus') as cabasus_sensor:
     button_label_Add = gr.Button('attach label')
     slice_json_label_box = gr.File(label='Slice json labelled file')
 
-    with gr.Row():
-        animation = gr.Video(label='animation')
-        real_video = gr.Video(label='real')
+
 
     slices_per_leg = gr.Textbox(label="Debug information")
 
-    csv_file_box.change(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider],
-                        outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box])
+    # csv_file_box.change(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider],
+    #                     outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box])
     leg_dropdown.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider],
                         outputs=[plot_box_leg, plot_slice_leg, get_all_slice, slice_json_box, plot_box_overlay])
     repeat_process.click(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider],
@@ -203,6 +243,12 @@ with gr.Blocks(title='Cabasus') as cabasus_sensor:
                         outputs=[plot_box_leg, plot_slice_leg, get_all_slice, slice_json_box, plot_box_overlay])
 
     som_create.click(get_som_mp4, inputs=[json_file_box, slice_slider], outputs=[som_figures, animation])
+
+    # redoing the whole calculation with the file loading
+    csv_file_box.change(get_som_mp4_v2, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider],
+                        outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box,
+                                 activation_video, animation])
+
     button_label_Add.click(attach_label_to_json, inputs=[slice_json_box, label_name], outputs=[slice_json_label_box])
 
     cabasus_sensor.queue(concurrency_count=2).launch(debug=True)
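
Note on the wiring change above: csv_file_box.change now points at get_som_mp4_v2, which re-runs the full process_data pipeline on every CSV upload and additionally returns two file paths that Gradio loads into the gr.Video components. A minimal sketch of that pattern (hypothetical names, assuming a Gradio 3.x Blocks app where an event handler may return file paths for gr.Video outputs; the real pipeline work is elided):

import gradio as gr

def full_pipeline(csv_file):
    # stand-in for get_som_mp4_v2: preprocess, embed, cluster, render
    # ... write som_sequence.mp4 and animation.mp4 to disk here ...
    return 'som_sequence.mp4', 'animation.mp4'

with gr.Blocks(title='Cabasus') as demo:
    csv_in = gr.File(label='CSV input')
    with gr.Row():
        activation_video = gr.Video(label='real')   # SOM activation sequence
        animation = gr.Video(label='animation')     # server-rendered animation
    # uploading a new CSV re-runs the whole calculation
    csv_in.change(full_pipeline, inputs=[csv_in], outputs=[activation_video, animation])

demo.queue(concurrency_count=2).launch()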
funcs/plot_func.py CHANGED
@@ -44,9 +44,9 @@ def plot_sensor_data_from_json(json_file, sensor, slice_select=1):
     ax = plt.plot(data['time'].to_list(), data[sensor].to_list(), '-b')
 
     df_temp = data[data['slice selection'] == int(slice_select)].reset_index()
-    y = [np.NaN]*((int(slice_select)-1)*len(df_temp[sensor].to_list())) + df_temp[sensor].to_list() + [np.NaN]*((len(slices) - int(slice_select))*len(df_temp[sensor].to_list()))
-    x = data['time'].to_list()
-    ax = plt.plot(x, y, '-')
+    # y = [np.NaN]*((int(slice_select)-1)*len(df_temp[sensor].to_list())) + df_temp[sensor].to_list() + [np.NaN]*((len(slices) - int(slice_select))*len(df_temp[sensor].to_list()))
+    # x = data['time'].to_list()
+    # ax = plt.plot(x, y, '-')
 
     plt.xlabel("Timestamp")
     plt.ylabel(sensor)
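
Note on the overlay removal above: the commented-out lines highlighted the selected slice by padding it with np.NaN on both sides until it matched the length of the full time axis, then plotting it over the blue base trace (NaNs leave gaps, so only the slice is drawn). If the overlay comes back, a mask avoids the fragile length arithmetic; a sketch, assuming data keeps one row per sample with the same 'slice selection' column used above:

import numpy as np
import matplotlib.pyplot as plt

def plot_with_slice_overlay(data, sensor, slice_select=1):
    plt.plot(data['time'].to_list(), data[sensor].to_list(), '-b')  # base trace
    # NaN out every sample outside the chosen slice; the gaps suppress drawing
    y = data[sensor].where(data['slice selection'] == int(slice_select), np.nan)
    plt.plot(data['time'].to_list(), y.to_list(), '-')
    plt.xlabel("Timestamp")
    plt.ylabel(sensor)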
funcs/som.py CHANGED
@@ -15,7 +15,7 @@ from sklearn.datasets import make_blobs
 from sklearn.preprocessing import LabelEncoder
 from sklearn.cluster import KMeans
 from sklearn.semi_supervised import LabelSpreading
-from moviepy.editor import *
+from moviepy.editor import ImageSequenceClip
 
 class ClusterSOM:
     def __init__(self):
@@ -390,20 +390,26 @@ class ClusterSOM:
             img = imageio.imread(buf)
             images.append(img)
             plt.close()
+
+        # Create the video using moviepy and save it as a mp4 file
+        video = ImageSequenceClip(images, fps=1)
+
+        return video
+
 
-        # Save the images as a GIF
-        imageio.mimsave(f"{filename}.gif", images, duration=500, loop=1)
+        # # Save the images as a GIF
+        # imageio.mimsave(f"{filename}.gif", images, duration=500, loop=1)
 
-        # Load the gif
-        gif_file = f"{filename}.gif"  # Replace with the path to your GIF file
-        clip = VideoFileClip(gif_file)
+        # # Load the gif
+        # gif_file = f"{filename}.gif"  # Replace with the path to your GIF file
+        # clip = VideoFileClip(gif_file)
 
-        # Convert the gif to mp4
-        mp4_file = f"{filename}.mp4"  # Replace with the desired output path
-        clip.write_videofile(mp4_file, codec='libx264')
+        # # Convert the gif to mp4
+        # mp4_file = f"{filename}.mp4"  # Replace with the desired output path
+        # clip.write_videofile(mp4_file, codec='libx264')
 
-        # Close the clip to release resources
-        clip.close()
+        # # Close the clip to release resources
+        # clip.close()
 
     def save(self, file_path):
         """
@@ -470,4 +476,48 @@ class ClusterSOM:
 
         plt.tight_layout()
 
+        return fig
+
+    def plot_activation_v3(self, data, slice_select):
+        """
+        Generate a GIF visualization of the prediction output using the activation maps of individual SOMs.
+        """
+        if len(self.som_models) == 0:
+            raise ValueError("SOM models not trained yet.")
+
+        try:
+            prediction = self.predict([data[int(slice_select)-1]])[0]
+        except:
+            prediction = self.predict([data[int(slice_select)-2]])[0]
+
+        fig, axes = plt.subplots(1, len(self.som_models), figsize=(20, 5), sharex=True, sharey=True)
+        fig.suptitle(f"Activation map for SOM {prediction[0]}, node {prediction[1]}", fontsize=16)
+
+        for idx, (som_key, som) in enumerate(self.som_models.items()):
+            ax = axes[idx]
+            activation_map = np.zeros(som._weights.shape[:2])
+            for x in range(som._weights.shape[0]):
+                for y in range(som._weights.shape[1]):
+                    activation_map[x, y] = np.linalg.norm(data[int(slice_select)-1] - som._weights[x, y])
+
+            winner = som.winner(data[int(slice_select)-1])  # Find the BMU for this SOM
+            activation_map[winner] = 0  # Set the BMU's value to 0 so it will be red in the colormap
+
+            if som_key == prediction[0]:  # Active SOM
+                im_active = ax.imshow(activation_map, cmap='viridis', origin='lower', interpolation='none')
+                ax.plot(winner[1], winner[0], 'r+')  # Mark the BMU with a red plus sign
+                ax.set_title(f"SOM {som_key}", color='blue', fontweight='bold')
+                if hasattr(self, 'label_centroids'):
+                    label_idx = self.label_encodings.inverse_transform([som_key - 1])[0]
+                    ax.set_xlabel(f"Label: {label_idx}", fontsize=12)
+            else:  # Inactive SOM
+                im_inactive = ax.imshow(activation_map, cmap='gray', origin='lower', interpolation='none')
+                ax.set_title(f"SOM {som_key}")
+
+            ax.set_xticks(range(activation_map.shape[1]))
+            ax.set_yticks(range(activation_map.shape[0]))
+            ax.grid(True, linestyle='-', linewidth=0.5)
+
+        plt.tight_layout()
+
         return fig
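
Note on the moviepy change above: plot_activation now returns an in-memory ImageSequenceClip built from the collected frames instead of round-tripping through a GIF, and the caller (get_som_mp4_v2 in app.py) writes it out with write_videofile('som_sequence.mp4'). One caveat: ImageSequenceClip expects equally sized RGB frames, and imageio.imread on a PNG buffer can return RGBA, so dropping the alpha channel is a safe normalization. A sketch of the frame-collection pattern under those assumptions (figs_to_clip is a hypothetical helper, not part of the repo):

import io
import imageio
import matplotlib.pyplot as plt
from moviepy.editor import ImageSequenceClip

def figs_to_clip(figs, fps=1):
    """Render matplotlib figures to RGB frames and pack them into one clip."""
    images = []
    for fig in figs:
        buf = io.BytesIO()
        fig.savefig(buf, format='png')
        buf.seek(0)
        frame = imageio.imread(buf)
        images.append(frame[..., :3])  # keep RGB, drop alpha if present
        plt.close(fig)
    return ImageSequenceClip(images, fps=fps)

# usage: figs_to_clip(frames).write_videofile('som_sequence.mp4', codec='libx264')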