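"""Cabasus sensor Gradio app.

Converts uploaded CSV sensor logs into JSON slices, embeds each slice into 10
dimensions with a PHATE autoencoder, classifies the gait with a SOM clusterer,
and renders a SOM activation video plus an animation table CSV.
"""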

import os
import csv
import json
import torch
import numpy as np
import gradio as gr

from phate import PHATEAE
from funcs.som import ClusterSOM
from funcs.tools import numpy_to_native

from funcs.processor import process_data
from funcs.plot_func import plot_sensor_data_from_json
from funcs.dataloader import BaseDataset2, read_json_files

DEVICE = torch.device("cpu")
reducer10d = PHATEAE(epochs=30, n_components=10, lr=.0001, batch_size=128, t='auto', knn=8, relax=True, metric='euclidean')
reducer10d.load('models/r10d_6.pth')

cluster_som = ClusterSOM()
cluster_som.load("models/cluster_som6.pkl")
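# Both checkpoints are pretrained artefacts that must be present under models/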

def map_som2animation(som_value):
    mapping = {
        2: 0,   # walk
        1: 1,   # trot
        3: 2,   # gallop
        5: 3,   # idle
        4: 3,   # other
        -1: 3,  # other
    }
    
    return mapping.get(som_value, None)

# def map_som2animation_v2(som_value):
#     mapping = {
#                 versammelter_trab (collected trot): center of SOM-1,
#                 arbeits-trab (working trot): south-east of SOM-1,
#                 mittels-trab (medium trot): north of SOM-1,
#                 starker-trab (extended trot): north-west of SOM-1,

#                 starker-schritt (extended walk):

#             }

#     return mapping.get(som_value, None)

def deviation_scores(tensor_data, scale=50):
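    """Return the TS value and four deviation scores for one slice.

    The last 5 elements of `tensor_data` are four side values plus a
    reference value: TS is the reference rescaled back to the raw range
    (* 32768 / 20), and each score is the absolute difference between a
    side value and the reference, scaled by `scale` and clipped to [0, 1].
    """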
    if len(tensor_data) < 5:
        raise ValueError("The input tensor must have at least 5 elements.")
    
    # Extract the side values and reference value from the input tensor
    side_values = tensor_data[-5:-1].numpy()
    reference_value = tensor_data[-1].item()

    # Calculate the absolute differences between the side values and the reference
    absolute_differences = np.abs(side_values - reference_value)
    
    # If every side value equals the reference, all deviation scores are 0
    if np.sum(absolute_differences) == 0:
        return int(reference_value / 20 * 32768), [0, 0, 0, 0]

    # Calculate the deviation scores for each side value
    scores = absolute_differences * scale
    
    # Clip the scores between 0 and 1
    clipped_scores = np.clip(scores, 0, 1)

    return int(reference_value/20*32768), clipped_scores.tolist()

def process_som_data(data, prediction):
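    """Build one animation-table row per slice:
    [gait, TS, state, condition] + 4 shape + 4 color + 4 danger values."""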
    processed_data = []

    for i in range(len(data)):
        TS, scores_list = deviation_scores(data[i][0])

        # If TS is missing (None), interpolate it using surrounding values
        if TS is None:
            if i > 0 and i < len(data) - 1:
                prev_TS = processed_data[-1][1]
                next_TS = deviation_scores(data[i + 1][0])[0]
                TS = (prev_TS + next_TS) // 2
            elif i > 0:
                TS = processed_data[-1][1]  # Use the previous TS value
            else:
                TS = 0  # Default to 0 if no surrounding values are available


        # Set Gait (0-walk, 1-trot, 2-gallop, 3-idle), State, and Condition;
        # note that the first SOM prediction is applied to every row
        gait = map_som2animation(prediction[0][0])
        state = 0
        condition = 0

        # Calculate Shape, Color, and Danger values
        shape_values = scores_list
        color_values = scores_list
        danger_values = [1 if score == 1 else 0 for score in scores_list]

        # Create a row with the required format
        row = [gait, TS, state, condition] + shape_values + color_values + danger_values
        processed_data.append(row)

    return processed_data

def get_som_mp4_v2(csv_file_box, slice_size_slider, sample_rate, window_size_slider, reducer=reducer10d, cluster=cluster_som):
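    """Full pipeline for a freshly uploaded CSV: slice the data, embed it,
    classify the gait, write animation_table.csv, and render the SOM
    activation video."""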
    processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box = process_data(csv_file_box, slice_size_slider, sample_rate, window_size_slider)

    try:
        if json_file_box is None:
            return processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box, None, None
        train_x, train_y = read_json_files(json_file_box)
    except Exception:
        # Gradio may hand over a file object with a .name attribute instead of a plain path
        if json_file_box.name is None:
            return processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box, None, None
        train_x, train_y = read_json_files(json_file_box.name)

    # Convert tensors to numpy arrays if necessary
    if isinstance(train_x, torch.Tensor):
        train_x = train_x.numpy()
    if isinstance(train_y, torch.Tensor):
        train_y = train_y.numpy()

    # load the time series slices of the data, 4*3*2*64 values per slice
    # (feeds * axes * sensors * samples) plus 5 for the time diff, normalised by the int16 range
    data = BaseDataset2(train_x.reshape(len(train_x), -1) / 32768, train_y)

    # compute the 10-dimensional embedding vectors
    embedding10d = reducer.transform(data)

    # retrieve the SOM predictions and build the animation table
    prediction = cluster_som.predict(embedding10d)
    processed_data = process_som_data(data, prediction)

    # Write the processed data to a CSV file
    header = ['Gait', 'TS', 'State', 'Condition', 'Shape1', 'Shape2', 'Shape3', 'Shape4', 'Color1', 'Color2', 'Color3', 'Color4', 'Danger1', 'Danger2', 'Danger3', 'Danger4']
    with open('animation_table.csv', 'w', newline='') as csvfile:
        csv_writer = csv.writer(csvfile)
        csv_writer.writerow(header)
        csv_writer.writerows(processed_data)
    
    # Rendering the animation via the external service is currently disabled:
    # os.system('curl -X POST -F "csv_file=@animation_table.csv" https://metric-space.ngrok.io/generate --output animation.mp4')
    som_video = cluster.plot_activation(embedding10d)
    som_video.write_videofile('som_sequence.mp4')
        
    # return processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box, 'som_sequence.mp4', 'animation.mp4'
    return processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box, 'som_sequence.mp4', None

# ml inference
def get_som_mp4(file, slice_select, reducer=reducer10d, cluster=cluster_som):
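    """Plot the SOM activation figure for a single selected slice."""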
    try:
        train_x, train_y = read_json_files(file)
    except Exception:
        # Gradio may hand over a file object with a .name attribute instead of a plain path
        train_x, train_y = read_json_files(file.name)

    # Convert tensors to numpy arrays if necessary
    if isinstance(train_x, torch.Tensor):
        train_x = train_x.numpy()
    if isinstance(train_y, torch.Tensor):
        train_y = train_y.numpy()

    # load the time series slices of the data, 4*3*2*64 values per slice
    # (feeds * axes * sensors * samples) plus 5 for the time diff, normalised by the int16 range
    data = BaseDataset2(train_x.reshape(len(train_x), -1) / 32768, train_y)

    # compute the 10-dimensional embedding vectors
    embedding10d = reducer.transform(data)

    fig = cluster.plot_activation_v2(embedding10d, slice_select)

    return fig

def attach_label_to_json(json_file, label_text):
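    """Attach `label_text` to a slice JSON file and write it out as
    manual_labelled_<original name>; returns the new file path."""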
    # Gradio may pass a plain path or a file object with a .name attribute
    path = json_file if isinstance(json_file, str) else json_file.name

    with open(path, "r") as f:
        slices = json.load(f)

    slices['label'] = label_text

    out_path = f'manual_labelled_{os.path.basename(path)}'
    with open(out_path, "w") as f:
        json.dump(numpy_to_native(slices), f, indent=2)

    return out_path


with gr.Blocks(title='Cabasus') as cabasus_sensor:
    title = gr.Markdown("<h2><center>Data gathering and processing</center></h2>")
    with gr.Tab("Convert"):
        with gr.Row():
            csv_file_box = gr.File(label='Upload CSV File') 
            with gr.Column():
                processed_file_box = gr.File(label='Processed CSV File') 
                json_file_box = gr.File(label='Generated Json file')

        with gr.Row():
            animation = gr.Video(label='animation')
            activation_video = gr.Video(label='activation channels')

        with gr.Row():
            real_video = gr.Video(label='real video')
            trend_graph = gr.Video(label='trend graph')

        plot_box_leg = gr.Plot(label="Filtered Signal Plot")
        slice_slider = gr.Slider(minimum=1, maximum=300, label='Slice select', step=1)

        som_create = gr.Button('Generate SOM')
        som_figures = gr.Plot(label="SOM activations")

        with gr.Row():
            slice_size_slider = gr.Slider(minimum=16, maximum=512, step=1, value=64, label="Slice Size", visible=False)
            sample_rate = gr.Slider(minimum=1, maximum=199, step=1, value=20, label="Sample rate", visible=False)     
        with gr.Row():
            window_size_slider = gr.Slider(minimum=0, maximum=100, step=2, value=10, label="Window Size", visible=False)
            repeat_process = gr.Button('Restart process', visible=False)  

        with gr.Row():
            leg_dropdown = gr.Dropdown(choices=['GZ1', 'GZ2', 'GZ3', 'GZ4'], label='select leg', value='GZ1')
            
        with gr.Row():
            get_all_slice = gr.Plot(label="Real Signal Plot")
            plot_box_overlay = gr.Plot(label="Overlay Signal Plot")
        
        with gr.Row():
            plot_slice_leg = gr.Plot(label="Sliced Signal Plot", visible=False)
        
        with gr.Row():
            slice_json_box = gr.File(label='Slice json file')
            with gr.Column():
                label_name = gr.Textbox(label="Enter the label name")
                button_label_Add = gr.Button('attach label')
            slice_json_label_box = gr.File(label='Slice json labelled file')

        slices_per_leg = gr.Textbox(label="Debug information")
        
        # superseded by the get_som_mp4_v2 wiring below:
        # csv_file_box.change(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider],
        #                     outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box])
        leg_dropdown.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider], 
                            outputs=[plot_box_leg, plot_slice_leg, get_all_slice, slice_json_box, plot_box_overlay])
        repeat_process.click(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider], 
                             outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box])
        slice_slider.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider], 
                            outputs=[plot_box_leg, plot_slice_leg, get_all_slice, slice_json_box, plot_box_overlay])
        
        som_create.click(get_som_mp4, inputs=[json_file_box, slice_slider], outputs=[som_figures])

        # redo the whole calculation when a new CSV file is loaded
        csv_file_box.change(get_som_mp4_v2, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider], 
                         outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice, slice_json_box,
                                  activation_video, animation])

        button_label_Add.click(attach_label_to_json, inputs=[slice_json_box, label_name], outputs=[slice_json_label_box])

cabasus_sensor.queue(concurrency_count=2).launch(debug=True)