import torch
import gradio as gr

from phate import PHATEAE
from funcs.som import ClusterSOM

from funcs.processor import process_data
from funcs.plot_func import plot_sensor_data_from_json
from funcs.dataloader import BaseDataset2, read_json_files

DEVICE = torch.device("cpu")
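# Pretrained models: a PHATE autoencoder that embeds each sensor slice into 10 dimensions,
# and a SOM used to cluster and visualize those embeddings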
reducer10d = PHATEAE(epochs=30, n_components=10, lr=.0001, batch_size=128, t='auto', knn=8, relax=True, metric='euclidean')
reducer10d.load('models/r10d_2.pth')

cluster_som = ClusterSOM()
cluster_som.load("models/cluster_som2.pkl")

# ML inference
def get_som_mp4(file, slice_select=1, reducer=reducer10d, cluster=cluster_som):
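    """Embed the uploaded JSON sensor slices with the 10-D reducer and plot the
    SOM activation map for the selected slice."""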

    try:
        train_x, train_y = read_json_files(file)
    except Exception:
        # Gradio may pass a temp-file wrapper instead of a path; fall back to its .name
        train_x, train_y = read_json_files(file.name)

    # Convert tensors to numpy arrays if necessary
    if isinstance(train_x, torch.Tensor):
        train_x = train_x.numpy()
    if isinstance(train_y, torch.Tensor):
        train_y = train_y.numpy()

    # Load the time-series slices: 4*3*2*64 values (feeds, axes, sensors, samples) + 5 for time diff,
    # scaled by 32768 (the int16 range)
    data = BaseDataset2(train_x.reshape(len(train_x), -1) / 32768, train_y)

    # Compute the 10-dimensional embedding vector
    embedding10d = reducer.transform(data)

    # prediction = cluster_som.predict(embedding10d)
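    # Render the SOM activation map for the requested slice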
    fig = cluster.plot_activation_v2(embedding10d, slice_select)

    return fig

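# Gradio UI: a single "Convert" tab that uploads a CSV, slices the sensor signals,
# plots them per leg, and renders SOM activation maps for a selected slice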
with gr.Blocks(title='Cabasus') as cabasus_sensor:
    title = gr.Markdown("<h2><center>Data gathering and processing</center></h2>")
    with gr.Tab("Convert"):
        with gr.Row():
            csv_file_box = gr.File(label='Upload CSV File') 
            with gr.Column():
                processed_file_box = gr.File(label='Processed CSV File') 
                json_file_box = gr.File(label='Generated JSON file')

        with gr.Row():
            slice_size_slider = gr.Slider(minimum=16, maximum=512, step=1, value=64, label="Slice Size", visible=False)
            sample_rate = gr.Slider(minimum=1, maximum=199, step=1, value=20, label="Sample rate", visible=False)     
        with gr.Row():
            window_size_slider = gr.Slider(minimum=0, maximum=100, step=2, value=10, label="Window Size", visible=False)
            repeat_process = gr.Button('Restart process', visible=False)  
        with gr.Row():
            leg_dropdown = gr.Dropdown(choices=['GZ1', 'GZ2', 'GZ3', 'GZ4'], label='Select leg', value='GZ1')
            slice_slider = gr.Slider(minimum=1, maximum=300, label='Current slice', step=1)
        with gr.Row():
            plot_box_leg = gr.Plot(label="Filtered Signal Plot")
            plot_box_overlay = gr.Plot(label="Overlay Signal Plot")
        
        with gr.Row():
            plot_slice_leg = gr.Plot(label="Sliced Signal Plot")
            get_all_slice = gr.Plot(label="Real Signal Plot")
        
        som_create = gr.Button('Generate SOM')
        with gr.Row():
            som_figures = gr.Plot(label="SOM activations")

        slices_per_leg = gr.Textbox(label="Number of slices found per leg")
        
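        # Event wiring: re-run processing when the CSV changes or the restart button is clicked,
        # and redraw the per-leg plots whenever the leg or slice selection changes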
        csv_file_box.change(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider], 
                            outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice])
        leg_dropdown.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider], 
                            outputs=[plot_box_leg, plot_slice_leg, get_all_slice])
        repeat_process.click(process_data, inputs=[csv_file_box, slice_size_slider, sample_rate, window_size_slider], 
                             outputs=[processed_file_box, json_file_box, slices_per_leg, plot_box_leg, plot_box_overlay, slice_slider, plot_slice_leg, get_all_slice])
        slice_slider.change(plot_sensor_data_from_json, inputs=[json_file_box, leg_dropdown, slice_slider], 
                            outputs=[plot_box_leg, plot_slice_leg, get_all_slice])
        
        som_create.click(get_som_mp4, inputs=[json_file_box, slice_slider], outputs=[som_figures])

cabasus_sensor.queue(concurrency_count=2).launch(debug=True)