Commit 3273528
mriusero committed
1 Parent(s): e55813a
feat: perf
Files changed:
- src/production/flow.py +10 -3
- src/production/metrics/machine.py +24 -35
- src/production/metrics/tools.py +20 -28
- src/ui/dashboard.py +0 -1
src/production/flow.py
CHANGED
@@ -20,7 +20,6 @@ async def generate_data(state):
         4: 0.07
     }

-    # Initialize raw_df if it doesn't exist
     if 'raw_df' not in state['data']:
         state['data']['raw_df'] = pd.DataFrame(columns=[
             "Part ID", "Timestamp", "Position", "Orientation", "Tool ID",
@@ -79,9 +78,17 @@ async def generate_data(state):
             "Downtime End": "N/A"
         }])

-
+        if (
+            (not new_row.empty and not new_row.isna().all().all())
+            and \
+            (not state['data']['raw_df'].empty and not state['data']['raw_df'].isna().all().all())
+        ):
+            state['data']['raw_df'] = pd.concat([state['data']['raw_df'], new_row], ignore_index=True)
+
+        elif not new_row.empty and not new_row.isna().all().all():
+            state['data']['raw_df'] = new_row.copy()

-        print(f"
+        print(f"- part {part_id} data generated")
         part_id += 1
         await asyncio.sleep(0.2)

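Note on the flow.py change above: the unconditional append is replaced by a guard that only concatenates frames that actually hold data, most likely to avoid pandas' deprecation warning about concatenating empty or all-NA entries. Below is a minimal standalone sketch of the same pattern; `has_data` and `append_row` are illustrative names, not part of this repository.

import pandas as pd

def has_data(frame: pd.DataFrame) -> bool:
    # True when the frame has at least one value that is not NA.
    return not frame.empty and not frame.isna().all().all()

def append_row(df: pd.DataFrame, new_row: pd.DataFrame) -> pd.DataFrame:
    # Same guard as in generate_data: only concatenate when both frames hold real data.
    if has_data(df) and has_data(new_row):
        return pd.concat([df, new_row], ignore_index=True)
    if has_data(new_row):
        return new_row.copy()
    return df

# Usage: the very first row simply replaces the empty accumulator.
acc = pd.DataFrame(columns=["Part ID", "Timestamp"])
acc = append_row(acc, pd.DataFrame([{"Part ID": 1, "Timestamp": "2025-01-01 00:00:00"}]))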
src/production/metrics/machine.py
CHANGED
@@ -1,51 +1,40 @@
 import pandas as pd

 async def machine_metrics(raw_data):
-    """
-    Calculate machine efficiency metrics from raw production data.
-    :param raw_data: collection of raw production data containing timestamps, downtime, and compliance information.
-    :return: a dictionary with calculated metrics including opening time, required time, unplanned stop time, operating time, net time, useful time, quality rate, operating rate, availability rate, TRS (Total Resource Score), MTBF (Mean Time Between Failures), and MTTR (Mean Time To Repair).
-    """
     df = pd.DataFrame(raw_data)
-    df['Timestamp'] = pd.to_datetime(df['Timestamp'])
-    df['Downtime Start'] = pd.to_datetime(df['Downtime Start'], format="%Y-%m-%d %H:%M:%S", errors='coerce')
-    df['Downtime End'] = pd.to_datetime(df['Downtime End'], format="%Y-%m-%d %H:%M:%S", errors='coerce')

-
-
-
+    datetime_cols = ['Timestamp', 'Downtime Start', 'Downtime End']
+    for col in datetime_cols:
+        df[col] = pd.to_datetime(df[col], errors='coerce', format="%Y-%m-%d %H:%M:%S")

-
-
-    operating_time = required_time - unplanned_stop_time  # Operating time
+    opening_time = df['Timestamp'].max() - df['Timestamp'].min()
+    required_time = opening_time  # planned_stop_time = 0, not implemented

-
-
+    downtime_df = df.dropna(subset=['Downtime Start', 'Downtime End'])
+    unplanned_stop_time = (downtime_df['Downtime End'] - downtime_df['Downtime Start']).sum()
+    operating_time = required_time - unplanned_stop_time

-
-    useful_time = net_time - pd.Timedelta(seconds=nok_time)  # Useful time
+    net_time = operating_time  # cadency_variance = 0, not implemented

-
-
+    nok_count = (df['Compliance'] != 'OK').sum()
+    useful_time = net_time - pd.Timedelta(seconds=nok_count)

-
-
-    availability_rate = (operating_time / required_time) * 100  # Availability rate
+    total_parts = len(df)
+    compliant_parts = (df['Compliance'] == 'OK').sum()

-
-
+    operating_sec = operating_time.total_seconds()
+    net_sec = net_time.total_seconds()
+    required_sec = required_time.total_seconds()
+
+    quality_rate = (compliant_parts / total_parts) * 100 if total_parts > 0 else 0
+    operating_rate = (net_sec / operating_sec) * 100 if operating_sec > 0 else 0
+    availability_rate = (operating_sec / required_sec) * 100 if required_sec > 0 else 0

-
-    if len(downtime_df) > 0:
-        mtbf = operating_time / len(downtime_df)
-    else:
-        mtbf = pd.Timedelta(0)
+    TRS = (quality_rate / 100) * (operating_rate / 100) * (availability_rate / 100) * 100

-
-    if
-
-    else:
-        mttr = pd.Timedelta(0)
+    downtime_count = len(downtime_df)
+    mtbf = operating_time / downtime_count if downtime_count > 0 else pd.Timedelta(0)
+    mttr = unplanned_stop_time / downtime_count if downtime_count > 0 else pd.Timedelta(0)

     return {
         "opening_time": str(opening_time),
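Note on the machine.py rewrite above: the datetime parsing and downtime aggregation are now vectorised, and the TRS is the product of the quality, operating and availability rates. A small worked sketch with made-up figures, only to show how the rates compose; none of these numbers come from the repository.

import pandas as pd

# Hypothetical shift: 8 h of opening/required time, 48 min of unplanned stops, 400 parts, 380 compliant.
required_time = pd.Timedelta(hours=8)
unplanned_stop_time = pd.Timedelta(minutes=48)
operating_time = required_time - unplanned_stop_time      # 7 h 12 min
net_time = operating_time                                 # cadency variance not modelled

quality_rate = 380 / 400 * 100                            # 95.0 %
operating_rate = net_time / operating_time * 100          # 100.0 % here
availability_rate = operating_time / required_time * 100  # 90.0 %

TRS = (quality_rate / 100) * (operating_rate / 100) * (availability_rate / 100) * 100
print(f"TRS = {TRS:.1f} %")                               # 85.5 %

# With 3 recorded stops, MTBF and MTTR follow the same Timedelta arithmetic as the new code.
downtime_count = 3
mtbf = operating_time / downtime_count                    # 2 h 24 min between failures
mttr = unplanned_stop_time / downtime_count               # 16 min to repair on average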
src/production/metrics/tools.py
CHANGED
@@ -1,15 +1,9 @@
 import numpy as np
+import pandas as pd
 import asyncio
+from concurrent.futures import ThreadPoolExecutor

 def stats_metrics(data, column, usl, lsl):
-    """
-    Calculate rolling mean, standard deviation, Cp, and Cpk for a given column.
-    Args:
-        data (pd.DataFrame): DataFrame containing the production data.
-        column (str): The column for which to calculate metrics.
-        usl (float): Upper specification limit.
-        lsl (float): Lower specification limit.
-    """
     rolling_mean = data[column].expanding().mean()
     rolling_std = data[column].expanding().std()
     cp = (usl - lsl) / (6 * rolling_std)
@@ -20,37 +14,35 @@ def stats_metrics(data, column, usl, lsl):
     cpk[rolling_std == 0] = 0
     return rolling_mean, rolling_std, cp, cpk

-
-async def process_unique_tool(tool, raw_data):
-    """
-    Process data for a single tool and save the results to a CSV file.
-    Args:
-        tool (str): Tool ID to process.
-        raw_data (pd.DataFrame): DataFrame containing the raw production data.
-    """
-    tool_data = raw_data[raw_data['Tool ID'] == tool].copy()
-    tool_data = tool_data[tool_data['Tool ID'] != 'N/A']
+def process_unique_tool(tool, tool_data):
     tool_data['pos_rolling_mean'], tool_data['pos_rolling_std'], tool_data['pos_rolling_cp'], tool_data['pos_rolling_cpk'] = stats_metrics(tool_data, 'Position', 0.5, 0.3)
     tool_data['ori_rolling_mean'], tool_data['ori_rolling_std'], tool_data['ori_rolling_cp'], tool_data['ori_rolling_cpk'] = stats_metrics(tool_data, 'Orientation', 0.6, 0.2)
     return tool, tool_data

-
 async def tools_metrics(raw_data):
-
-
-
+    filtered_data = raw_data[raw_data['Tool ID'] != 'N/A']
+    tools = filtered_data['Tool ID'].unique()
+
+    loop = asyncio.get_running_loop()
     metrics = {}
-    tools = raw_data['Tool ID'].unique()

-
-
+    with ThreadPoolExecutor() as executor:
+        tasks = [
+            loop.run_in_executor(
+                executor,
+                process_unique_tool,
+                tool,
+                filtered_data[filtered_data['Tool ID'] == tool].copy()
+            )
+            for tool in tools
+        ]
+
+        results = await asyncio.gather(*tasks)

     for tool, tool_data in results:
         metrics[f"tool_{tool}"] = tool_data

-
-    all_tools_data = raw_data.copy()
-    all_tools_data = all_tools_data[all_tools_data['Tool ID'] != 'N/A']
+    all_tools_data = filtered_data.copy()
     all_tools_data['pos_rolling_mean'], all_tools_data['pos_rolling_std'], all_tools_data['pos_rolling_cp'], all_tools_data['pos_rolling_cpk'] = stats_metrics(all_tools_data, 'Position', 0.5, 0.3)
     all_tools_data['ori_rolling_mean'], all_tools_data['ori_rolling_std'], all_tools_data['ori_rolling_cp'], all_tools_data['ori_rolling_cpk'] = stats_metrics(all_tools_data, 'Orientation', 0.6, 0.2)
     metrics['all'] = all_tools_data
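Note on the tools.py change above: per-tool processing moves off the event loop into a ThreadPoolExecutor via loop.run_in_executor, and the futures are awaited with asyncio.gather, so the async app stays responsive while pandas does the heavy lifting. Below is a self-contained sketch of the same pattern on a generic grouping column; heavy_per_group, process_groups and the column names are illustrative, not from the repository.

import asyncio
from concurrent.futures import ThreadPoolExecutor

import pandas as pd

def heavy_per_group(name, group):
    # Stand-in for the per-tool pandas work; runs in a worker thread.
    return name, group["value"].expanding().mean().iloc[-1]

async def process_groups(df):
    loop = asyncio.get_running_loop()
    results = {}
    with ThreadPoolExecutor() as executor:
        tasks = [
            loop.run_in_executor(executor, heavy_per_group, name, group.copy())
            for name, group in df.groupby("group")
        ]
        for name, value in await asyncio.gather(*tasks):
            results[name] = value
    return results

if __name__ == "__main__":
    df = pd.DataFrame({"group": ["A", "A", "B"], "value": [1.0, 2.0, 3.0]})
    print(asyncio.run(process_groups(df)))

Threads only buy real parallelism where pandas releases the GIL; the main win of this pattern is that the event loop is no longer blocked while each tool's DataFrame is processed.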
src/ui/dashboard.py
CHANGED
@@ -20,7 +20,6 @@ async def dataflow(state):

     if state['running']:
         if 'gen_task' not in state or state['gen_task'] is None or state['gen_task'].done():
-            print("Launching generate_data in background")
             state['gen_task'] = asyncio.create_task(generate_data(state))

     raw_data = state['data'].get('raw_df', pd.DataFrame())
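Note on the dashboard.py change above: only the debug print is dropped; the surrounding pattern, re-creating the generator task only when none exists or the previous one has finished, is what keeps a single generate_data task running in the background. A minimal sketch of that launch-once pattern; ensure_generator and the placeholder producer are illustrative, not from the repository.

import asyncio

async def generate_data(state):
    # Placeholder producer loop standing in for the real generate_data.
    while state["running"]:
        await asyncio.sleep(0.2)

def ensure_generator(state):
    # Start the background task only if it is missing or already done.
    task = state.get("gen_task")
    if task is None or task.done():
        state["gen_task"] = asyncio.create_task(generate_data(state))

async def main():
    state = {"running": True, "gen_task": None}
    ensure_generator(state)   # first call creates the task
    ensure_generator(state)   # second call is a no-op while the task is alive
    state["running"] = False
    await state["gen_task"]   # producer notices the stop flag and exits

asyncio.run(main())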