import argparse
import os

import joblib
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, PowerTransformer


def train(dataset_pth):
    # Load the dataset and keep only the sensor features plus the target column.
    df = pd.read_csv(dataset_pth)
    features = [
        "Torque(Nm)",
        "Hydraulic_Pressure(bar)",
        "Cutting(kN)",
        "Coolant_Pressure(bar)",
        "Spindle_Speed(RPM)",
        "Coolant_Temperature",
        "Downtime",
    ]
    df = df[features].dropna().reset_index(drop=True)

    X = df.drop("Downtime", axis=1)
    y = df["Downtime"]

    # Stratified split so the Downtime class balance is preserved in the test set.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.20, random_state=42, stratify=y
    )

    # Power-transform the features toward a Gaussian shape; fit on the training
    # split only, then apply the same fitted transform to the test split.
    transform = PowerTransformer()
    X_train = transform.fit_transform(X_train)
    X_test = transform.transform(X_test)

    # Encode the Downtime labels as integers for the classifier and metrics.
    encoder = LabelEncoder()
    y_train = encoder.fit_transform(y_train)
    y_test = encoder.transform(y_test)

    model = RandomForestClassifier(random_state=42)
    model.fit(X_train, y_train)
    predict = model.predict(X_test)

    # Persist the fitted transformer, label encoder and model for later inference.
    app_dir = os.path.join(os.getcwd(), "app")
    os.makedirs(app_dir, exist_ok=True)
    joblib.dump(transform, os.path.join(app_dir, "transform.pkl"))
    joblib.dump(encoder, os.path.join(app_dir, "encoder.pkl"))
    joblib.dump(model, os.path.join(app_dir, "model.pkl"))

    return {
        "Accuracy": accuracy_score(y_test, predict),
        "F1_Score": f1_score(y_test, predict),
    }


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset_pth",
        default="/home/sudhanshu/manufacturing_defect/Manufacturing_Downtime_Dataset.csv",
    )
    args = parser.parse_args()
    results = train(args.dataset_pth)

    print(f"Accuracy: {results['Accuracy']}\n")
    print(f"F1_Score: {results['F1_Score']}")