import joblib
import gradio as gr
import pandas as pd
from flask import Flask, request, jsonify
from sklearn.compose import ColumnTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder, StandardScaler


# Load the CSV data
data = pd.read_csv('dataset.csv')

# Split the data into features and labels
X = data.drop('PlacedOrNot', axis=1)
y = data['PlacedOrNot']

# Encode categorical features (LabelEncoder maps each category to an integer;
# for feature columns rather than targets, sklearn's OrdinalEncoder is the
# more idiomatic choice, but the result is the same for a binary column)
categorical_features = ['HistoryOfBacklogs']
for feature in categorical_features:
    encoder = LabelEncoder()
    X[feature] = encoder.fit_transform(X[feature])

# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Create the preprocessing-and-model pipeline
numerical_features = ['Internships', 'CGPA']
numerical_transformer = StandardScaler()
categorical_transformer = SimpleImputer(strategy='most_frequent')
preprocessor = ColumnTransformer(
    transformers=[
        ('num', numerical_transformer, numerical_features),
        ('cat', categorical_transformer, categorical_features)
    ])

pipeline = Pipeline([
    ('preprocessor', preprocessor),
    ('classifier', RandomForestClassifier(random_state=42))
])

# Train the model
pipeline.fit(X_train, y_train)

# Evaluate the model
accuracy = pipeline.score(X_test, y_test)
print('Accuracy:', accuracy)
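# Optional: persist the trained pipeline with joblib (imported above) so it
# can be reloaded without retraining. A minimal sketch; the file name is an
# assumption. Uncomment to use:
# joblib.dump(pipeline, 'placement_pipeline.joblib')
# pipeline = joblib.load('placement_pipeline.joblib')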

# Define the function to make predictions using the trained model
def predict_placement(Internships, CGPA, HistoryOfBacklogs):
    # Create a DataFrame from the input data
    input_df = pd.DataFrame({'Internships': [Internships], 'CGPA': [CGPA], 'HistoryOfBacklogs': [HistoryOfBacklogs]})
    
    # Make a prediction using the trained model
    prediction = pipeline.predict(input_df)[0]
    
    # Return the predicted label
    return 'Placed' if prediction else 'Not Placed'
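
# Example call (illustrative values, not taken from the dataset):
# predict_placement(2, 8.5, 0)  # -> 'Placed' or 'Not Placed'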

# Create the Gradio interface (one input component per function argument)
iface = gr.Interface(
    fn=predict_placement,
    inputs=[
        gr.Number(label='Internships'),
        gr.Number(label='CGPA'),
        gr.Number(label='HistoryOfBacklogs'),
    ],
    outputs=gr.Label(),
    title='Student Job Placement Predictor',
    description='Predicts whether a student will be placed in a job or not '
                'based on internships, CGPA, and history of backlogs.')

# Launch the Gradio interface
iface.launch()
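
# Alternative serving sketch using the Flask imports at the top of the file.
# The route name, JSON field names, and port are assumptions, not part of the
# original app; uncomment to serve predictions over HTTP instead of Gradio.
#
# app = Flask(__name__)
#
# @app.route('/predict', methods=['POST'])
# def predict():
#     payload = request.get_json()
#     label = predict_placement(payload['Internships'],
#                               payload['CGPA'],
#                               payload['HistoryOfBacklogs'])
#     return jsonify({'prediction': label})
#
# if __name__ == '__main__':
#     app.run(port=5000)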