isurulkh committed on
Commit
c234a0e
·
verified ·
1 Parent(s): b7614b2

Upload 11 files

Browse files
Crop_Dataset.csv ADDED
The diff for this file is too large to render. See raw diff
 
Task_1_intellihack.ipynb ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": []
7
+ },
8
+ "kernelspec": {
9
+ "name": "python3",
10
+ "display_name": "Python 3"
11
+ },
12
+ "language_info": {
13
+ "name": "python"
14
+ }
15
+ },
16
+ "cells": [
17
+ {
18
+ "cell_type": "code",
19
+ "source": [
20
+ "!pip install -U scikit-learn"
21
+ ],
22
+ "metadata": {
23
+ "colab": {
24
+ "base_uri": "https://localhost:8080/"
25
+ },
26
+ "id": "yBUpTF0liOBf",
27
+ "outputId": "e71b2db0-5438-400e-891c-53ee35c10e4f"
28
+ },
29
+ "execution_count": 3,
30
+ "outputs": [
31
+ {
32
+ "output_type": "stream",
33
+ "name": "stdout",
34
+ "text": [
35
+ "Requirement already satisfied: scikit-learn in /usr/local/lib/python3.10/dist-packages (1.4.2)\n",
36
+ "Requirement already satisfied: numpy>=1.19.5 in /usr/local/lib/python3.10/dist-packages (from scikit-learn) (1.25.2)\n",
37
+ "Requirement already satisfied: scipy>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn) (1.11.4)\n",
38
+ "Requirement already satisfied: joblib>=1.2.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn) (1.4.0)\n",
39
+ "Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from scikit-learn) (3.5.0)\n"
40
+ ]
41
+ }
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "code",
46
+ "execution_count": 4,
47
+ "metadata": {
48
+ "colab": {
49
+ "base_uri": "https://localhost:8080/"
50
+ },
51
+ "id": "QhegGo_LT4a_",
52
+ "outputId": "59e8d839-82e5-41ab-ca92-35721dcd69a0"
53
+ },
54
+ "outputs": [
55
+ {
56
+ "output_type": "stream",
57
+ "name": "stdout",
58
+ "text": [
59
+ " N P K temperature humidity ph rainfall Total_Nutrients \\\n",
60
+ "0 90 42 43 20.879744 82.002744 6.502985 202.935536 175 \n",
61
+ "1 85 58 41 21.770462 80.319644 7.038096 226.655537 184 \n",
62
+ "2 60 55 44 23.004459 82.320763 7.840207 263.964248 159 \n",
63
+ "3 74 35 40 26.491096 80.158363 6.980401 242.864034 149 \n",
64
+ "4 78 42 42 20.130175 81.604873 7.628473 262.717340 162 \n",
65
+ "\n",
66
+ " Temperature_Humidity Log_Rainfall Label Label_Encoded \n",
67
+ "0 1712.196283 5.317804 wheat 0 \n",
68
+ "1 1748.595734 5.427834 wheat 0 \n",
69
+ "2 1893.744627 5.579595 wheat 0 \n",
70
+ "3 2123.482908 5.496611 wheat 0 \n",
71
+ "4 1642.720357 5.574878 wheat 0 \n",
72
+ "<class 'pandas.core.frame.DataFrame'>\n",
73
+ "RangeIndex: 2200 entries, 0 to 2199\n",
74
+ "Data columns (total 12 columns):\n",
75
+ " # Column Non-Null Count Dtype \n",
76
+ "--- ------ -------------- ----- \n",
77
+ " 0 N 2200 non-null int64 \n",
78
+ " 1 P 2200 non-null int64 \n",
79
+ " 2 K 2200 non-null int64 \n",
80
+ " 3 temperature 2200 non-null float64\n",
81
+ " 4 humidity 2200 non-null float64\n",
82
+ " 5 ph 2200 non-null float64\n",
83
+ " 6 rainfall 2200 non-null float64\n",
84
+ " 7 Total_Nutrients 2200 non-null int64 \n",
85
+ " 8 Temperature_Humidity 2200 non-null float64\n",
86
+ " 9 Log_Rainfall 2200 non-null float64\n",
87
+ " 10 Label 2200 non-null object \n",
88
+ " 11 Label_Encoded 2200 non-null int64 \n",
89
+ "dtypes: float64(6), int64(5), object(1)\n",
90
+ "memory usage: 206.4+ KB\n",
91
+ "None\n"
92
+ ]
93
+ }
94
+ ],
95
+ "source": [
96
+ "import pandas as pd\n",
97
+ "\n",
98
+ "# Load the dataset\n",
99
+ "data = pd.read_csv('/content/Crop_Dataset.csv')\n",
100
+ "\n",
101
+ "# Display the first few rows and the data info\n",
102
+ "print(data.head())\n",
103
+ "print(data.info())\n"
104
+ ]
105
+ },
106
+ {
107
+ "cell_type": "code",
108
+ "source": [
109
+ "from sklearn.preprocessing import LabelEncoder, StandardScaler\n",
110
+ "\n",
111
+ "# Assuming 'Label' is the column with categorical data\n",
112
+ "if data['Label'].dtype == 'object':\n",
113
+ " encoder = LabelEncoder()\n",
114
+ " data['Label_Encoded'] = encoder.fit_transform(data['Label'])\n",
115
+ " y = data['Label_Encoded']\n",
116
+ "else:\n",
117
+ " y = data['Label']\n",
118
+ "\n",
119
+ "# Exclude the label column from numeric operations\n",
120
+ "numeric_features = data.select_dtypes(include=['int64', 'float64'])\n",
121
+ "X = numeric_features.drop(['Label_Encoded'], axis=1, errors='ignore')\n",
122
+ "\n",
123
+ "# Scaling numeric features\n",
124
+ "scaler = StandardScaler()\n",
125
+ "X_scaled = scaler.fit_transform(X)"
126
+ ],
127
+ "metadata": {
128
+ "id": "8YDm7cLGVAdC"
129
+ },
130
+ "execution_count": 5,
131
+ "outputs": []
132
+ },
133
+ {
134
+ "cell_type": "code",
135
+ "source": [
136
+ "print(X.head())"
137
+ ],
138
+ "metadata": {
139
+ "colab": {
140
+ "base_uri": "https://localhost:8080/"
141
+ },
142
+ "id": "bxlFqxemUwVN",
143
+ "outputId": "8b0006fe-4fe9-4b98-8d8f-f66bdb4c9b0e"
144
+ },
145
+ "execution_count": 6,
146
+ "outputs": [
147
+ {
148
+ "output_type": "stream",
149
+ "name": "stdout",
150
+ "text": [
151
+ " N P K temperature humidity ph rainfall Total_Nutrients \\\n",
152
+ "0 90 42 43 20.879744 82.002744 6.502985 202.935536 175 \n",
153
+ "1 85 58 41 21.770462 80.319644 7.038096 226.655537 184 \n",
154
+ "2 60 55 44 23.004459 82.320763 7.840207 263.964248 159 \n",
155
+ "3 74 35 40 26.491096 80.158363 6.980401 242.864034 149 \n",
156
+ "4 78 42 42 20.130175 81.604873 7.628473 262.717340 162 \n",
157
+ "\n",
158
+ " Temperature_Humidity Log_Rainfall \n",
159
+ "0 1712.196283 5.317804 \n",
160
+ "1 1748.595734 5.427834 \n",
161
+ "2 1893.744627 5.579595 \n",
162
+ "3 2123.482908 5.496611 \n",
163
+ "4 1642.720357 5.574878 \n"
164
+ ]
165
+ }
166
+ ]
167
+ },
168
+ {
169
+ "cell_type": "code",
170
+ "source": [
171
+ "print(y.head())\n"
172
+ ],
173
+ "metadata": {
174
+ "colab": {
175
+ "base_uri": "https://localhost:8080/"
176
+ },
177
+ "id": "Xfyef1ZHVlv9",
178
+ "outputId": "1124a98a-7088-4beb-c99f-fc99695bce26"
179
+ },
180
+ "execution_count": 7,
181
+ "outputs": [
182
+ {
183
+ "output_type": "stream",
184
+ "name": "stdout",
185
+ "text": [
186
+ "0 21\n",
187
+ "1 21\n",
188
+ "2 21\n",
189
+ "3 21\n",
190
+ "4 21\n",
191
+ "Name: Label_Encoded, dtype: int64\n"
192
+ ]
193
+ }
194
+ ]
195
+ },
196
+ {
197
+ "cell_type": "code",
198
+ "source": [
199
+ "from sklearn.model_selection import train_test_split\n",
200
+ "\n",
201
+ "# Split the dataset into training and testing sets\n",
202
+ "X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)\n",
203
+ "X_train, X_test, y_train, y_test\n"
204
+ ],
205
+ "metadata": {
206
+ "colab": {
207
+ "base_uri": "https://localhost:8080/"
208
+ },
209
+ "id": "qeet3FQuWMYa",
210
+ "outputId": "3134ee1c-da4a-49c9-b23a-a2824087bce7"
211
+ },
212
+ "execution_count": 8,
213
+ "outputs": [
214
+ {
215
+ "output_type": "execute_result",
216
+ "data": {
217
+ "text/plain": [
218
+ "(array([[-0.90904306, -1.13294593, -0.67439784, ..., -1.31493084,\n",
219
+ " -0.49027085, 0.24780902],\n",
220
+ " [-0.36716896, 0.77739624, -0.57565467, ..., -0.21356106,\n",
221
+ " 0.07991257, -0.46657409],\n",
222
+ " [-1.17998011, 0.59545889, -0.45716288, ..., -0.58902803,\n",
223
+ " -0.16692839, -1.2389468 ],\n",
224
+ " ...,\n",
225
+ " [-1.07160529, -0.5264881 , -0.33867109, ..., -0.9269483 ,\n",
226
+ " -0.5842483 , 0.199803 ],\n",
227
+ " [-1.07160529, 2.14192637, 3.07784228, ..., 2.33961433,\n",
228
+ " -1.1140468 , -0.41541788],\n",
229
+ " [-0.50263749, 0.74707335, -0.51640878, ..., -0.25110776,\n",
230
+ " -0.51417889, -0.93933906]]),\n",
231
+ " array([[ 1.36682815, -1.10262304, -0.02269297, ..., 0.16190591,\n",
232
+ " 1.34399451, -2.20354942],\n",
233
+ " [ 1.28554704, -1.37552907, 0.05630155, ..., 0.06178138,\n",
234
+ " 0.58762688, -1.07859766],\n",
235
+ " [ 0.22889255, 0.26190709, 0.01680429, ..., 0.22448374,\n",
236
+ " 3.13720326, 0.44554626],\n",
237
+ " ...,\n",
238
+ " [ 1.90870225, -0.19293629, -0.63490057, ..., 0.39970166,\n",
239
+ " 0.02516414, -0.38782438],\n",
240
+ " [ 1.77323373, -0.04132183, -0.57565467, ..., 0.43724835,\n",
241
+ " -0.17876826, -0.5282515 ],\n",
242
+ " [-1.23416752, 0.44384444, -0.55590604, ..., -0.73921482,\n",
243
+ " -1.75019501, 0.99674145]]),\n",
244
+ " 1656 4\n",
245
+ " 752 2\n",
246
+ " 892 12\n",
247
+ " 1041 7\n",
248
+ " 1179 3\n",
249
+ " ..\n",
250
+ " 1638 4\n",
251
+ " 1095 7\n",
252
+ " 1130 3\n",
253
+ " 1294 9\n",
254
+ " 860 12\n",
255
+ " Name: Label_Encoded, Length: 1760, dtype: int64,\n",
256
+ " 1451 16\n",
257
+ " 1334 13\n",
258
+ " 1761 18\n",
259
+ " 1735 18\n",
260
+ " 1576 11\n",
261
+ " ..\n",
262
+ " 59 21\n",
263
+ " 71 21\n",
264
+ " 1908 14\n",
265
+ " 1958 14\n",
266
+ " 482 8\n",
267
+ " Name: Label_Encoded, Length: 440, dtype: int64)"
268
+ ]
269
+ },
270
+ "metadata": {},
271
+ "execution_count": 8
272
+ }
273
+ ]
274
+ },
275
+ {
276
+ "cell_type": "code",
277
+ "source": [
278
+ "from sklearn.tree import DecisionTreeClassifier\n",
279
+ "from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier\n",
280
+ "from sklearn.svm import SVC\n",
281
+ "from sklearn.neighbors import KNeighborsClassifier\n",
282
+ "from sklearn.metrics import accuracy_score\n",
283
+ "import joblib\n",
284
+ "\n",
285
+ "\n",
286
+ "# Define the models\n",
287
+ "models = {\n",
288
+ " 'Decision Tree': DecisionTreeClassifier(random_state=42),\n",
289
+ " 'Random Forest': RandomForestClassifier(random_state=42),\n",
290
+ " 'SVM': SVC(kernel='rbf', random_state=42),\n",
291
+ " 'KNN': KNeighborsClassifier(),\n",
292
+ " 'Gradient Boosting': GradientBoostingClassifier(random_state=42)\n",
293
+ "}\n",
294
+ "\n",
295
+ "# Train each model and evaluate on the training set\n",
296
+ "train_accuracies = {}\n",
297
+ "for name, model in models.items():\n",
298
+ " model.fit(X_train, y_train)\n",
299
+ " y_train_pred = model.predict(X_train)\n",
300
+ " train_accuracy = accuracy_score(y_train, y_train_pred)\n",
301
+ " train_accuracies[name] = train_accuracy\n",
302
+ " print(f\"{name} training accuracy: {train_accuracy:.4f}\")\n",
303
+ "\n",
304
+ " # Save the model\n",
305
+ " model_filename = f'{name.replace(\" \", \"_\").lower()}_model.joblib'\n",
306
+ " joblib.dump(model, model_filename)\n",
307
+ " print(f\"Saved {name} model as {model_filename}\")\n"
308
+ ],
309
+ "metadata": {
310
+ "colab": {
311
+ "base_uri": "https://localhost:8080/"
312
+ },
313
+ "id": "TsmaeAEYbj6Y",
314
+ "outputId": "0b6e493c-9421-4d88-8e97-0591713968e3"
315
+ },
316
+ "execution_count": 9,
317
+ "outputs": [
318
+ {
319
+ "output_type": "stream",
320
+ "name": "stdout",
321
+ "text": [
322
+ "Decision Tree training accuracy: 1.0000\n",
323
+ "Saved Decision Tree model as decision_tree_model.joblib\n",
324
+ "Random Forest training accuracy: 1.0000\n",
325
+ "Saved Random Forest model as random_forest_model.joblib\n",
326
+ "SVM training accuracy: 0.9875\n",
327
+ "Saved SVM model as svm_model.joblib\n",
328
+ "KNN training accuracy: 0.9881\n",
329
+ "Saved KNN model as knn_model.joblib\n",
330
+ "Gradient Boosting training accuracy: 1.0000\n",
331
+ "Saved Gradient Boosting model as gradient_boosting_model.joblib\n"
332
+ ]
333
+ }
334
+ ]
335
+ },
336
+ {
337
+ "cell_type": "code",
338
+ "source": [
339
+ "# Example new data for prediction\n",
340
+ "new_data = [[129,\t43,\t16, 25.5503704,\t77.85055621,\t6.73210948,\t78.58488484,\t188,\t1989.110547,\t4.376824186]] # Adjust these values as necessary\n",
341
+ "new_data_scaled = scaler.transform(new_data) # Assuming 'scaler' is already fitted and saved/loaded similarly\n",
342
+ "\n",
343
+ "# Load models and make predictions\n",
344
+ "predictions = {}\n",
345
+ "for name in models.keys():\n",
346
+ " model_filename = f'{name.replace(\" \", \"_\").lower()}_model.joblib'\n",
347
+ " loaded_model = joblib.load(model_filename)\n",
348
+ " prediction = loaded_model.predict(new_data_scaled)\n",
349
+ " predictions[name] = prediction\n",
350
+ "\n",
351
+ " # Assuming you have loaded your LabelEncoder as 'encoder'\n",
352
+ " decoded_prediction = encoder.inverse_transform(prediction)\n",
353
+ " print(f\"{name} prediction: {decoded_prediction}\")\n"
354
+ ],
355
+ "metadata": {
356
+ "colab": {
357
+ "base_uri": "https://localhost:8080/"
358
+ },
359
+ "id": "448K06w7cT6d",
360
+ "outputId": "9263c1d3-228a-4e45-95c0-87b5d2f7c0b9"
361
+ },
362
+ "execution_count": 10,
363
+ "outputs": [
364
+ {
365
+ "output_type": "stream",
366
+ "name": "stdout",
367
+ "text": [
368
+ "Decision Tree prediction: ['potatoes']\n",
369
+ "Random Forest prediction: ['potatoes']\n",
370
+ "SVM prediction: ['potatoes']\n",
371
+ "KNN prediction: ['potatoes']\n"
372
+ ]
373
+ },
374
+ {
375
+ "output_type": "stream",
376
+ "name": "stderr",
377
+ "text": [
378
+ "/usr/local/lib/python3.10/dist-packages/sklearn/base.py:493: UserWarning: X does not have valid feature names, but StandardScaler was fitted with feature names\n",
379
+ " warnings.warn(\n"
380
+ ]
381
+ },
382
+ {
383
+ "output_type": "stream",
384
+ "name": "stdout",
385
+ "text": [
386
+ "Gradient Boosting prediction: ['potatoes']\n"
387
+ ]
388
+ }
389
+ ]
390
+ },
391
+ {
392
+ "cell_type": "code",
393
+ "source": [
394
+ "# Save the scaler to a file\n",
395
+ "joblib.dump(scaler, 'base_feature_scaler.joblib')"
396
+ ],
397
+ "metadata": {
398
+ "colab": {
399
+ "base_uri": "https://localhost:8080/"
400
+ },
401
+ "id": "ByINvSM1gSHN",
402
+ "outputId": "6bbfe644-762c-4502-a07d-05dbcf26fd4b"
403
+ },
404
+ "execution_count": 11,
405
+ "outputs": [
406
+ {
407
+ "output_type": "execute_result",
408
+ "data": {
409
+ "text/plain": [
410
+ "['base_feature_scaler.joblib']"
411
+ ]
412
+ },
413
+ "metadata": {},
414
+ "execution_count": 11
415
+ }
416
+ ]
417
+ },
418
+ {
419
+ "cell_type": "code",
420
+ "source": [
421
+ "# Save the LabelEncoder to a file\n",
422
+ "joblib.dump(encoder, 'label_encoder.joblib')"
423
+ ],
424
+ "metadata": {
425
+ "colab": {
426
+ "base_uri": "https://localhost:8080/"
427
+ },
428
+ "id": "c9Uu7lsPgSk7",
429
+ "outputId": "d3bade81-25b7-47a6-e9c0-c14d2ea39339"
430
+ },
431
+ "execution_count": 12,
432
+ "outputs": [
433
+ {
434
+ "output_type": "execute_result",
435
+ "data": {
436
+ "text/plain": [
437
+ "['label_encoder.joblib']"
438
+ ]
439
+ },
440
+ "metadata": {},
441
+ "execution_count": 12
442
+ }
443
+ ]
444
+ }
445
+ ]
446
+ }
app.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import numpy as np
3
+ import joblib
4
+
5
+ # Load models and scaler
6
+ scaler = joblib.load('models/base_feature_scaler.joblib')
7
+ models = {
8
+ 'Decision Tree': joblib.load('models/decision_tree_model.joblib'),
9
+ 'Random Forest': joblib.load('models/random_forest_model.joblib'),
10
+ 'SVM': joblib.load('models/svm_model.joblib'),
11
+ 'KNN': joblib.load('models/knn_model.joblib'),
12
+ 'Gradient Boosting': joblib.load('models/gradient_boosting_model.joblib')
13
+ }
14
+ encoder = joblib.load('models/label_encoder.joblib')
15
+
16
+ # Streamlit app layout
17
+ st.title('Crop Recommendation System')
18
+ st.markdown("""
19
+ This application provides recommendations for the most suitable crops based on environmental and soil conditions.
20
+ Please adjust the input values in the sidebar to reflect your local conditions. Here's a brief explanation of each input:
21
+
22
+ - **Nitrogen (N), Phosphorus (P), Potassium (K):** Essential nutrients required by crops. Values should be in kg/ha.
23
+ - **Temperature (C):** The average temperature of the area in degrees Celsius.
24
+ - **Humidity (%):** Average relative humidity in percentage.
25
+ - **pH Level:** Soil acidity or alkalinity on a scale from 0 to 14.
26
+ - **Rainfall (mm):** Annual rainfall in millimeters.
27
+ - **Total Nutrients:** Sum of all nutrient inputs.
28
+ - **Temperature Humidity Index:** A combined index of temperature and humidity.
29
+ - **Log Rainfall:** The logarithmic value of rainfall, providing a transformed perspective of rainfall data.
30
+
31
+ Each model will provide a prediction based on these inputs.
32
+ """)
33
+
34
+ # Input fields for all 10 features
35
+ with st.sidebar:
36
+ st.header('Input Features')
37
+ N = st.number_input('Nitrogen (N)', min_value=0, max_value=200, value=50, help="Enter the amount of Nitrogen in the soil")
38
+ P = st.number_input('Phosphorus (P)', min_value=0, max_value=200, value=40, help="Enter the amount of Phosphorus in the soil")
39
+ K = st.number_input('Potassium (K)', min_value=0, max_value=200, value=30, help="Enter the amount of Potassium in the soil")
40
+ with st.expander("Advanced Environmental Settings"):
41
+ temperature = st.slider('Temperature (C)', -10.0, 50.0, 25.0)
42
+ humidity = st.slider('Humidity (%)', 0.0, 100.0, 80.0)
43
+ ph = st.slider('pH Level', 0.0, 14.0, 6.5)
44
+ rainfall = st.slider('Rainfall (mm)', 0.0, 400.0, 100.0)
45
+ total_nutrients = st.number_input('Total Nutrients', min_value=0, max_value=500, value=150)
46
+ temperature_humidity = st.slider('Temperature Humidity Index', 0.0, 5000.0, 1500.0)
47
+ log_rainfall = st.slider('Log Rainfall', 0.0, 10.0, 5.0)
48
+
49
+ # Create feature array and scale
50
+ features = np.array([[N, P, K, temperature, humidity, ph, rainfall, total_nutrients, temperature_humidity, log_rainfall]])
51
+ features_scaled = scaler.transform(features)
52
+
53
+ # Display predictions
54
+ st.header('Predictions')
55
+ for name, model in models.items():
56
+ prediction = model.predict(features_scaled)
57
+ crop = encoder.inverse_transform(prediction)[0] # Decode prediction
58
+ st.write(f'{name} predicts: {crop}')
models/base_feature_scaler.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5462e69f94a707cd3cda8b66a1f693afc64a9be192f7d3feca8f7ee64dfcd643
3
+ size 1303
models/decision_tree_model.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4166c8b4063e7c545aef5a958868d54bae0aef0f901a1c7c3f71b8ee2da5b88
3
+ size 18505
models/gradient_boosting_model.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aee64ca5191be5cd39c0021153add91351dc7718756e1a319db3ac4f93546fc7
3
+ size 3947837
models/knn_model.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51f069da3144eccfd9b2a78312fc1bb1af2d8d8f5bc5db06537efcdfa27fff93
3
+ size 323478
models/label_encoder.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8107a00fca8f49e7858b6301572734f45ffe132064db908c8d9ca1fd25d6de6c
3
+ size 843
models/random_forest_model.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5c73d908a14e26bb64f71f22b1715945d5735229a25230957ee869017e82f1e
3
+ size 2704185
models/svm_model.joblib ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b78b197c8d1ea2e83eb937ea9b26c622d9e248fa9f6fb4ee5f2d82d539013ad6
3
+ size 376307
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ numpy
2
+ pandas
3
+ scikit-learn
4
+ streamlit
5
+ joblib