moahmedwafy committed on
Commit
2a021c2
·
1 Parent(s): 55e70b5

merge with huggingface

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,3 @@
+ .env
+ env/
+ __pycache__/
Dockerfile ADDED
@@ -0,0 +1,25 @@
+ # Use the official Python image from Docker Hub
+ FROM python:3.9-slim
+
+ # Set the working directory in the container
+ WORKDIR /app
+
+ # Copy the requirements.txt file into the container at /app
+ COPY requirements.txt requirements.txt
+
+ # Install the dependencies
+ RUN pip install --no-cache-dir -r requirements.txt
+
+ # Copy the rest of the application code into the container at /app
+ COPY . .
+
+ # Set environment variables for Flask
+ ENV FLASK_APP=server.py
+ ENV FLASK_RUN_HOST=0.0.0.0
+ ENV FLASK_RUN_PORT=7860
+
+ # Expose port 7860 to the outside world
+ EXPOSE 7860
+
+ # Run the application
+ CMD ["flask", "run"]
models/fitness_model.py ADDED
@@ -0,0 +1,259 @@
+ from sklearn.preprocessing import OneHotEncoder
+ import random
+ import pandas as pd
+ import os
+ import pickle
+
+ SERVER_FILE_DIR = os.path.dirname(os.path.abspath(__file__))
+ FITNESS_MODEL_PATH = os.path.join(
+     SERVER_FILE_DIR, *"../resources/models/fitness_model.pkl".split("/")
+ )
+
+
+ class FitnessModel:
+     def __init__(self, excercise_path, kmeans_path, plan_classifier_path):
+         self.data = pd.read_csv(excercise_path)
+         self.kmeans = None
+         self.plan_classifier = None
+         self.encoder = None
+         self.cluster_data = {}
+         self.X_train_cols = [
+             "level_Advanced",
+             "level_Beginner",
+             "level_Intermediate",
+             "goal_ Get Fitter",
+             "goal_ Lose Weight",
+             "goal_Gain Muscle",
+             "goal_Get Fitter",
+             "goal_Increase Endurance",
+             "goal_Increase Strength",
+             "goal_Sports Performance",
+             "gender_Female",
+             "gender_Male",
+             "gender_Male & Female",
+         ]
+
+         # Load the k-means clustering model
+         with open(kmeans_path, "rb") as f:
+             self.kmeans = pickle.load(f)
+
+         # Load the plan classifier model
+         with open(plan_classifier_path, "rb") as f:
+             self.plan_classifier = pickle.load(f)
+
+         # Iterate over each cluster label
+         for cluster_label in range(90):
+             # Filter the dataset to get data for the current cluster
+             cluster_subset = self.data[self.data["cluster"] == cluster_label]
+
+             # Add the cluster data to the dictionary
+             self.cluster_data[cluster_label] = cluster_subset
+
+         features = self.data[["Level", "goal", "bodyPart"]]
+
+         # Fit the one-hot encoder on the categorical features
+         self.encoder = OneHotEncoder(sparse_output=False)
+         self.encoder.fit(features)
+
+     def choose_plan(self, level, goal, gender):
+         # Convert the input into a DataFrame
+         input_data = pd.DataFrame(
+             {"level": [level], "goal": [goal], "gender": [gender]}
+         )
+
+         # One-hot encode the input data
+         input_encoded = pd.get_dummies(input_data, columns=["level", "goal", "gender"])
+
+         # Ensure the input has the same columns the model was trained on;
+         # this is necessary when some categories are missing from the input
+         missing_cols = set(self.X_train_cols) - set(input_encoded.columns)
+         for col in missing_cols:
+             input_encoded[col] = 0
+
+         # Reorder columns to match the order of columns in X_train
+         input_encoded = input_encoded[self.X_train_cols]
+
+         # Make a prediction for the given input using the trained model
+         prediction = self.plan_classifier.predict(input_encoded)
+
+         # Convert each predicted day (a comma-separated string) into a list of body parts
+         daily_activities_lists = [day.split(", ") for day in prediction[0]]
+
+         return daily_activities_lists
+
+     def get_daily_recommendation(self, home_or_gym, level, goal, bodyParts, equipments):
+         if goal in ["Lose Weight", "Get Fitter"]:
+             goal = "Get Fitter & Lose Weight"
+         daily_recommendations = []
+
+         bodyParts = [bp for bp in bodyParts if "-" not in bp]
+         # Repeat elements in bodyParts until it reaches a size of at least 6
+         while len(bodyParts) < 6:
+             bodyParts += bodyParts
+
+         # Limit bodyParts to size 6
+         bodyParts = bodyParts[:6]
+
+         for bodyPart in bodyParts:
+             # Predict the cluster for the given combination of level, goal, and body part
+             input_data = [[level, goal, bodyPart]]
+             predicted_cluster = self.kmeans.predict(self.encoder.transform(input_data))[
+                 0
+             ]
+
+             # Get data for the predicted cluster
+             cluster_subset = self.cluster_data[predicted_cluster]
+
+             # Filter data based on location (home or gym)
+             if home_or_gym == 0:
+                 cluster_subset = cluster_subset[
+                     ~cluster_subset["equipment"].isin(equipments)
+                 ]
+
+             # Randomly select one exercise from the cluster if any are left after equipment filtering
+             if not cluster_subset.empty:
+                 selected_exercise = random.choice(
+                     cluster_subset.to_dict(orient="records")
+                 )
+                 daily_recommendations.append(selected_exercise)
+
+         # Remove duplicates from the list
+         unique_recommendations = []
+         seen_names = set()
+         for exercise in daily_recommendations:
+             if exercise["name"] not in seen_names:
+                 unique_recommendations.append(exercise)
+                 seen_names.add(exercise["name"])
+
+         return unique_recommendations
+
+     def get_gender_adjustment(self, gender):
+         return 1.0 if gender == "Male" else 0.7
+
+     def get_age_adjustment(self, age):
+         if age < 30:
+             return 1.0
+         elif 30 <= age < 50:
+             return 0.5
+         else:
+             return 0.1
+
+     def get_level_adjustment(self, level):
+         if level == "Beginner":
+             return 0.8
+         elif level == "Intermediate":
+             return 1.0
+         elif level == "Advanced":
+             return 1.2
+
+     def get_body_part_adjustment(self, body_part):
+         body_parts = {
+             "chest": 1,
+             "shoulders": 0.8,
+             "waist": 0.6,
+             "upper legs": 0.7,
+             "back": 0.9,
+             "lower legs": 0.5,
+             "upper arms": 0.8,
+             "cardio": 0.7,
+             "lower arms": 0.6,
+             "neck": 0.5,
+         }
+         return body_parts.get(body_part, 0)
+
+     def adjust_workout(self, gender, age, feedback, body_part, level, old_weight):
+         gender_adjustment = self.get_gender_adjustment(gender)
+         age_adjustment = self.get_age_adjustment(age)
+         level_adjustment = self.get_level_adjustment(level)
+         body_part_adjustment = self.get_body_part_adjustment(body_part)
+
+         increasing_factor_of_weight = (
+             age_adjustment
+             * body_part_adjustment
+             * gender_adjustment
+             * level_adjustment
+             * 0.3
+         )
+
+         # Negative feedback reduces the weight slightly instead of increasing it
+         if not feedback:
+             increasing_factor_of_weight = (1 - increasing_factor_of_weight) * -0.1
+
+         new_weight = old_weight + increasing_factor_of_weight * old_weight
+
+         return new_weight
+
+     def calculate_new_repetition(self, level, goal):
+         if goal in ["Lose Weight", "Get Fitter"]:
+             if level == "Beginner":
+                 return 15
+             elif level == "Intermediate":
+                 return 12
+             elif level == "Advanced":
+                 return 10
+         elif goal == "Gain Muscle":
+             if level == "Beginner":
+                 return 10
+             elif level == "Intermediate":
+                 return 8
+             elif level == "Advanced":
+                 return 6
+
+     def calculate_new_duration(self, level):
+         if level == "Beginner":
+             return 20
+         elif level == "Intermediate":
+             return 50
+         elif level == "Advanced":
+             return 80
+
+     def predict(
+         self, home_or_gym, level, goal, gender, age, feedback, old_weight, equipments
+     ):
+         plan = self.choose_plan(level, goal, gender)
+
+         # Repeat the weekly plan until it covers 30 days, then trim to exactly 30
+         while len(plan) < 30:
+             plan.extend(plan)
+         plan = plan[:30]
+
+         all_recommendations = []
+         for day_body_parts in plan:
+             daily_exercises = self.get_daily_recommendation(
+                 home_or_gym, level, goal, day_body_parts, equipments
+             )
+             daily_recommendations = []
+
+             for exercise in daily_exercises:
+                 weights = self.adjust_workout(
+                     gender, age, feedback, exercise["bodyPart"], level, old_weight
+                 )
+                 repetitions = self.calculate_new_repetition(level, goal)
+                 duration = self.calculate_new_duration(level)
+                 weights_or_duration = (
+                     weights if exercise["type"] == "weight" else duration
+                 )
+                 exercise_recommendations = {
+                     "name": exercise["name"],
+                     "type": exercise["type"],
+                     "equipment": exercise["equipment"],
+                     "bodyPart": exercise["bodyPart"],
+                     "target": exercise["target"],
+                     "weights_or_duration": weights_or_duration,
+                     "sets": exercise["sets"],
+                     "repetitions": repetitions,
+                 }
+                 daily_recommendations.append(exercise_recommendations)
+             all_recommendations.append(daily_recommendations)
+
+         return all_recommendations
+
+     @classmethod
+     def load(cls):
+         with open(FITNESS_MODEL_PATH, "rb") as f:
+             fitness_model = pickle.load(f)
+
+         return fitness_model
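
For orientation, here is a minimal, hypothetical usage sketch of FitnessModel. It assumes the pickled object at resources/models/fitness_model.pkl was built with this class; the argument values (category strings, weight in kg) are illustrative assumptions, not values confirmed by the training data.

# Hypothetical usage sketch; example argument values are assumptions.
from models.fitness_model import FitnessModel

model = FitnessModel.load()  # unpickles resources/models/fitness_model.pkl

plan = model.predict(
    home_or_gym=1,          # 0 would exclude the listed equipment (home workout)
    level="Beginner",
    goal="Gain Muscle",
    gender="Male",
    age=25,
    feedback=True,          # True -> increase the load; False -> reduce it slightly
    old_weight=20,          # previous working weight (units assumed to be kg)
    equipments=["barbell"],
)

print(len(plan))   # 30 days
print(plan[0])     # list of exercise dicts for day 1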
models/nutrition_model.py ADDED
@@ -0,0 +1,97 @@
+ import random
+ import pandas as pd
+ import numpy as np
+ import pickle
+ import os
+
+ SERVER_FILE_DIR = os.path.dirname(os.path.abspath(__file__))
+ NUTRITION_MODEL_PATH = os.path.join(SERVER_FILE_DIR, "../resources/models/nutrition_model.pkl")
+ MEALS_JSON_PATH = os.path.join(SERVER_FILE_DIR, "../resources/datasets/meals.json")
+
+ # Ensure the meals dataset exists before loading it
+ if not os.path.exists(MEALS_JSON_PATH):
+     raise FileNotFoundError(f"File {MEALS_JSON_PATH} does not exist")
+
+ df = pd.read_json(MEALS_JSON_PATH)
+
+
+ class NutritionModel:
+     def __init__(self):
+         self.load()
+
+     def generate_plan(self, calories):
+         # Target nutrient profile for lunch (roughly 50% of the daily calories)
+         lunch_attr = {
+             "Calories": calories * 0.5,
+             "FatContent": random.uniform(19, 97),
+             "SaturatedFatContent": random.uniform(6, 12),
+             "CholesterolContent": random.uniform(77, 299),
+             "SodiumContent": random.uniform(565, 2299),
+             "CarbohydrateContent": random.uniform(28, 317),
+             "FiberContent": random.uniform(2, 38),
+             "SugarContent": random.uniform(0, 38),
+             "ProteinContent": random.uniform(20, 123)
+         }
+
+         lunch_df = pd.DataFrame(lunch_attr, index=[0])
+
+         # Target nutrient profile for breakfast (roughly 30% of the daily calories)
+         breakfast_attr = {
+             "Calories": calories * 0.30,
+             "FatContent": random.uniform(8.7, 20),
+             "SaturatedFatContent": random.uniform(1.7, 3.7),
+             "CholesterolContent": random.uniform(0, 63),
+             "SodiumContent": random.uniform(163, 650),
+             "CarbohydrateContent": random.uniform(23, 56),
+             "FiberContent": random.uniform(2.6, 8),
+             "SugarContent": random.uniform(3.5, 13),
+             "ProteinContent": random.uniform(6, 25)
+         }
+
+         breakfast_df = pd.DataFrame(breakfast_attr, index=[0])
+
+         # Target nutrient profile for dinner (roughly 30% of the daily calories)
+         dinner_attr = {
+             "Calories": calories * 0.30,
+             "FatContent": random.uniform(8.7, 20),
+             "SaturatedFatContent": random.uniform(1.7, 3.7),
+             "CholesterolContent": random.uniform(0, 63),
+             "SodiumContent": random.uniform(163, 650),
+             "CarbohydrateContent": random.uniform(23, 56),
+             "FiberContent": random.uniform(2.6, 8),
+             "SugarContent": random.uniform(3.5, 13),
+             "ProteinContent": random.uniform(6, 25)
+         }
+
+         dinner_df = pd.DataFrame(dinner_attr, index=[0])
+
+         # Target nutrient profile for a snack (fixed calorie range)
+         snack_attr = {
+             "Calories": random.uniform(90, 190),
+             "FatContent": random.uniform(1.7, 10),
+             "SaturatedFatContent": random.uniform(0.7, 3),
+             "CholesterolContent": random.uniform(2, 16),
+             "SodiumContent": random.uniform(47, 200),
+             "CarbohydrateContent": random.uniform(10, 31),
+             "FiberContent": random.uniform(0.4, 2.5),
+             "SugarContent": random.uniform(5.7, 21),
+             "ProteinContent": random.uniform(3, 20)
+         }
+
+         snack_df = pd.DataFrame(snack_attr, index=[0])
+
+         # The pipeline's transform step is expected to return indices of recommended
+         # meals in the dataset for each target profile
+         lunch = self.nutrition_model.transform(lunch_df)
+         breakfast = self.nutrition_model.transform(breakfast_df)
+         dinner = self.nutrition_model.transform(dinner_df)
+         snack = self.nutrition_model.transform(snack_df)
+
+         meals = np.concatenate((breakfast, lunch, dinner, snack), axis=0)
+         meals = np.transpose(meals)
+
+         # Row i of the transposed array holds one breakfast, lunch, dinner, and snack
+         # index for day i; look the meals up in the dataset
+         days = []
+         for i in range(7):
+             day_meals = df.iloc[meals[i]].to_dict(orient="records")
+             days.append(day_meals)
+
+         return days
+
+     def load(self):
+         with open(NUTRITION_MODEL_PATH, "rb") as f:
+             self.nutrition_model = pickle.load(f)
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ Flask>=3.0.0,<4.0.0
+ numpy>=1.20.0,<2.0.0
+ anakin-language-server>=1.0.0,<2.0.0
+ python-dotenv>=1.0.0,<2.0.0
+ scikit-learn>=1.2.0,<1.3.0
+ black>=24.0.0,<25.0.0
+ pandas>=2.2.0,<2.3.0
resources/datasets/meals.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0149719d8294fc792dfe053133b3c9edce076b5d69978b26185e96cfec9a5140
+ size 4038577
resources/models/fitness_model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:665d34c71c506fa1cdbd8d74b54f6ca84f1b9f5a397a6bb90d608cc699f2a61d
+ size 95457799
resources/models/nutrition_model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9cd0dc84cc9dcc0c985725e8d5cda8d9f9b0571c6c3219bdef2309f177b46ae1
+ size 258480
server.py ADDED
@@ -0,0 +1,57 @@
+ from flask import Flask, request, jsonify
+ from dotenv import load_dotenv
+ from models.fitness_model import FitnessModel
+ from models.nutrition_model import NutritionModel
+
+ load_dotenv()
+
+
+ fitness_model = FitnessModel.load()
+ nutrition_model = NutritionModel()
+ nutrition_model.load()
+ app = Flask("model-server")
+
+
+ @app.get("/")
+ def health():
+     return "I'm alive!!"
+
+
+ @app.post("/fitness")
+ def fitness_predict():
+     param_names = [
+         "home_or_gym",
+         "level",
+         "goal",
+         "gender",
+         "age",
+         "feedback",
+         "old_weight",
+         "equipments",
+     ]
+
+     # Collect the required parameters from the JSON body, rejecting incomplete requests
+     params = {}
+     for param_name in param_names:
+         value = request.json.get(param_name)
+         if value is None:
+             return jsonify({"error": f"{param_name} is missing"}), 400
+         params[param_name] = value
+
+     return jsonify({"result": fitness_model.predict(**params)})
+
+
+ @app.post("/nutrition")
+ def nutrition_predict():
+     param_names = ["calories"]
+
+     params = {}
+     for param_name in param_names:
+         value = request.json.get(param_name)
+         if value is None:
+             return jsonify({"error": f"{param_name} is missing"}), 400
+         params[param_name] = value
+     return jsonify({"result": nutrition_model.generate_plan(**params)})
+
+
+ if __name__ == "__main__":
+     app.run()
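
Finally, a hypothetical client sketch against the two endpoints, using only the standard library. It assumes the server is reachable on localhost:7860 (the port configured in the Dockerfile); the payload values are the same illustrative examples as above.

# Hypothetical client sketch; host, port, and payload values are assumptions.
import json
import urllib.request

def post_json(path, payload):
    # Send a JSON POST request and decode the JSON response
    req = urllib.request.Request(
        f"http://localhost:7860{path}",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read())

nutrition = post_json("/nutrition", {"calories": 2200})
fitness = post_json("/fitness", {
    "home_or_gym": 1,
    "level": "Beginner",
    "goal": "Gain Muscle",
    "gender": "Male",
    "age": 25,
    "feedback": True,
    "old_weight": 20,
    "equipments": ["barbell"],
})

print(len(fitness["result"]), len(nutrition["result"]))  # 30 workout days, 7 meal days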