Spaces: Runtime error

Upload folder using huggingface_hub

- .gitattributes +1 -0
- .github/workflows/update_space.yml +28 -0
- README.md +2 -8
- __pycache__/diet.cpython-39.pyc +0 -0
- __pycache__/ollama.cpython-39.pyc +0 -0
- __pycache__/ollama_diet.cpython-39.pyc +0 -0
- custom_model.ipynb +168 -0
- dataset.csv +3 -0
- diet.py +32 -0
- nutritionmodel.py +285 -0
- prompt.txt +1 -0
- response.json +55 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+dataset.csv filter=lfs diff=lfs merge=lfs -text
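Note: the added rule routes dataset.csv through Git LFS, which is why that file appears later in this commit as an LFS pointer (oid and size) rather than as raw CSV.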
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
+name: Run Python script
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Set up Python
+        uses: actions/setup-python@v2
+        with:
+          python-version: '3.9'
+
+      - name: Install Gradio
+        run: python -m pip install gradio
+
+      - name: Log in to Hugging Face
+        run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+      - name: Deploy to Spaces
+        run: gradio deploy
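The login step stores the `hf_token` repository secret so that the final `gradio deploy` step can push to the Space. A minimal local sketch of the same flow, assuming the token is exported in an environment variable named HF_TOKEN (a name chosen here, not one the workflow uses):

import os
import huggingface_hub

# Mirrors the workflow's login step; HF_TOKEN is an assumed variable name.
huggingface_hub.login(token=os.environ["HF_TOKEN"])
# Sanity-check the token before running `gradio deploy`.
print(huggingface_hub.whoami()["name"])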
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title: Phase-
-
-colorFrom: red
-colorTo: green
+title: Phase-R_Diet
+app_file: diet.py
 sdk: gradio
 sdk_version: 4.43.0
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/diet.cpython-39.pyc
ADDED
Binary file (865 Bytes)

__pycache__/ollama.cpython-39.pyc
ADDED
Binary file (438 Bytes)

__pycache__/ollama_diet.cpython-39.pyc
ADDED
Binary file (879 Bytes)
custom_model.ipynb
ADDED
@@ -0,0 +1,168 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Collecting unsloth@ git+https://github.com/unslothai/unsloth.git (from unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git)\n",
+      " Cloning https://github.com/unslothai/unsloth.git to /tmp/pip-install-iyfa2a6n/unsloth_fb5a1c9f71c749ed87cb3c95c10067c7\n",
+      " Running command git clone --filter=blob:none --quiet https://github.com/unslothai/unsloth.git /tmp/pip-install-iyfa2a6n/unsloth_fb5a1c9f71c749ed87cb3c95c10067c7\n",
+      " Resolved https://github.com/unslothai/unsloth.git to commit 976d11a10d54383aeb7a692c69e01151a20bfd72\n",
+      " Installing build dependencies ... done\n",
+      " Getting requirements to build wheel ... done\n",
+      " Preparing metadata (pyproject.toml) ... done\n",
+      "Requirement already satisfied: packaging in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (24.1)\n",
+      "Requirement already satisfied: tyro in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.8.10)\n",
+      "Requirement already satisfied: transformers>=4.43.2 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (4.43.3)\n",
+      "Requirement already satisfied: datasets>=2.16.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2.20.0)\n",
+      "Requirement already satisfied: sentencepiece>=0.2.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.2.0)\n",
+      "Requirement already satisfied: tqdm in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (4.66.4)\n",
+      "Requirement already satisfied: psutil in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (6.0.0)\n",
+      "Requirement already satisfied: wheel>=0.42.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.43.0)\n",
+      "Requirement already satisfied: numpy in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (1.23.5)\n",
+      "Requirement already satisfied: protobuf<4.0.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (3.20.3)\n",
+      "Requirement already satisfied: huggingface-hub in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.24.3)\n",
+      "Requirement already satisfied: hf-transfer in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.1.8)\n",
+      "Requirement already satisfied: filelock in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (3.15.4)\n",
+      "Requirement already satisfied: pyarrow>=15.0.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (17.0.0)\n",
+      "Requirement already satisfied: pyarrow-hotfix in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.6)\n",
+      "Requirement already satisfied: dill<0.3.9,>=0.3.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.3.8)\n",
+      "Requirement already satisfied: pandas in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2.2.2)\n",
+      "Requirement already satisfied: requests>=2.32.2 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2.32.3)\n",
+      "Requirement already satisfied: xxhash in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (3.4.1)\n",
+      "Requirement already satisfied: multiprocess in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.70.16)\n",
+      "Requirement already satisfied: fsspec<=2024.5.0,>=2023.1.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from fsspec[http]<=2024.5.0,>=2023.1.0->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2024.5.0)\n",
+      "Requirement already satisfied: aiohttp in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (3.9.5)\n",
+      "Requirement already satisfied: pyyaml>=5.1 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (6.0.1)\n",
+      "Requirement already satisfied: typing-extensions>=3.7.4.3 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from huggingface-hub->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (4.12.2)\n",
+      "Requirement already satisfied: regex!=2019.12.17 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from transformers>=4.43.2->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2024.7.24)\n",
+      "Requirement already satisfied: safetensors>=0.4.1 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from transformers>=4.43.2->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.4.3)\n",
+      "Requirement already satisfied: tokenizers<0.20,>=0.19 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from transformers>=4.43.2->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.19.1)\n",
+      "Requirement already satisfied: docstring-parser>=0.16 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from tyro->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.16)\n",
+      "Requirement already satisfied: rich>=11.1.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from tyro->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (13.7.1)\n",
+      "Requirement already satisfied: shtab>=1.5.6 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from tyro->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (1.7.1)\n",
+      "Requirement already satisfied: eval-type-backport>=0.1.3 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from tyro->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.2.0)\n",
+      "Requirement already satisfied: aiosignal>=1.1.2 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from aiohttp->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (1.2.0)\n",
+      "Requirement already satisfied: attrs>=17.3.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from aiohttp->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (23.1.0)\n",
+      "Requirement already satisfied: frozenlist>=1.1.1 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from aiohttp->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (1.4.0)\n",
+      "Requirement already satisfied: multidict<7.0,>=4.5 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from aiohttp->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (6.0.4)\n",
+      "Requirement already satisfied: yarl<2.0,>=1.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from aiohttp->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (1.9.3)\n",
+      "Requirement already satisfied: async-timeout<5.0,>=4.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from aiohttp->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (4.0.3)\n",
+      "Requirement already satisfied: charset-normalizer<4,>=2 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from requests>=2.32.2->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (3.3.2)\n",
+      "Requirement already satisfied: idna<4,>=2.5 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from requests>=2.32.2->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (3.7)\n",
+      "Requirement already satisfied: urllib3<3,>=1.21.1 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from requests>=2.32.2->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2.2.2)\n",
+      "Requirement already satisfied: certifi>=2017.4.17 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from requests>=2.32.2->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2024.7.4)\n",
+      "Requirement already satisfied: markdown-it-py>=2.2.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from rich>=11.1.0->tyro->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (3.0.0)\n",
+      "Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from rich>=11.1.0->tyro->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2.18.0)\n",
+      "Requirement already satisfied: python-dateutil>=2.8.2 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from pandas->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2.9.0)\n",
+      "Requirement already satisfied: pytz>=2020.1 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from pandas->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2024.1)\n",
+      "Requirement already satisfied: tzdata>=2022.7 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from pandas->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (2024.1)\n",
+      "Requirement already satisfied: mdurl~=0.1 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from markdown-it-py>=2.2.0->rich>=11.1.0->tyro->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (0.1.2)\n",
+      "Requirement already satisfied: six>=1.5 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (from python-dateutil>=2.8.2->pandas->datasets>=2.16.0->unsloth@ git+https://github.com/unslothai/unsloth.git->unsloth[colab-new]@ git+https://github.com/unslothai/unsloth.git) (1.16.0)\n",
+      "Requirement already satisfied: xformers<0.0.27 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (0.0.26.post1)\n",
+      "Requirement already satisfied: trl<0.9.0 in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (0.8.6)\n",
+      "Requirement already satisfied: peft in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (0.12.0)\n",
+      "Requirement already satisfied: accelerate in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (0.33.0)\n",
+      "Requirement already satisfied: bitsandbytes in /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages (0.43.3)\n"
+     ]
+    }
+   ],
+   "source": [
+    "!pip install \"unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git\"\n",
+    "!pip install --no-deps \"xformers<0.0.27\" \"trl<0.9.0\" peft accelerate bitsandbytes"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "2024-09-02 22:16:32.351723: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:485] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n",
+      "2024-09-02 22:16:32.441229: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:8454] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n",
+      "2024-09-02 22:16:32.454471: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1452] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n",
+      "2024-09-02 22:16:32.544617: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\n",
+      "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
+      "2024-09-02 22:16:33.579642: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT\n",
+      "WARNING[XFORMERS]: xFormers can't load C++/CUDA extensions. xFormers was built for:\n",
+      " PyTorch 2.3.0+cu121 with CUDA 1201 (you have 2.4.0+cu121)\n",
+      " Python 3.9.19 (you have 3.9.19)\n",
+      " Please reinstall xformers (see https://github.com/facebookresearch/xformers#installing-xformers)\n",
+      " Memory-efficient attention, SwiGLU, sparse and more won't be available.\n",
+      " Set XFORMERS_MORE_DETAILS=1 for more details\n"
+     ]
+    },
+    {
+     "ename": "ImportError",
+     "evalue": "Unsloth: Xformers was not installed correctly.\nPlease install xformers separately first.\nThen confirm if it's correctly installed by running:\npython -m xformers.info\n\nLonger error message:\nxFormers can't load C++/CUDA extensions. xFormers was built for:\n PyTorch 2.3.0+cu121 with CUDA 1201 (you have 2.4.0+cu121)\n Python 3.9.19 (you have 3.9.19)\n Please reinstall xformers (see https://github.com/facebookresearch/xformers#installing-xformers)\n Memory-efficient attention, SwiGLU, sparse and more won't be available.",
+     "output_type": "error",
+     "traceback": [
+      "---------------------------------------------------------------------------",
+      "OSError                                   Traceback (most recent call last)",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/xformers/_cpp_lib.py:128, in _register_extensions()\n    127 try:\n--> 128     torch.ops.load_library(ext_specs.origin)\n    129 except OSError as exc:\n",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/torch/_ops.py:1295, in _Ops.load_library(self, path)\n   1291 with dl_open_guard():\n   1292     # Import the shared library into the process, thus running its\n   1293     # static (global) initialization code in order to register custom\n   1294     # operators with the JIT.\n-> 1295     ctypes.CDLL(path)\n   1296 self.loaded_libraries.add(path)\n",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/ctypes/__init__.py:382, in CDLL.__init__(self, name, mode, handle, use_errno, use_last_error, winmode)\n    381 if handle is None:\n--> 382     self._handle = _dlopen(self._name, mode)\n    383 else:\n",
+      "OSError: /home/nihaltm/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/xformers/_C.so: undefined symbol: _ZN3c1010Dispatcher17runRecordFunctionERN2at14RecordFunctionESt17reference_wrapperIKNS_14FunctionSchemaEENS_11DispatchKeyE",
+      "\nThe above exception was the direct cause of the following exception:\n",
+      "xFormersInvalidLibException               Traceback (most recent call last)",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/unsloth/models/_utils.py:253\n    252 try:\n--> 253     _register_extensions()  # Check if C++ modules are loaded correctly\n    254 except Exception as error:\n",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/xformers/_cpp_lib.py:130, in _register_extensions()\n    129 except OSError as exc:\n--> 130     raise xFormersInvalidLibException(build_metadata) from exc\n    131 return build_metadata\n",
+      "xFormersInvalidLibException: xFormers can't load C++/CUDA extensions. xFormers was built for:\n PyTorch 2.3.0+cu121 with CUDA 1201 (you have 2.4.0+cu121)\n Python 3.9.19 (you have 3.9.19)\n Please reinstall xformers (see https://github.com/facebookresearch/xformers#installing-xformers)\n Memory-efficient attention, SwiGLU, sparse and more won't be available.",
+      "\nDuring handling of the above exception, another exception occurred:\n",
+      "ImportError                               Traceback (most recent call last)",
+      "Cell In[2], line 5\n      3 from datasets import load_dataset\n      4 from transformers import TrainingArguments, TextStreamer\n----> 5 from unsloth.chat_templates import get_chat_template\n      6 from unsloth import FastLanguageModel, is_bfloat16_supported\n",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/unsloth/__init__.py:154\n    144 warnings.warn(\n    145     \"Unsloth: CUDA is not linked properly.\\n\"\n    146     \"Try running `python -m bitsandbytes` then `python -m xformers.info`\\n\"\n    (...)\n    150     \"Unsloth will still run for now, but maybe it might crash - let's hope it works!\"\n    151 )\n    152 pass\n--> 154 from .models import *\n    155 from .save import *\n    156 from .chat_templates import *\n",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/unsloth/models/__init__.py:15\n---> 15 from .loader import FastLanguageModel\n     16 from .llama import FastLlamaModel\n     17 from .mistral import FastMistralModel\n",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/unsloth/models/loader.py:15\n---> 15 from ._utils import is_bfloat16_supported, HAS_FLASH_ATTENTION, HAS_FLASH_ATTENTION_SOFTCAPPING\n     16 from .llama import FastLlamaModel, logger\n     17 from .mistral import FastMistralModel\n",
+      "File ~/miniconda3/envs/tf-gpu/lib/python3.9/site-packages/unsloth/models/_utils.py:255\n    253     _register_extensions()  # Check if C++ modules are loaded correctly\n    254 except Exception as error:\n--> 255     raise ImportError(\n    256         \"Unsloth: Xformers was not installed correctly.\\n\"\n    257         \"Please install xformers separately first.\\n\"\n    258         \"Then confirm if it's correctly installed by running:\\n\"\n    259         \"python -m xformers.info\\n\\n\"\n    260     \"Longer error message:\\n\" + str(error)\n    261 )\n    262 pass\n    263 import xformers.ops.fmha as xformers\n",
+      "ImportError: Unsloth: Xformers was not installed correctly.\nPlease install xformers separately first.\nThen confirm if it's correctly installed by running:\npython -m xformers.info\n\nLonger error message:\nxFormers can't load C++/CUDA extensions. xFormers was built for:\n PyTorch 2.3.0+cu121 with CUDA 1201 (you have 2.4.0+cu121)\n Python 3.9.19 (you have 3.9.19)\n Please reinstall xformers (see https://github.com/facebookresearch/xformers#installing-xformers)\n Memory-efficient attention, SwiGLU, sparse and more won't be available."
+     ]
+    }
+   ],
+   "source": [
+    "import torch\n",
+    "from trl import SFTTrainer\n",
+    "from datasets import load_dataset\n",
+    "from transformers import TrainingArguments, TextStreamer\n",
+    "from unsloth.chat_templates import get_chat_template\n",
+    "from unsloth import FastLanguageModel, is_bfloat16_supported"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "tf-gpu",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.19"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
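The stored output pins down why the second cell fails: the installed xFormers wheel was built for PyTorch 2.3.0+cu121 while the environment runs 2.4.0+cu121, so unsloth's extension check raises ImportError. A hedged diagnostic sketch (the pip command in the comment is a plausible remedy, not the notebook author's fix):

import torch

try:
    import xformers
    # Note: a broken build may import but only warn; run `python -m xformers.info`
    # (the check the error message itself suggests) for full detail.
    print("torch", torch.__version__, "| xformers", xformers.__version__)
except ImportError as err:
    # Reinstall an xformers build that matches the installed torch, e.g.
    #   pip install -U "xformers<0.0.27" --index-url https://download.pytorch.org/whl/cu121
    print("xformers unusable:", err)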
dataset.csv
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1e11c527e8b59cc589ddbf83ae39268b3a8d161107bf462af7c1d59d4dbc090c
+size 300394762
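Because of the .gitattributes rule above, only this pointer is committed; the 300394762-byte (roughly 300 MB) CSV itself lives in LFS storage and is fetched after cloning with `git lfs pull`.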
diet.py
ADDED
@@ -0,0 +1,32 @@
+import gradio as gr
+import asyncio
+import ollama
+import json
+
+def chat(message):
+    messages = [{"role": "user", "content": message}]
+    response = ollama.chat(model='llama3', messages=messages, stream=True)
+    for chunk in response:
+        yield chunk['message']['content']
+
+def gradio_chat(message, history):
+    response = ""
+    for chunk in chat(message):
+        response += chunk
+        yield response
+
+iface = gr.ChatInterface(
+    fn=gradio_chat,
+    title="Diet Plan Generator",
+    description="Enter your preferences to generate a personalized diet plan.",
+    examples=[
+        "Generate a meal plan for weight loss with a daily activity level being no activity or very less exercise. Target calories: 2000 kcal. Macro Distribution: 80 g protein, 220 g of carbs and 32 g of fat. The foods should mainly belong to Indian cuisine and should be strictly vegetarian.",
+        "Create a high-protein diet plan for muscle gain. Target calories: 2500 kcal. Macro Distribution: 150 g protein, 250 g of carbs and 70 g of fat. Include Mediterranean cuisine options.",
+    ],
+    retry_btn=None,
+    undo_btn="Delete Previous",
+    clear_btn="Clear",
+)
+
+if __name__ == "__main__":
+    iface.launch(share=True)
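diet.py streams its reply from a local Ollama server, so the Space needs a reachable Ollama daemon with the llama3 model pulled; without one, ollama.chat raises a connection error, a plausible cause of the "Runtime error" status above. (The asyncio and json imports are unused.) A small sketch for exercising the generator outside the Gradio UI, under the same assumption:

# Assumes `ollama serve` is running locally and `ollama pull llama3` has been run.
from diet import chat

for chunk in chat("Suggest a 300 kcal vegetarian Indian breakfast."):
    print(chunk, end="", flush=True)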
nutritionmodel.py
ADDED
@@ -0,0 +1,285 @@
+# import numpy as np
+# import pandas as pd
+# from sklearn.model_selection import train_test_split
+# from sklearn.neural_network import MLPRegressor
+# from sklearn.preprocessing import StandardScaler
+
+# # Step 1: Create random food data
+# def create_random_food_data(num_samples=100):
+#     np.random.seed(42)
+#     areas = ['American', 'Mexican', 'Italian', 'Indian', 'Chinese']
+#     categories = ['Beef', 'Chicken', 'Vegetarian', 'Seafood', 'Pork']
+#     vegetarian_categories = ['Vegetarian']
+
+#     data = []
+#     for _ in range(num_samples):
+#         area = np.random.choice(areas)
+#         category = np.random.choice(categories)
+#         ingredients_count = np.random.randint(3, 10)
+#         calories = np.random.randint(200, 600)
+#         protein = np.random.randint(10, 40)
+#         carbs = np.random.randint(20, 70)
+#         fats = np.random.randint(5, 30)
+
+#         data.append([area, category, ingredients_count, calories, protein, carbs, fats])
+
+#     df = pd.DataFrame(data, columns=['Area', 'Category', 'IngredientsCount', 'Calories', 'Protein', 'Carbs', 'Fats'])
+#     return df
+
+# # Step 2: Preprocess the Data
+# def preprocess_data(df):
+#     features = df[['Area', 'Category', 'IngredientsCount']]
+#     targets = df[['Calories', 'Protein', 'Carbs', 'Fats']]
+
+#     # Encode categorical variables
+#     features = pd.get_dummies(features, columns=['Area', 'Category'])
+
+#     return features, targets
+
+# # Step 3: Train the MLP Model
+# def train_mlp_model(X, y):
+#     # Split data into training and test sets
+#     X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+
+#     # Standardize the features
+#     scaler = StandardScaler()
+#     X_train_scaled = scaler.fit_transform(X_train)
+#     X_test_scaled = scaler.transform(X_test)
+
+#     # Initialize and train the MLP model
+#     mlp_model = MLPRegressor(hidden_layer_sizes=(100, 50), max_iter=500, random_state=42)
+#     mlp_model.fit(X_train_scaled, y_train)
+
+#     return mlp_model, scaler
+
+# # Step 4: Generate Diet Plan
+# def generate_diet_plan(mlp_model, scaler, total_calories, num_meals, region, diet_preference, foods_df):
+#     meal_names = ['Breakfast', 'Morning Snack', 'Lunch', 'Afternoon Snack', 'Dinner']
+#     calorie_distribution = [0.25, 0.10, 0.35, 0.10, 0.20]
+
+#     # Adjust the distribution if number of meals is less than 5
+#     if num_meals < 5:
+#         calorie_distribution = calorie_distribution[:num_meals]
+#         calorie_distribution = [x / sum(calorie_distribution) for x in calorie_distribution]
+#     elif num_meals > 5:
+#         # Evenly distribute the remaining calories across the extra meals
+#         extra_meals = num_meals - 5
+#         extra_meal_calories = (sum(calorie_distribution) - 1) / extra_meals
+#         calorie_distribution.extend([extra_meal_calories] * extra_meals)
+#         calorie_distribution = [x / sum(calorie_distribution) for x in calorie_distribution]
+#         meal_names.extend([f'Extra Meal {i+1}' for i in range(extra_meals)])
+
+#     diet_plan = []
+
+#     # Filter foods based on the user's region and diet preference
+#     if diet_preference == 'Vegetarian':
+#         region_foods = foods_df[(foods_df['Area'] == region) & (foods_df['Category'] == 'Vegetarian')]
+#     else:
+#         region_foods = foods_df[foods_df['Area'] == region]
+
+#     for i in range(num_meals):
+#         # Randomly select a food from the filtered region and preference
+#         food = region_foods.sample(1).iloc[0]
+
+#         # Adjust the portion to meet the meal calorie requirement
+#         portion_factor = (total_calories * calorie_distribution[i]) / food['Calories']
+#         diet_plan.append({
+#             'Meal': meal_names[i % len(meal_names)],
+#             'Food': food['Category'],
+#             'Area': food['Area'],
+#             'IngredientsCount': food['IngredientsCount'],
+#             'Calories': food['Calories'] * portion_factor,
+#             'Protein': food['Protein'] * portion_factor,
+#             'Carbs': food['Carbs'] * portion_factor,
+#             'Fats': food['Fats'] * portion_factor
+#         })
+
+#     return diet_plan
+
+# # Main Function
+# if __name__ == "__main__":
+#     # Create random food data
+#     foods_df = create_random_food_data()
+
+#     # Preprocess the data
+#     X, y = preprocess_data(foods_df)
+
+#     # Train the MLP model
+#     mlp_model, scaler = train_mlp_model(X, y)
+
+#     # Get user input
+#     total_calories = float(input("Enter the total daily calories you want to consume: "))
+#     num_meals = int(input("Enter the number of meals per day: "))
+#     region = input("Enter your region (American, Mexican, Italian, Indian, Chinese): ")
+#     diet_preference = input("Enter your diet preference (Vegetarian, Non-Vegetarian): ")
+
+#     # Generate and print the diet plan
+#     diet_plan = generate_diet_plan(mlp_model, scaler, total_calories, num_meals, region, diet_preference, foods_df)
+#     for meal in diet_plan:
+#         print(f"{meal['Meal']}: {meal['Food']} ({meal['Area']}) - {meal['Calories']:.2f} kcal, "
+#               f"{meal['Protein']:.2f}g protein, {meal['Carbs']:.2f}g carbs, {meal['Fats']:.2f}g fats")
+
+
+import requests
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split
+from sklearn.neural_network import MLPRegressor
+from sklearn.preprocessing import StandardScaler
+from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
+
+# Step 1: Fetch Food Data from TheMealDB API
+def fetch_mealdb_data():
+    meals = []
+    for letter in 'abcdefghijklmnopqrstuvwxyz':
+        url = f'https://www.themealdb.com/api/json/v1/1/search.php?f={letter}'
+        response = requests.get(url)
+        data = response.json()
+        if data['meals']:
+            meals.extend(data['meals'])
+    return meals
+
+# Step 2: Preprocess the Data
+# def preprocess_data(meals):
+#     features = []
+#     targets = []
+
+#     for meal in meals:
+#         area = meal['strArea'] if meal['strArea'] else 'Unknown'
+#         category = meal['strCategory'] if meal['strCategory'] else 'Unknown'
+#         meal_type = np.random.choice(['Breakfast', 'Lunch', 'Dinner', 'Snack'])
+#         ingredients_count = sum([1 for i in range(1, 21) if meal[f'strIngredient{i}']])
+
+#         # Example target values (you can replace these with real data)
+#         calories = np.random.randint(200, 600)
+#         protein = np.random.randint(10, 40)
+#         carbs = np.random.randint(20, 70)
+#         fats = np.random.randint(5, 30)
+
+#         features.append([area, category, meal_type, ingredients_count])
+#         targets.append([calories, protein, carbs, fats])
+
+#     feature_df = pd.DataFrame(features, columns=['Area', 'Category', 'MealType', 'IngredientsCount'])
+#     target_df = pd.DataFrame(targets, columns=['Calories', 'Protein', 'Carbs', 'Fats'])
+
+#     # Encode categorical variables
+#     feature_df = pd.get_dummies(feature_df, columns=['Area', 'Category', 'MealType'])
+
+#     return feature_df, target_df
+
+def preprocess_data(meals):
+    features = []
+    targets = []
+
+    for meal in meals:
+        area = meal['strArea'] if meal['strArea'] else 'Unknown'
+        category = meal['strCategory'] if meal['strCategory'] else 'Unknown'
+        meal_type = np.random.choice(['Breakfast', 'Lunch', 'Dinner', 'Snack'])
+        ingredients_count = sum([1 for i in range(1, 21) if meal[f'strIngredient{i}']])
+        meal_name = meal['strMeal'] if meal['strMeal'] else 'Unknown'  # Add meal name
+
+        # Example target values (replace with real data if available)
+        calories = np.random.randint(200, 600)
+        protein = np.random.randint(10, 40)
+        carbs = np.random.randint(20, 70)
+        fats = np.random.randint(5, 30)
+
+        # Include meal_name in features
+        features.append([meal_name, area, category, meal_type, ingredients_count])
+        targets.append([calories, protein, carbs, fats])
+
+    feature_df = pd.DataFrame(features, columns=['MealName', 'Area', 'Category', 'MealType', 'IngredientsCount'])
+    target_df = pd.DataFrame(targets, columns=['Calories', 'Protein', 'Carbs', 'Fats'])
+
+    # Encode categorical variables except MealName
+    feature_df = pd.get_dummies(feature_df, columns=['Area', 'Category', 'MealType'])
+
+    return feature_df, target_df
+
+
+# Step 3: Train the MLP Model
+def train_mlp_model(X, y):
+    numeric_X = X.select_dtypes(include=[np.number])
+    X_train, X_test, y_train, y_test = train_test_split(numeric_X, y, test_size=0.2, random_state=42)
+    scaler = StandardScaler()
+    X_train_scaled = scaler.fit_transform(X_train)
+    X_test_scaled = scaler.transform(X_test)
+
+    mlp_model = MLPRegressor(hidden_layer_sizes=(150, 100, 50), activation='relu', solver='adam', max_iter=1000, random_state=42)
+    mlp_model.fit(X_train_scaled, y_train)
+
+    y_pred = mlp_model.predict(X_test_scaled)
+
+    mse = mean_squared_error(y_test, y_pred)
+    r2 = r2_score(y_test, y_pred)
+    mae = mean_absolute_error(y_test, y_pred)
+
+    print(f"Model Performance:")
+    print(f"Mean Squared Error (MSE): {mse:.2f}")
+    print(f"R-squared (R²): {r2:.2f}")
+    print(f"Mean Absolute Error (MAE): {mae:.2f}")
+
+    return mlp_model, scaler
+
+# Step 4: Generate Diet Plan
+def generate_diet_plan(mlp_model, scaler, total_calories, num_meals, region, diet_preference, foods_df):
+    meal_names = ['Breakfast', 'Morning Snack', 'Lunch', 'Afternoon Snack', 'Dinner']
+    calorie_distribution = [0.25, 0.10, 0.35, 0.10, 0.20]
+
+    if num_meals < 5:
+        calorie_distribution = calorie_distribution[:num_meals]
+        calorie_distribution = [x / sum(calorie_distribution) for x in calorie_distribution]
+    elif num_meals > 5:
+        extra_meals = num_meals - 5
+        extra_meal_calories = (sum(calorie_distribution) - 1) / extra_meals
+        calorie_distribution.extend([extra_meal_calories] * extra_meals)
+        calorie_distribution = [x / sum(calorie_distribution) for x in calorie_distribution]
+        meal_names.extend([f'Extra Meal {i+1}' for i in range(extra_meals)])
+
+    diet_plan = []
+
+    if diet_preference == 'Vegetarian':
+        region_foods = foods_df[(foods_df['Area'] == region) & (foods_df['Category'] == 'Vegetarian')]
+    else:
+        region_foods = foods_df[foods_df['Area'] == region]
+
+    for i in range(num_meals):
+        meal_type = meal_names[i % len(meal_names)]
+        meal_foods = region_foods[region_foods['MealType'] == meal_type]
+
+        if meal_foods.empty:
+            continue
+
+        food = meal_foods.sample(1).iloc[0]
+        portion_factor = (total_calories * calorie_distribution[i]) / food['Calories']
+        diet_plan.append({
+            'Meal': meal_type,
+            'Food': food['Category'],
+            'Area': food['Area'],
+            'IngredientsCount': food['IngredientsCount'],
+            'Calories': food['Calories'] * portion_factor,
+            'Protein': food['Protein'] * portion_factor,
+            'Carbs': food['Carbs'] * portion_factor,
+            'Fats': food['Fats'] * portion_factor
+        })
+
+    return diet_plan
+
+# Main Function
+if __name__ == "__main__":
+    meals = fetch_mealdb_data()
+
+    X, y = preprocess_data(meals)
+
+    mlp_model, scaler = train_mlp_model(X, y)
+
+    total_calories = float(input("Enter the total daily calories you want to consume: "))
+    num_meals = int(input("Enter the number of meals per day: "))
+    region = input("Enter your region (American, Mexican, Italian, Indian, Chinese): ")
+    diet_preference = input("Enter your diet preference (Vegetarian, Non-Vegetarian): ")

+    diet_plan = generate_diet_plan(mlp_model, scaler, total_calories, num_meals, region, diet_preference, X)
+
+    for meal in diet_plan:
+        print(f"{meal['Meal']}: {meal['Food']} ({meal['Area']}) - {meal['Calories']:.2f} kcal, "
+              f"{meal['Protein']:.2f}g protein, {meal['Carbs']:.2f}g carbs, {meal['Fats']:.2f}g fats")
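As committed, the main block passes X (the get_dummies-encoded feature frame) into generate_diet_plan, but that function indexes plain 'Area', 'Category', 'MealType', and 'Calories' columns, which the encoding has replaced with one-hot columns, so the lookups raise KeyError; mlp_model and scaler are also accepted but never used. A sketch of a frame whose columns the function can actually consume (synthetic values, mirroring the randomized targets above):

import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
n = 50
# MealType draws from the meal_names the function filters on, so matches exist.
foods_df = pd.DataFrame({
    'Area': rng.choice(['American', 'Indian', 'Italian'], size=n),
    'Category': rng.choice(['Vegetarian', 'Chicken', 'Seafood'], size=n),
    'MealType': rng.choice(['Breakfast', 'Morning Snack', 'Lunch', 'Afternoon Snack', 'Dinner'], size=n),
    'IngredientsCount': rng.integers(3, 10, size=n),
    'Calories': rng.integers(200, 600, size=n),
    'Protein': rng.integers(10, 40, size=n),
    'Carbs': rng.integers(20, 70, size=n),
    'Fats': rng.integers(5, 30, size=n),
})
# generate_diet_plan(mlp_model, scaler, 2000, 5, 'Indian', 'Vegetarian', foods_df)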
prompt.txt
ADDED
@@ -0,0 +1 @@
+Generate a meal plan for weight loss with a daily activity level being no activity or very less exercise. Target calories: 2000 kcal. Macro Distribution: 80 g protein, 220 g of carbs and 32 g of fat. Give this in the form a table with days: [Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday] with meals being mandatorily [Breakfast, Afternoon Snack, Lunch, Evening Snack, Dinner]. Don't tell anything other than the table in the form of json as mentioned. The foods should mainly belong to Indian cuisine and should be strictly vegetarian.
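prompt.txt is apparently the prompt whose reply is stored in response.json below. A hedged sketch of regenerating that reply with the same ollama client diet.py uses (assumes a local llama3; the file names are the ones in this commit):

import ollama

prompt = open("prompt.txt").read()
reply = ollama.chat(model="llama3", messages=[{"role": "user", "content": prompt}])
with open("response.json", "w") as f:
    f.write(reply["message"]["content"])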
response.json
ADDED
@@ -0,0 +1,55 @@
+Here is a sample meal plan for weight loss with a daily activity level being no activity or very less exercise:
+
+{
+  "Monday": {
+    "Breakfast": "Oatmeal with almond milk, banana, and walnuts (400 calories, 25g protein, 60g carbs, 10g fat)",
+    "Afternoon Snack": "Carrot sticks with hummus (100 calories, 5g protein, 20g carbs, 10g fat)",
+    "Lunch": "Vegetable biryani with tofu and a side of raita (500 calories, 30g protein, 70g carbs, 15g fat)",
+    "Evening Snack": "Cucumber slices with tomato soup (150 calories, 5g protein, 25g carbs, 10g fat)",
+    "Dinner": "Spinach and paneer stuffed bell peppers (550 calories, 35g protein, 40g carbs, 20g fat)"
+  },
+  "Tuesday": {
+    "Breakfast": "Whole wheat toast with avocado spread, cherry tomatoes, and scrambled eggs (350 calories, 20g protein, 30g carbs, 15g fat)",
+    "Afternoon Snack": "Roasted chana chaat with a sprinkle of cumin powder (120 calories, 5g protein, 25g carbs, 10g fat)",
+    "Lunch": "Lentil soup with whole wheat roti and a side salad (450 calories, 20g protein, 60g carbs, 15g fat)",
+    "Evening Snack": "Grilled vegetable skewers with a dollop of low-fat yogurt (200 calories, 10g protein, 30g carbs, 10g fat)",
+    "Dinner": "Chana masala with brown rice and mixed vegetables (500 calories, 25g protein, 70g carbs, 20g fat)"
+  },
+  "Wednesday": {
+    "Breakfast": "Flaxseed porridge with almond milk, banana, and a sprinkle of cinnamon (350 calories, 10g protein, 50g carbs, 15g fat)",
+    "Afternoon Snack": "Cauliflower florets with a side of low-fat curd (100 calories, 5g protein, 20g carbs, 10g fat)",
+    "Lunch": "Vegetable quinoa bowl with roasted sweet potatoes and a dollop of yogurt (500 calories, 25g protein, 60g carbs, 15g fat)",
+    "Evening Snack": "Baked apple slices with cinnamon and a sprinkle of almonds (150 calories, 5g protein, 30g carbs, 10g fat)",
+    "Dinner": "Palak paneer with brown rice and mixed vegetables (550 calories, 35g protein, 40g carbs, 20g fat)"
+  },
+  "Thursday": {
+    "Breakfast": "Omelette with spinach, mushrooms, and whole wheat toast (250 calories, 15g protein, 10g carbs, 10g fat)",
+    "Afternoon Snack": "Hummus with carrot and cucumber sticks (150 calories, 5g protein, 20g carbs, 10g fat)",
+    "Lunch": "Chickpea salad with mixed greens, cherry tomatoes, and a side of whole wheat crackers (450 calories, 20g protein, 60g carbs, 15g fat)",
+    "Evening Snack": "Roasted bell pepper slices with hummus (120 calories, 5g protein, 25g carbs, 10g fat)",
+    "Dinner": "Vegetable korma with brown rice and mixed vegetables (500 calories, 30g protein, 70g carbs, 20g fat)"
+  },
+  "Friday": {
+    "Breakfast": "Whole wheat paratha with low-fat paneer and a side of mixed greens salad (350 calories, 20g protein, 30g carbs, 15g fat)",
+    "Afternoon Snack": "Cucumber slices with a sprinkle of cumin powder (50 calories, 5g protein, 10g carbs, 0g fat)",
+    "Lunch": "Lentil and vegetable curry with brown rice and a side salad (500 calories, 25g protein, 70g carbs, 20g fat)",
+    "Evening Snack": "Grilled vegetable skewers with a dollop of low-fat yogurt (200 calories, 10g protein, 30g carbs, 10g fat)",
+    "Dinner": "Chana masala with brown rice and mixed vegetables (500 calories, 25g protein, 70g carbs, 20g fat)"
+  },
+  "Saturday": {
+    "Breakfast": "Avocado toast on whole wheat bread with scrambled eggs and cherry tomatoes (350 calories, 15g protein, 30g carbs, 15g fat)",
+    "Afternoon Snack": "Roasted chana chaat with a sprinkle of cumin powder (120 calories, 5g protein, 25g carbs, 10g fat)",
+    "Lunch": "Vegetable biryani with tofu and a side of mixed greens salad (500 calories, 30g protein, 70g carbs, 20g fat)",
+    "Evening Snack": "Baked apple slices with cinnamon and a sprinkle of almonds (150 calories, 5g protein, 30g carbs, 10g fat)",
+    "Dinner": "Spinach and paneer stuffed bell peppers (550 calories, 35g protein, 40g carbs, 20g fat)"
+  },
+  "Sunday": {
+    "Breakfast": "Oatmeal with almond milk, banana, and walnuts (400 calories, 25g protein, 60g carbs, 10g fat)",
+    "Afternoon Snack": "Carrot sticks with hummus (100 calories, 5g protein, 20g carbs, 10g fat)",
+    "Lunch": "Chickpea salad with mixed greens, cherry tomatoes, and a side of whole wheat crackers (450 calories, 20g protein, 60g carbs, 15g fat)",
+    "Evening Snack": "Grilled vegetable skewers with a dollop of low-fat yogurt (200 calories, 10g protein, 30g carbs, 10g fat)",
+    "Dinner": "Vegetable korma with brown rice and mixed vegetables (500 calories, 30g protein, 70g carbs, 20g fat)"
+  }
+}
+
+Please note that the calorie, protein, carbohydrate, and fat content of each meal is approximate and may vary based on specific ingredients and portion sizes. It's always a good idea to consult with a registered dietitian or healthcare professional for personalized nutrition advice.
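Despite its name, response.json is not valid JSON: the model ignored the "Don't tell anything other than the table" instruction and wrapped the object in prose. A minimal extraction sketch (assumes a single top-level {...} block, which holds for this reply):

import json

raw = open("response.json").read()
plan = json.loads(raw[raw.index("{"): raw.rindex("}") + 1])
print(plan["Monday"]["Breakfast"])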