Prasanna18 commited on
Commit
d626534
Β·
1 Parent(s): cb183bc

Upload 7 files

Browse files
README.md CHANGED
@@ -1,13 +1,12 @@
1
  ---
2
- title: Nagpur FoodGPT
3
- emoji: πŸš€
4
- colorFrom: indigo
5
- colorTo: gray
6
  sdk: streamlit
7
- sdk_version: 1.27.2
8
  app_file: app.py
9
  pinned: false
10
- license: llama2
11
  ---
12
 
13
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: NagpurFoodGPT!
3
+ emoji: 🍊
4
+ colorFrom: red
+ colorTo: yellow
6
  sdk: streamlit
7
+ sdk_version: 1.25.0
8
  app_file: app.py
9
  pinned: false
 
10
  ---
11
 
12
+
app.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Dependencies -----------------------------------------------------------
# Standard library
import json
import os

# Third-party
import pydeck as pdk
import requests
from PIL import Image

import streamlit as st
from streamlit_chat import message

# LangChain stack: retrieval chain over a CSV knowledge base + local LLM.
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import CTransformers
from langchain.memory import ConversationBufferMemory
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

# Page configuration must be the first Streamlit call in the script.
st.set_page_config(
    page_title="FoodGPT - Nagpur Based Food Recommendation System.",
    page_icon="🍊",
    layout="wide",
    initial_sidebar_state="expanded",
)
24
+
25
# Build the retrieval chain once and reuse it across Streamlit reruns.
# The original code re-imported CSVLoader (already imported at the top of the
# file) and rebuilt the FAISS index and reloaded the multi-GB LLM on EVERY
# rerun of the script; st.cache_resource keeps one shared instance instead.
@st.cache_resource(show_spinner="Loading knowledge base and model...")
def _build_chain():
    """Load data.csv, embed it into a FAISS index, and wire up the LLM chain.

    Returns:
        A ConversationalRetrievalChain that answers questions over data.csv.

    NOTE(review): because the chain (and its ConversationBufferMemory) is now
    cached, the memory persists across reruns/sessions; the app also passes
    st.session_state['history'] explicitly, which remains per-session.
    """
    documents = CSVLoader(file_path='data.csv').load()

    # Chunk rows so each embedded passage stays within the embedder's limits.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    text_chunks = text_splitter.split_documents(documents)

    embeddings = HuggingFaceEmbeddings(
        model_name="sentence-transformers/all-MiniLM-L6-v2",
        model_kwargs={'device': "cpu"},
    )
    vector_store = FAISS.from_documents(text_chunks, embeddings)

    llm = CTransformers(
        model="llama-2-7b-chat.ggmlv3.q4_0.bin",
        model_type="llama",
        config={'max_new_tokens': 128, 'temperature': 0.01},
    )
    memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

    return ConversationalRetrievalChain.from_llm(
        llm=llm,
        chain_type='stuff',
        # k=2: retrieve the two most similar CSV chunks per question.
        retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
        memory=memory,
    )


chain = _build_chain()
45
+
46
# Sidebar: app description and contribution link.
st.sidebar.title("FoodGPT!🍊")
st.sidebar.info("FoodGPT : A Nagpur Based Food Recommendation Chat! Recommends you the best locally recognized brands for your cravings! As this system is backed with LLMA-2 on hand picked data.")
# NOTE(review): the markdown link target is empty — fill in the repo URL.
github_link = "[GitHub]()"
# Fixed typo in the original user-facing string: "Sponser" -> "Sponsor".
st.sidebar.info("To contribute and Sponsor - " + github_link)
51
+
52
st.title("FoodGPT: A Nagpur based Food Recommendation Bot! 🍊")

# Seed the per-session conversation state on first load only.
_session_defaults = {
    'history': [],
    'generated': ["Hello!I'm FoodGPT, Ask me anything about Nagpur's Food."],
    'past': ["Hello!"],
}
for _key, _default in _session_defaults.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Transcript renders above the input form.
reply_container = st.container()
container = st.container()
65
+
66
with container:
    # Question form; clear_on_submit empties the text box after sending.
    with st.form(key='my_form', clear_on_submit=True):
        user_input = st.text_input(
            "Question:",
            placeholder="Ask anything about Nagpur's Food Joints or cravings",
            key='input',
        )
        image_upload = st.file_uploader(
            "Upload an image", type=["jpg", "jpeg", "png"]
        )
        submit_button = st.form_submit_button(label='Send')

    # Run the retrieval chain on the submitted question; surface any failure
    # in the UI rather than crashing the app.
    try:
        if submit_button and user_input:
            result = chain({
                "question": user_input,
                "chat_history": st.session_state['history'],
            })
            st.session_state['past'].append(user_input)
            st.session_state['generated'].append(result["answer"])
    except Exception as e:
        st.error(f"An error occurred: {str(e)}")
80
+
81
# Render the transcript: each user question followed by the bot's reply.
if st.session_state['generated']:
    with reply_container:
        for idx, bot_reply in enumerate(st.session_state['generated']):
            message(
                st.session_state["past"][idx],
                is_user=True,
                key=f"{idx}_user",
                avatar_style="thumbs",
            )
            message(bot_reply, key=str(idx), avatar_style="fun-emoji")
86
+
87
+
88
# Hugging Face Inference API endpoint for the food-image classifier.
API_URL = "https://api-inference.huggingface.co/models/Prasanna18/indian-food-classification"
# SECURITY: the original code committed a personal HF API token ("hf_...")
# directly in this file. Read it from the environment instead; the leaked
# token should be revoked and rotated on the Hugging Face account.
HEADERS = {"Authorization": f"Bearer {os.environ.get('HF_API_TOKEN', '')}"}


def query_image_classification(image_bytes):
    """POST raw image bytes to the Inference API and return the parsed JSON.

    Args:
        image_bytes: Raw bytes of the uploaded image file.

    Returns:
        The decoded JSON response (on success, a list of
        {'label', 'score'} dicts), or None if the request or
        JSON decoding failed.
    """
    try:
        response = requests.post(API_URL, headers=HEADERS, data=image_bytes)
        return response.json()
    except Exception as e:
        # Boundary handler: report the failure in the UI, don't crash.
        st.error(f"An error occurred during image classification: {str(e)}")
        return None
99
+
100
if image_upload:
    # Classify the uploaded image via the remote model.
    uploaded_bytes = image_upload.read()
    classification_result = query_image_classification(uploaded_bytes)

    if not classification_result:
        st.error("No classification result received.")
    else:
        st.image(image_upload, caption="Uploaded Image", use_column_width=True)

        # The API should return a non-empty list of prediction dicts.
        if not (isinstance(classification_result, list) and classification_result):
            st.error("Invalid classification result format or empty result list.")
        else:
            # Pick the prediction with the highest confidence score.
            best_label = max(
                classification_result, key=lambda entry: entry.get('score', 0)
            )
            if 'label' not in best_label:
                st.error("Invalid classification result format. Missing 'label' key.")
            else:
                st.header("Image Classification Result:")
                st.write(f"Classified as: {best_label['label']}")
121
+
122
+
123
# --- Static map of Nagpur ----------------------------------------------------
# (pydeck is already imported at the top of the file; the original re-imported
# it here, which was redundant.)
st.title("Nagpur Map")

# Nagpur city centre as [latitude, longitude].
center = [21.1458, 79.0882]

st.pydeck_chart(
    pdk.Deck(
        map_style="mapbox://styles/mapbox/light-v9",
        initial_view_state={
            "latitude": center[0],
            "longitude": center[1],
            "zoom": 13,
            "pitch": 10,
        },
        layers=[
            pdk.Layer(
                "ScatterplotLayer",
                # deck.gl positions are [longitude, latitude]; the original
                # passed [lat, lon], which placed the marker at an invalid
                # latitude of 79° instead of on Nagpur.
                data=[{"position": [center[1], center[0]], "tooltip": "Nagpur"}],
                get_position="position",
                get_radius=10000,
                get_color=[255, 0, 0],
                pickable=True,
            ),
        ],
    )
)
149
+
150
+
cached_data.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"vector_store":
data.csv ADDED
The diff for this file is too large to render. See raw diff
 
llama-2-7b-chat.ggmlv3.q4_0 10.47.24 AM.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8daa9615cce30c259a9555b1cc250d461d1bc69980a274b44d7eda0be78076d8
3
+ size 3791725184
logo.png ADDED
requirements.txt ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==1.4.0
2
+ accelerate==0.21.0
3
+ aiofiles==23.2.1
4
+ aiohttp==3.8.5
5
+ aiosignal==1.3.1
6
+ altair==5.0.1
7
+ anyio==3.6.2
8
+ astunparse==1.6.3
9
+ async-timeout==4.0.3
10
+ attrs==23.1.0
11
+ bitsandbytes==0.40.2
12
+ blinker==1.6.2
13
+ blis==0.7.10
14
+ branca==0.6.0
15
+ cachetools==5.3.1
16
+ catalogue==2.0.9
17
+ certifi==2023.5.7
18
+ charset-normalizer==3.1.0
19
+ click==8.1.3
20
+ colorama==0.4.6
21
+ confection==0.1.1
22
+ contourpy==1.1.0
23
+ ctransformers==0.2.27
24
+ cycler==0.11.0
25
+ cymem==2.0.7
26
+ dataclasses-json==0.6.1
27
+ datasets==2.14.4
28
+ dill==0.3.7
29
+ docopt==0.6.2
30
+ exceptiongroup==1.1.2
31
+ faiss-cpu==1.7.4
32
+ fake-useragent==1.1.3
33
+ fastapi==0.95.2
34
+ ffmpy==0.3.1
35
+ filelock==3.12.2
36
+ Flask==2.3.2
37
+ flatbuffers==23.5.26
38
+ folium==0.14.0
39
+ fonttools==4.42.0
40
+ frozenlist==1.4.0
41
+ fsspec==2023.6.0
42
+ gast==0.4.0
43
+ gitdb==4.0.10
44
+ GitPython==3.1.37
45
+ google-auth==2.17.3
46
+ google-auth-oauthlib==1.0.0
47
+ google-pasta==0.2.0
48
+ gradio==3.40.1
49
+ gradio_client==0.4.0
50
+ grpcio==1.57.0
51
+ h11==0.14.0
52
+ h5py==3.9.0
53
+ httpcore==0.17.3
54
+ httpx==0.24.1
55
+ huggingface-hub==0.16.4
56
+ idna==3.4
57
+ importlib-metadata==6.8.0
58
+ importlib-resources==6.0.1
59
+ install==1.3.5
60
+ itsdangerous==2.1.2
61
+ Jinja2==3.1.2
62
+ joblib==1.2.0
63
+ jsonify==0.5
64
+ jsonpatch==1.33
65
+ jsonpointer==2.4
66
+ jsonschema==4.19.0
67
+ jsonschema-specifications==2023.7.1
68
+ keras==2.13.1
69
+ kiwisolver==1.4.4
70
+ langchain==0.0.309
71
+ langcodes==3.3.0
72
+ langsmith==0.0.42
73
+ libclang==16.0.6
74
+ linkify-it-py==2.0.2
75
+ Markdown==3.4.4
76
+ markdown-it-py==2.2.0
77
+ MarkupSafe==2.1.2
78
+ marshmallow==3.20.1
79
+ matplotlib==3.7.2
80
+ mdit-py-plugins==0.3.3
81
+ mdurl==0.1.2
82
+ mpmath==1.3.0
83
+ multidict==6.0.4
84
+ multiprocess==0.70.15
85
+ murmurhash==1.0.9
86
+ mypy-extensions==1.0.0
87
+ networkx==3.1
88
+ nltk==3.8.1
89
+ numpy==1.24.3
90
+ oauthlib==3.2.2
91
+ openai==0.27.9
92
+ opencv-python==4.8.0.76
93
+ opt-einsum==3.3.0
94
+ orjson==3.9.5
95
+ outcome==1.2.0
96
+ packaging==23.1
97
+ pandas==2.0.3
98
+ pathy==0.10.2
99
+ peft==0.4.0
100
+ Pillow==10.0.0
101
+ preshed==3.0.8
102
+ protobuf==4.24.1
103
+ psutil==5.9.5
104
+ py-cpuinfo==9.0.0
105
+ pyarrow==13.0.0
106
+ pyasn1==0.5.0
107
+ pyasn1-modules==0.3.0
108
+ pydantic==1.10.8
109
+ pydeck==0.8.1b0
110
+ pydub==0.25.1
111
+ Pygments==2.16.1
112
+ pyparsing==3.0.9
113
+ pypdf==3.16.2
114
+ PySocks==1.7.1
115
+ python-dateutil==2.8.2
116
+ python-decouple==3.8
117
+ python-multipart==0.0.6
118
+ pytz==2023.3
119
+ PyYAML==6.0.1
120
+ referencing==0.30.2
121
+ regex==2023.8.8
122
+ requests==2.31.0
123
+ requests-oauthlib==1.3.1
124
+ requests-toolbelt==1.0.0
125
+ rich==13.6.0
126
+ rpds-py==0.9.2
127
+ rsa==4.9
128
+ safetensors==0.3.2
129
+ scikit-learn==1.2.2
130
+ scipy==1.10.1
131
+ seaborn==0.12.2
132
+ selenium==4.10.0
133
+ semantic-version==2.10.0
134
+ sentence-transformers==2.2.2
135
+ sentencepiece==0.1.99
136
+ six==1.16.0
137
+ smart-open==6.3.0
138
+ smmap==5.0.1
139
+ sniffio==1.3.0
140
+ sortedcontainers==2.4.0
141
+ spacy==3.6.1
142
+ spacy-legacy==3.0.12
143
+ spacy-loggers==1.0.4
144
+ SQLAlchemy==2.0.21
145
+ srsly==2.4.7
146
+ starlette==0.27.0
147
+ streamlit==1.27.1
148
+ streamlit-chat==0.1.1
149
+ sympy==1.12
150
+ tabulate==0.9.0
151
+ tenacity==8.2.3
152
+ tensorboard==2.13.0
153
+ tensorboard-data-server==0.7.1
154
+ tensorflow==2.13.0
155
+ tensorflow-estimator==2.13.0
156
+ tensorflow-macos==2.13.0
157
+ termcolor==2.3.0
158
+ thinc==8.1.12
159
+ threadpoolctl==3.1.0
160
+ tiktoken==0.5.1
161
+ tokenizers==0.13.3
162
+ toml==0.10.2
163
+ toolz==0.12.0
164
+ torch==2.0.1
165
+ torch-tb-profiler==0.4.1
166
+ torchaudio==2.0.2
167
+ torchvision==0.15.2
168
+ tornado==6.3.3
169
+ tqdm==4.65.0
170
+ transformers==4.31.0
171
+ trio==0.22.1
172
+ trio-websocket==0.10.3
173
+ trl==0.4.7
174
+ typer==0.9.0
175
+ typing-inspect==0.9.0
176
+ typing_extensions==4.5.0
177
+ tzdata==2023.3
178
+ tzlocal==5.0.1
179
+ uc-micro-py==1.0.2
180
+ ultralytics==8.0.188
181
+ urllib3==2.0.2
182
+ uvicorn==0.22.0
183
+ validators==0.22.0
184
+ wasabi==1.1.2
185
+ websockets==11.0.3
186
+ Werkzeug==2.3.4
187
+ wrapt==1.15.0
188
+ wsproto==1.2.0
189
+ xxhash==3.3.0
190
+ yarl==1.9.2
191
+ zipp==3.17.0