first init

- .gitattributes +1 -0
- app.py +205 -59
- requirements.txt +5 -1
- tourism_place.csv +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tourism.db filter=lfs diff=lfs merge=lfs -text
app.py
CHANGED
@@ -1,63 +1,209 @@
The previous 63-line app.py is replaced in full (the removed lines are not shown in the rendered diff). The new file:

import os
import groqclient
import gradio as gr
import numpy as np
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
import sqlite3
import pandas as pd
from tqdm import tqdm

# Get the Groq API key from environment variables (in Hugging Face Spaces, this is stored as a secret)
api_key = os.getenv('GROQ_API_KEY')
groqclient.api_key = api_key

# Shared in-memory SQLite database (uri=True is required for the URI form to be honored)
con = sqlite3.connect("file::memory:?cache=shared", uri=True)
con.row_factory = sqlite3.Row
cur = con.cursor()

# Create the table if it does not exist
cur.execute("""
    CREATE TABLE IF NOT EXISTS places (
        Place_Id INTEGER PRIMARY KEY,   -- SQLite auto-increments INTEGER PRIMARY KEY automatically
        Place_Name TEXT NOT NULL,       -- SQLite uses TEXT instead of VARCHAR
        Description TEXT,
        Category TEXT,
        City TEXT,
        Price REAL,                     -- SQLite uses REAL instead of DECIMAL or FLOAT
        Rating REAL,
        Embedding TEXT
    );
""")

data = pd.read_csv('tourism_place.csv')

# Check if the table is empty
cur.execute("SELECT * FROM places")

if cur.fetchone() is None:
    # Store the places in the database
    for i in tqdm(range(len(data))):
        cur.execute("""
            INSERT INTO places (Place_Name, Description, Category, City, Price, Rating)
            VALUES (?, ?, ?, ?, ?, ?)
        """, (data['Place_Name'][i], data['Description'][i], data['Category'][i],
              data['City'][i], float(data['Price'][i]), float(data['Rating'][i])))

    # Commit the changes to the database
    con.commit()

# Compute and store embeddings
def compute_and_store_embeddings():
    model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

    # Select all places from the database
    cur.execute("SELECT Place_Id, Place_Name, Category, Description, City FROM places")
    places = cur.fetchall()

    for place in places:
        # Combine Place_Name, Category, Description, and City into one string
        text = f"{place[1]} {place[2]} {place[3]} {place[4]}"

        # Generate the embedding for the combined text
        embedding = model.encode(text)

        # Convert the embedding to a comma-separated string so it can be stored in the database
        embedding_str = ','.join([str(x) for x in embedding])

        # Update the place in the database with its embedding
        cur.execute(
            "UPDATE places SET Embedding = ? WHERE Place_Id = ?",
            (embedding_str, place[0])
        )

    # Commit the changes to the database
    con.commit()

# Run the function to compute and store embeddings
compute_and_store_embeddings()


# Load the Hugging Face model for embedding user queries
model = SentenceTransformer('paraphrase-MiniLM-L6-v2')

# Normalize the user query using the Groq LLM
def normalize_query(user_query):
    try:
        response = groqclient.chat.completions.create(
            model="llama-3.1-70b-versatile",
            messages=[{
                "role": "user",
                "content": f"""
                Please analyze the query: "{user_query}", extract Place name, Category, Description, and City.
                Return the response as: "Place name, Category, Description, City".
                """
            }]
        )
        normalized_user_query = response.choices[0].message.content.split('\n')[-1].strip()
        return normalized_user_query
    except Exception as e:
        print(f"Error normalizing query: {e}")
        return ""

# Generate the user embedding using the Hugging Face model
def get_user_embedding(query):
    try:
        return model.encode(query)
    except Exception as e:
        print(f"Error generating embedding: {e}")
        return np.zeros(384)  # paraphrase-MiniLM-L6-v2 produces 384-dimensional embeddings

# Find similar places based on cosine similarity
def get_similar_places(user_embedding):
    similarities = []
    try:
        res = cur.execute("SELECT * FROM places").fetchall()

        for place in res:
            embedding_str = place['Embedding']
            embedding = np.array([float(x) for x in embedding_str.split(',')])
            similarity = cosine_similarity([user_embedding], [embedding])[0][0]
            similarities.append((place, similarity))

        # Rank by similarity first, then by rating
        ranked_results = sorted(similarities, key=lambda x: (x[1], x[0]['Rating']), reverse=True)
        return ranked_results[:5]  # Return the top 5 places
    except Exception as e:
        print(f"Error fetching similar places: {e}")
        return []

# Main function to get the top 5 destinations
def get_top_5_destinations(user_query):
    normalized_query = normalize_query(user_query)
    user_embedding = get_user_embedding(normalized_query)
    similar_places = get_similar_places(user_embedding)

    if not similar_places:
        return "Tidak ada tempat yang ditemukan."

    top_places = []
    for place, similarity in similar_places:
        top_places.append({
            'name': place['Place_Name'],
            'city': place['City'],
            'category': place['Category'],
            'rating': place['Rating'],
            'description': place['Description'],
            'similarity': similarity
        })

    return top_places

# Generate the response to the user using the Groq LLM
def generate_response(user_query, top_places):
    try:
        # Prepare the destinations data in JSON format for the model to use directly
        destinations_data = ", ".join([
            f'{{"name": "{place["name"]}", "city": "{place["city"]}", "category": "{place["category"]}", "rating": {place["rating"]}, "description": "{place["description"]}"}}'
            for place in top_places
        ])

        # System prompt: focused on returning only the recommendations
        system_prompt = """
        You are a tour guide assistant. Your task is to present the following tourism recommendations to the user in Bahasa Indonesia.
        - For each destination, include the name, city, category, rating, and a short description.
        - Do not provide any additional commentary.
        - Only return the provided data in a clear and concise format.
        """

        # Generate the response using the model
        response = groqclient.chat.completions.create(
            model="llama-3.1-70b-versatile",
            messages=[
                {"role": "system", "content": system_prompt},  # The system prompt defines the behavior
                {"role": "user", "content": f"Berikut adalah rekomendasi berdasarkan data: {destinations_data}"}
            ]
        )

        # Return the response content generated by the model
        return response.choices[0].message.content
    except Exception as e:
        print(f"Error generating response: {e}")
        return "Maaf, terjadi kesalahan dalam menghasilkan rekomendasi."

# Gradio interface - user input and output
def chatbot(user_query):
    # Step 1: Get the top 5 destinations
    top_places = get_top_5_destinations(user_query)

    if isinstance(top_places, str):  # Error case, e.g. "Tidak ada tempat yang ditemukan."
        return top_places

    # Step 2: Generate the chatbot's response
    response = generate_response(user_query, top_places)
    return response

# Define the Gradio interface
iface = gr.Interface(
    fn=chatbot,
    inputs="text",
    outputs="text",
    title="Tourism Recommendation Chatbot",
    description="Masukkan pertanyaan wisata Anda dan dapatkan rekomendasi tempat terbaik!"
)

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()
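For a quick local check of the new pipeline, a minimal invocation might look like the sketch below. It assumes the dependencies from requirements.txt are installed, GROQ_API_KEY is exported, and tourism_place.csv sits next to app.py; the query string is only an example.

# Minimal local smoke test (a sketch, not part of the commit).
# Importing app builds the in-memory database and computes the embeddings;
# the __main__ guard keeps the Gradio app from launching on import.
from app import chatbot

example_query = "pantai dengan pemandangan matahari terbenam di Bali"  # any tourism query works
print(chatbot(example_query))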
requirements.txt
CHANGED
@@ -1 +1,5 @@
The previous single-line requirements.txt is replaced (the removed line is not shown in the rendered diff). The new file:

gradio==3.4.1
sentence-transformers==2.2.2
scikit-learn==1.2.2
groqclient
numpy==1.23.5
tourism_place.csv
ADDED
The diff for this file is too large to render.
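The CSV is not rendered here, but the columns app.py expects can be read off its INSERT statement. A small sketch for checking a local copy of the file (the column names come from the code above; everything else is illustrative):

# Sketch: confirm tourism_place.csv exposes the columns app.py reads.
import pandas as pd

expected = {"Place_Name", "Description", "Category", "City", "Price", "Rating"}
df = pd.read_csv("tourism_place.csv")
missing = expected - set(df.columns)
print("missing columns:", ", ".join(sorted(missing)) if missing else "none")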