epochs-demos committed on
Commit
d73ff04
·
0 Parent(s):

Duplicate from epochs-demos/product-recommendor

Browse files
Files changed (7) hide show
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +336 -0
  4. images.csv +0 -0
  5. prompts.json +7 -0
  6. requirements.txt +9 -0
  7. stylesu.csv +0 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tflite filter=lfs diff=lfs merge=lfs -text
29
+ *.tgz filter=lfs diff=lfs merge=lfs -text
30
+ *.wasm filter=lfs diff=lfs merge=lfs -text
31
+ *.xz filter=lfs diff=lfs merge=lfs -text
32
+ *.zip filter=lfs diff=lfs merge=lfs -text
33
+ *.zst filter=lfs diff=lfs merge=lfs -text
34
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Product Recommendor
3
+ emoji: 👁
4
+ colorFrom: gray
5
+ colorTo: blue
6
+ sdk: gradio
7
+ sdk_version: 3.29.0
8
+ app_file: app.py
9
+ pinned: false
10
+ duplicated_from: epochs-demos/product-recommendor
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,336 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #Importing all the necessary needed libraries
2
+ import torch
3
+ import requests
4
+ import numpy as np
5
+ import pandas as pd
6
+ import gradio as gr
7
+ from io import BytesIO
8
+ from PIL import Image as PILIMAGE
9
+ from IPython.display import Image
10
+ from IPython.core.display import HTML
11
+ from transformers import CLIPProcessor, CLIPModel, CLIPTokenizer
12
+ from sentence_transformers import SentenceTransformer, util
13
+ import os
14
+ import json
15
+ import requests
16
+ import langchain
17
+ from tqdm import tqdm
18
+ from langchain.text_splitter import CharacterTextSplitter
19
+ images = []
20
+ prompt_templates = {"DefaultChatGPT": ""}
21
+ # Streaming endpoint
22
+ API_URL = "https://api.openai.com/v1/chat/completions" # os.getenv("API_URL") + "/generate_stream"
23
+ convo_id = 'default'
24
+ #5c72c157a8fd54357bd13112cd71952a
25
+ import time
26
+ images1= pd.read_csv("./images.csv")
27
+
28
+ openai_api_key='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'
29
+ m=0
30
+ style1= pd.read_csv('./stylesu.csv')
31
+ feature_info= list(style1.columns)
32
+ feature_info = ' '.join([str(elem) for elem in feature_info])
33
+ info= style1.values.tolist()
34
+ final_info=''
35
+ for i in info:
36
+ li=''
37
+ li=' '.join([str(elem) for elem in i])
38
+ final_info += li+'\n'
39
+
40
+
41
+
42
+ def on_prompt_template_change(prompt_template):
43
+ if not isinstance(prompt_template, str): return
44
+ if prompt_template:
45
+ return prompt_templates[prompt_template]
46
+ else:
47
+ ''
48
+
49
+ def get_empty_state():
50
+ return {"total_tokens": 0, "messages": []}
51
+
52
+ def get_prompt_templates():
53
+ with open('./prompts.json','r',encoding='utf8') as fp:
54
+ json_data = json.load(fp)
55
+ for data in json_data:
56
+ act = data['act']
57
+ prompt = data['prompt']
58
+ prompt_templates[act] = prompt
59
+ # reader = csv.reader(csv_file)
60
+ # next(reader) # skip the header row
61
+ # for row in reader:
62
+ # if len(row) >= 2:
63
+ # act = row[0].strip('"')
64
+ # prompt = row[1].strip('"')
65
+ # prompt_templates[act] = prompt
66
+
67
+ choices = list(prompt_templates.keys())
68
+ choices = choices[:1] + sorted(choices[1:])
69
+ return gr.update(value=choices[0], choices=choices)
70
+
71
+
72
+
73
+ def run(pr=gr.Progress(track_tqdm=True)):
74
+ #if(chat_counter==0):
75
+ message_prompt=[]
76
+ x=len(final_info)
77
+ print(x/2000)
78
+ for i in range(0,x,2000): #final_texts:
79
+ message_prompt.append(final_info[i:i+2000]+" Remember this along with previous prompts as it makes up the csv file")
80
+ #//there
81
+ prompt_template = "I want you to act as a Product recommender and read the CSV file I will provide you. I need you to thoroughly review the CSV file and give recommendations based on the input afterward. You should recommend me the product by displaying its id, and description. The csv features are:" +feature_info+ "The csv information is as follows:"
82
+ payload = {
83
+ "model": "gpt-3.5-turbo",
84
+ "messages": [{"role":"system", "content":prompt_template}],
85
+ "temperature": 0.1,
86
+ "top_p": 1.0,
87
+ "n": 1,
88
+ "stream": True,
89
+ "presence_penalty": 0,
90
+ "frequency_penalty": 0,
91
+ }
92
+
93
+ headers = {
94
+ "Content-Type": "application/json",
95
+ "Authorization": f"Bearer {openai_api_key}"
96
+ }
97
+ response = requests.post(API_URL, headers=headers, json=payload, stream=True)
98
+
99
+ for i in pr.tqdm(message_prompt):
100
+ payload = {
101
+ "model": "gpt-3.5-turbo",
102
+ "messages": [{"role":"system", "content":i}],
103
+ "temperature": 0.1,
104
+ "top_p": 1.0,
105
+ "n": 1,
106
+ "stream": True,
107
+ "presence_penalty": 0,
108
+ "frequency_penalty": 0, }
109
+ response = requests.post(API_URL, headers=headers, json=payload, stream=True)
110
+ time.sleep(0.01)
111
+ pr(1/2210)
112
+
113
+ print("completed")
114
+
115
+ def predict(inputs, prompt_template, temperature, openai_api_key, chat_counter, context_length, chatbot=[],
116
+ history=[]):
117
+
118
+ # # repetition_penalty, top_k
119
+ if inputs==None:
120
+ inputs = ''
121
+ prompt_template = "I want you to act as a Product recommender and read the CSV file I will provide you. I need you to thoroughly review the CSV file and give recommendations based on the input afterward. You should recommend me the product by displaying its id, and description. The csv features are:" +feature_info+ "The csv information is as follows:"
122
+
123
+ headers = {
124
+ "Content-Type": "application/json",
125
+ "Authorization": f"Bearer {openai_api_key}"
126
+ }
127
+ payload = {
128
+ "model": "gpt-3.5-turbo",
129
+ "messages": [{"role": "user", "content": f"{inputs}"}],
130
+ "temperature": 0.1,
131
+ "top_p": 1.0,
132
+ "n": 1,
133
+ "stream": True,
134
+ "presence_penalty": 0,
135
+ "frequency_penalty": 0,
136
+ }
137
+
138
+
139
+
140
+ # print(f"chat_counter - {chat_counter}")
141
+ if chat_counter != 0:
142
+ messages = []
143
+ # print(chatbot)
144
+ # print(chatbot[-context_length:])
145
+ # print(context_length)
146
+ for data in chatbot[-context_length:]:
147
+ temp1 = {}
148
+ temp1["role"] = "user"
149
+ temp1["content"] = data[0]
150
+ temp2 = {}
151
+ temp2["role"] = "assistant"
152
+ temp2["content"] = data[1]
153
+ messages.append(temp1)
154
+ messages.append(temp2)
155
+ temp3 = {}
156
+ temp3["role"] = "user"
157
+ temp3["content"] = inputs
158
+ messages.append(temp3)
159
+ # print(messages)
160
+ # messages
161
+ payload = {
162
+ "model": "gpt-3.5-turbo",
163
+ "messages": [{"role": "system", "content": prompt_template}]+messages, # [{"role": "user", "content": f"{inputs}"}],
164
+ "temperature": temperature, # 1.0,
165
+ "n": 1,
166
+ "stream": True,
167
+ "presence_penalty": 0,
168
+ "frequency_penalty": 0,
169
+ }
170
+
171
+
172
+
173
+ history.append(inputs)
174
+ # print(f"payload is - {payload}")
175
+ # make a POST request to the API endpoint using the requests.post method, passing in stream=True
176
+ # print('payload',payload)
177
+ response = requests.post(API_URL, headers=headers, json=payload, stream=True)
178
+
179
+ # print('response', response)
180
+ # print('content',response.content)
181
+ # print('text', response.text)
182
+ if response.status_code != 200:
183
+ try:
184
+ payload['id'] = response.content['id']
185
+ response = requests.post(API_URL, headers=headers, json=payload, stream=True)
186
+ if response.status_code != 200:
187
+ payload['id'] = response.content['id']
188
+ response = requests.post(API_URL, headers=headers, json=payload, stream=True)
189
+ except:
190
+ pass
191
+
192
+ # print('status_code', response.status_code)
193
+ # response = requests.post(API_URL, headers=headers, json=payload, stream=True)
194
+ token_counter = 0
195
+ partial_words = ""
196
+ counter = 0
197
+ if response.status_code==200:
198
+ chat_counter += 1
199
+ # print('chunk')
200
+ for chunk in response.iter_lines():
201
+ # Skipping first chunk
202
+ if counter == 0:
203
+ counter += 1
204
+ continue
205
+ # check whether each line is non-empty
206
+ chunk = chunk.decode("utf-8")[6:]
207
+ if chunk:
208
+ # print(chunk)
209
+ if chunk=='[DONE]':
210
+ break
211
+ resp: dict = json.loads(chunk)
212
+ choices = resp.get("choices")
213
+ if not choices:
214
+ continue
215
+ delta = choices[0].get("delta")
216
+ if not delta:
217
+ continue
218
+ # decode each line as response data is in bytes
219
+ if len(chunk) > 12 and "content" in resp['choices'][0]['delta']:
220
+ # if len(json.loads(chunk.decode()[6:])['choices'][0]["delta"]) == 0:
221
+ # break
222
+ partial_words = partial_words + resp['choices'][0]["delta"]["content"]
223
+ # print(partial_words)
224
+ if token_counter == 0:
225
+ history.append(" " + partial_words)
226
+ else:
227
+ history[-1] = partial_words
228
+ chat = [(history[i], history[i + 1]) for i in
229
+ range(0, len(history) - 1, 2)] # convert to tuples of list
230
+ # print(chat)
231
+ token_counter += 1
232
+ yield chat, history, chat_counter # resembles {chatbot: chat, state: history}
233
+ else:
234
+ chat = [(history[i], history[i + 1]) for i in
235
+ range(0, len(history) - 1, 2)] # convert to tuples of list
236
+ chat.append((inputs, "OpenAI Network Error. please try again"))
237
+ token_counter += 1
238
+ yield chat, history, chat_counter # resembles {chatbot: chat, state: history}
239
+
240
+
241
+
242
+
243
+ def reset_textbox():
244
+ return gr.update(value='')
245
+
246
+ def clear_conversation(chatbot):
247
+ return gr.update(value=None, visible=True), [], [], gr.update(value=0)
248
+
249
+
250
+
251
+ def galleryim():
252
+
253
+ count=0
254
+ for i in images1['filename']:
255
+ count+=1
256
+ if count==50:
257
+ break
258
+ photo_data = images1[images1["filename"] == i].iloc[0]
259
+ response = requests.get(photo_data["link"] )
260
+ try:
261
+ img = PILIMAGE.open(BytesIO(response.content))
262
+ except:
263
+ print("File not found")
264
+ else:
265
+ images.append(img)
266
+ return images
267
+
268
+ title = """<h1 align="center">ChatGPTDatasetSearch</h1>"""
269
+ description = """Language models can be conditioned to act like dialogue agents through a conversational prompt that typically takes the form:
270
+ ```
271
+ User: <utterance>
272
+ Assistant: <utterance>
273
+ User: <utterance>
274
+ Assistant: <utterance>
275
+ ...
276
+ ```
277
+ In this app, you can explore the outputs of a gpt-3.5-turbo LLM.
278
+ """
279
+ with gr.Blocks(css="""#col_container {width: 800px; margin-left: auto; margin-right: auto;}
280
+ #chatbot {height: 500px; overflow: auto;}
281
+ #inputs {font-size: 20px;}
282
+ #prompt_template_preview {padding: 1em; border-width: 1px; border-style: solid; border-color: #e0e0e0; border-radius: 4px;}""") as demo:
283
+ gr.HTML(title)
284
+ with gr.Column(variant="panel"):
285
+
286
+ gr.HTML( """<b><center><h1>1001Epochs</h1></center></b>
287
+ <p><center>TOP THREE images that best match the search query provided by the user</center></p>
288
+ """)
289
+
290
+ with gr.Row():
291
+ with gr.Column(scale=0.50):
292
+ gallery = gr.Gallery( value=galleryim(),
293
+ label="Generated images", show_label=False, elem_id="gallery",every=60).style(columns=5, container=True)
294
+
295
+ with gr.Column(elem_id="col_container"):
296
+
297
+ openai_api_key = gr.Textbox(type='password', label="Enter API Key",placeholder="sk-xxxxxxxx")
298
+ button1=gr.Button("feed the csv into model")
299
+ button1.click(run, show_progress=True)
300
+ chatbot = gr.Chatbot(elem_id='chatbot') # c
301
+ inputs = gr.Textbox(show_label=False, placeholder="Enter Content",elem_id="inputs",value='') # t
302
+ state = gr.State([]) # s
303
+ # state = gr.State(get_empty_state())
304
+ b1 = gr.Button("Submit")
305
+ btn_clear_conversation = gr.Button("🔃 New Conversation")
306
+
307
+ # inputs, top_p, temperature, top_k, repetition_penalty
308
+ with gr.Accordion("Advanced settings", open=False,):
309
+ context_length = gr.Slider(minimum=1, maximum=6, value=2, step=1, label="Dialogue Length",
310
+ info="Associate the previous rounds of dialogues, the higher the value, the more tokens will be consumed")
311
+ temperature = gr.Slider(minimum=0, maximum=2.0, value=0.7, step=0.1, label="Temperature",
312
+ info="The higher the value, the stronger the creativity")
313
+ prompt_template = gr.Dropdown(label="Choose robot type",
314
+ choices=list(prompt_templates.keys()),visible=False)
315
+ prompt_template_preview = gr.Markdown(elem_id="prompt_template_preview",visible=False)
316
+ # top_k = gr.Slider( minimum=1, maximum=50, value=4, step=1, interactive=True, label="Top-k",)
317
+ # repetition_penalty = gr.Slider( minimum=0.1, maximum=3.0, value=1.03, step=0.01, interactive=True, label="Repetition Penalty", )
318
+ chat_counter = gr.Number(value=0, visible=False, precision=0)
319
+
320
+ inputs.submit(predict, [inputs, prompt_template, temperature, openai_api_key, chat_counter, context_length, chatbot, state],
321
+ [chatbot, state, chat_counter], )
322
+ b1.click(predict, [inputs, prompt_template, temperature, openai_api_key, chat_counter, context_length, chatbot, state],
323
+ [chatbot, state, chat_counter], )
324
+ b1.click(reset_textbox, [], [inputs])
325
+
326
+ btn_clear_conversation.click(clear_conversation, [], [inputs, chatbot, state, chat_counter])
327
+
328
+ inputs.submit(reset_textbox, [], [inputs])
329
+ prompt_template.change(on_prompt_template_change, inputs=[prompt_template], outputs=[prompt_template_preview])
330
+ demo.load(get_prompt_templates, inputs=None, outputs=[prompt_template], queur=False)
331
+
332
+ # gr.Markdown(description)
333
+ demo.queue(concurrency_count=10)
334
+ demo.launch(debug=True)
335
+
336
+
images.csv ADDED
The diff for this file is too large to render. See raw diff
 
prompts.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "act": "Act as product Recommendation system",
4
+ "prompt": "I want you to act as a Product recommender and read the CSV file I will provide you. I need you to thoroughly review the CSV file and give recommendations based on the input afterward. You should recommend me the product by displaying its id,description.\n"
5
+ }
6
+
7
+ ]
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ sentence-transformers==2.2.1
2
+ transformers
3
+ torch
4
+ numpy
5
+ tiktoken
6
+
7
+ ftfy
8
+ langchain
9
+ IPython
+ pandas
+ requests
stylesu.csv ADDED
The diff for this file is too large to render. See raw diff