rkoushikroy2 commited on
Commit
8e66315
1 Parent(s): daedc8e

Upload 4 files

Browse files
Files changed (5) hide show
  1. .gitattributes +1 -0
  2. app.py +41 -0
  3. data_with_ada_embedding.csv +3 -0
  4. helper_functions.py +90 -0
  5. requirements.txt +127 -0
.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
32
  *.zip filter=lfs diff=lfs merge=lfs -text
33
  *.zst filter=lfs diff=lfs merge=lfs -text
34
  *tfevents* filter=lfs diff=lfs merge=lfs -text
35
+ data_with_ada_embedding.csv filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Imports
import os

import gradio as gr
from helper_functions import *

with gr.Blocks() as app:
    gr.Markdown('# FundedNext Customer Service Chatbot')
    with gr.Tab("Chat"):
        with gr.Row():
            with gr.Column():
                msg = gr.Textbox()
                with gr.Row():
                    submit = gr.Button("Submit")
                    clear = gr.Button("Clear")
            with gr.Column():
                chatbot = gr.Chatbot()

        def user(user_message, history):
            """Append the user's turn to the chat history and clear the textbox."""
            return "", history + [[user_message, None]]

        def bot(history):
            """Fill in the assistant's reply for the most recent user turn."""
            bot_message = get_reply(history[-1][0])
            history[-1][1] = bot_message
            return history

        # Enter key: post the user turn, generate the reply, then trim the
        # conversation memory if it has grown too long (see reset_memory).
        msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        ).then(
            fn=reset_memory, inputs=None, outputs=None
        )
        # Submit button: same pipeline, minus the memory trim (matches original wiring).
        submit.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
            bot, chatbot, chatbot
        )
        # Clear button: wipe the visible chat, then reset server-side memory.
        clear.click(fn=lambda: None, inputs=None, outputs=chatbot, queue=False).then(
            fn=clear_variables, inputs=None, outputs=None, queue=False
        )
    with gr.Tab("Prompt"):
        context = gr.Textbox()
        # Distinct name: 'submit' above is the Chat tab's button.
        check_prompt = gr.Button("Check Prompt")
        check_prompt.click(get_context_gr, None, context, queue=False)

# SECURITY: credentials were previously hardcoded here. Prefer environment
# variables; the old values remain as fallbacks for backward compatibility —
# rotate them and set GRADIO_AUTH_USER / GRADIO_AUTH_PASS in deployment.
app.launch(
    auth=(
        os.getenv("GRADIO_AUTH_USER", "fundednext"),
        os.getenv("GRADIO_AUTH_PASS", "fundednext123"),
    )
)

# app.launch(debug=True)
data_with_ada_embedding.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7513e6259b03d34679c0d36c2ce8e75c4e81232e20c412f3bcd414ed92741c5
3
+ size 19996727
helper_functions.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Imports
import ast
import os

import numpy as np
import openai
import pandas as pd
from openai.embeddings_utils import get_embedding, cosine_similarity

# Set up OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")

# Load the pre-embedded knowledge base.
df = pd.read_csv('data_with_ada_embedding.csv')
# Rough token estimate: ~4 characters per token.
df["token"] = df.combined_summarised.map(len)//4
# The CSV stores embeddings as list literals. ast.literal_eval parses them
# identically to eval() but cannot execute arbitrary code from the file.
df['ada_embedding'] = df.ada_embedding.apply(ast.literal_eval).apply(np.array)

pre_text = """You are a customer service agent of an app called FundedNext. FundedNext is a Proprietary Trading Firm aimed to assist traders with individual funding up to $200,000, backed by a solid strategy to minimise risks.
Fundednext has two account models. Users can go for Either Express Model or Evaluation Model, To get a real funded account. Each model has challenge phase and real phase. After sucessfully completing the challenge phase without violating any rules, users are eligible for their real trading account.
Express model has two phases. Express Demo and Express Real. Express Demo is the challenge phase. Express users need to pass only one challenge phase to get to Express Real phase.
While traders in the Evaluation model need to pass two challenge phases called Phase 1 and Phase 2. The final phase in Evaluation model is Evaluation Real.
You are supposed to help the users of FundedNext with their questions and provide them with helpful answers.
For each question, you will be given a context. You can use the context to answer the question. You can also use the context to ask follow up questions to the user. You should only answer the question if you are sure of the answer based on the provided context.
"""
22
+
23
def search(df, query, max_n, max_token):
    """Return the most relevant summaries for *query*, joined by blank lines.

    Embeds the query with text-embedding-ada-002, ranks rows of *df* by
    cosine similarity, keeps the top *max_n*, and truncates the result so the
    cumulative (estimated) token count stays under *max_token*.

    Fix vs. original: the original wrote 'similarity' and 'cumulative_sum'
    columns directly onto the caller's DataFrame (the module-level df),
    mutating shared state on every request. We now score on a copy.
    """
    query_embedding = get_embedding(
        query,
        engine="text-embedding-ada-002"
    )
    # assign() returns a new frame; the caller's df is left untouched.
    scored = df.assign(
        similarity=df.ada_embedding.apply(lambda x: cosine_similarity(x, query_embedding))
    )
    scored = scored.sort_values("similarity", ascending=False).head(max_n)
    scored["cumulative_sum"] = scored.token.cumsum()
    # NOTE: if the single best row already exceeds max_token this returns "".
    return '\n\n'.join(scored[(scored['cumulative_sum'] < max_token)]["combined_summarised"])
32
+
33
def get_context(query):
    """Wrap the top search results and the user's question into one prompt.

    Retrieves up to 10 summaries (capped at ~500 estimated tokens) from the
    module-level DataFrame and embeds them, with the question, in a fixed
    context template sent to the chat model.
    """
    context_block = search(df, query, max_n=10, max_token=500)
    return f"""I will ask you questions based on the following context:
— Start of Context —

{context_block}

— End of Context —
My question is: “{query}”
"""
43
+
44
+
45
# Long-lived conversation memory: the message list accumulated across turns
# and replayed to the ChatCompletion API (seeded with the system prompt).
messages_archived = [{"role": "system", "content": pre_text}]

# Per-request scratch state, exposed on the app's "Prompt" tab for debugging:
# the last retrieved context and the exact message list of the last API call.
context = "Empty"
messages_current = []
51
+
52
def get_reply(message):
    """Answer *message* with gpt-3.5-turbo, grounded in retrieved context.

    Builds the request from the archived history plus a context-wrapped copy
    of the question, calls the API at temperature 0, and appends the raw
    question and the reply to the archive for future turns. Updates the
    module-level `context` and `messages_current` as a side effect.
    """
    global context, messages_current

    # If no message is provided, return a string that says "No Message Received"
    if not message:
        return "No Message Received"

    # Snapshot the archive so the context-wrapped question is only sent,
    # never stored; the archive keeps the bare question instead.
    messages_current = messages_archived.copy()
    context = get_context(message)
    messages_current.append({"role": "user", "content": context})

    chat = openai.ChatCompletion.create(
        model="gpt-3.5-turbo", messages=messages_current, temperature=0
    )
    reply = chat.choices[0].message.content

    messages_archived.append({"role": "user", "content": message})
    messages_archived.append({"role": "assistant", "content": reply})
    return reply
73
+
74
def clear_variables():
    """Drop the archived conversation, keeping only the system prompt."""
    global messages_archived
    messages_archived = [{"role": "system", "content": pre_text}]
79
+
80
def reset_memory():
    """Restart the archive once it reaches 21 messages (~10 user/bot turns).

    Crude context-window guard: rather than trimming oldest turns, the whole
    history is reset to just the system prompt.
    """
    global messages_archived
    if len(messages_archived) >= 21:
        messages_archived = [{"role": "system", "content": pre_text}]
86
+
87
+
88
+
89
def get_context_gr():
    """Render the last API call's message list as text for the Prompt tab."""
    snapshot = messages_current
    return str(snapshot)
requirements.txt ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ aiofiles==23.1.0
2
+ aiohttp==3.8.4
3
+ aiosignal==1.3.1
4
+ altair==4.2.2
5
+ anyio==3.6.2
6
+ asttokens==2.2.1
7
+ async-timeout==4.0.2
8
+ attrs==22.2.0
9
+ backcall==0.2.0
10
+ backports.functools-lru-cache==1.6.4
11
+ brotlipy==0.7.0
12
+ certifi==2022.12.7
13
+ cffi==1.15.1
14
+ charset-normalizer==2.0.4
15
+ click==8.1.3
16
+ colorama==0.4.6
17
+ contourpy==1.0.7
18
+ cryptography==39.0.1
19
+ cycler==0.11.0
20
+ datasets==2.10.1
21
+ debugpy==1.5.1
22
+ decorator==5.1.1
23
+ dill==0.3.6
24
+ entrypoints==0.4
25
+ et-xmlfile==1.1.0
26
+ executing==1.2.0
27
+ faiss==1.7.2
28
+ fastapi==0.94.1
29
+ ffmpy==0.3.0
30
+ filelock==3.9.0
31
+ flit_core==3.6.0
32
+ fonttools==4.39.0
33
+ frozenlist==1.3.3
34
+ fsspec==2023.3.0
35
+ gradio==3.21.0
36
+ h11==0.14.0
37
+ httpcore==0.16.3
38
+ httpx==0.23.3
39
+ huggingface-hub==0.13.1
40
+ idna==3.4
41
+ importlib-metadata==6.0.0
42
+ importlib-resources==5.12.0
43
+ ipykernel==6.15.0
44
+ ipython==8.11.0
45
+ jedi==0.18.2
46
+ Jinja2==3.1.2
47
+ joblib==1.2.0
48
+ jsonschema==4.17.3
49
+ jupyter_client==8.0.3
50
+ jupyter_core==5.2.0
51
+ kiwisolver==1.4.4
52
+ linkify-it-py==2.0.0
53
+ markdown-it-py==2.2.0
54
+ MarkupSafe==2.1.2
55
+ matplotlib==3.7.1
56
+ matplotlib-inline==0.1.6
57
+ mdit-py-plugins==0.3.3
58
+ mdurl==0.1.2
59
+ mkl-service==2.3.0
60
+ multidict==6.0.4
61
+ multiprocess==0.70.14
62
+ nest-asyncio==1.5.6
63
+ numpy==1.24.2
64
+ openai==0.27.2
65
+ openpyxl==3.1.2
66
+ orjson==3.8.7
67
+ packaging==23.0
68
+ pandas==1.5.3
69
+ parso==0.8.3
70
+ pickleshare==0.7.5
71
+ Pillow==9.4.0
72
+ pip==23.0.1
73
+ pkgutil_resolve_name==1.3.10
74
+ platformdirs==2.5.2
75
+ plotly==5.13.1
76
+ prompt-toolkit==3.0.38
77
+ psutil==5.9.0
78
+ pure-eval==0.2.2
79
+ pyarrow==11.0.0
80
+ pycparser==2.21
81
+ pydantic==1.10.6
82
+ pydub==0.25.1
83
+ Pygments==2.14.0
84
+ pyOpenSSL==23.0.0
85
+ pyparsing==3.0.9
86
+ pyrsistent==0.19.3
87
+ PySocks==1.7.1
88
+ python-dateutil==2.8.2
89
+ python-multipart==0.0.6
90
+ pytz==2022.7.1
91
+ pywin32==227
92
+ PyYAML==6.0
93
+ pyzmq==23.2.0
94
+ regex==2022.10.31
95
+ requests==2.28.1
96
+ responses==0.18.0
97
+ rfc3986==1.5.0
98
+ scikit-learn==1.2.2
99
+ scipy==1.10.1
100
+ setuptools==65.6.3
101
+ six==1.16.0
102
+ sniffio==1.3.0
103
+ stack-data==0.6.2
104
+ starlette==0.26.1
105
+ tenacity==8.2.2
106
+ threadpoolctl==3.1.0
107
+ tokenizers==0.13.2
108
+ toolz==0.12.0
109
+ torch==1.13.1
110
+ torchaudio==0.13.1
111
+ torchvision==0.14.1
112
+ tornado==6.2
113
+ tqdm==4.65.0
114
+ traitlets==5.9.0
115
+ transformers==4.26.1
116
+ typing_extensions==4.4.0
117
+ uc-micro-py==1.0.1
118
+ urllib3==1.26.14
119
+ uvicorn==0.21.0
120
+ wcwidth==0.2.6
121
+ websockets==10.4
122
+ wheel==0.38.4
123
+ win-inet-pton==1.1.0
124
+ wincertstore==0.2
125
+ xxhash==3.2.0
126
+ yarl==1.8.2
127
+ zipp==3.15.0