Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -103,38 +103,39 @@ def load_recommender(paths, start_page=1):
|
|
103 |
recommender.fit(chunks)
|
104 |
return 'Corpus Loaded.'
|
105 |
|
106 |
-
def generate_text(
|
107 |
-
|
108 |
model=engine,
|
109 |
-
messages=
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
|
|
|
|
115 |
|
116 |
def generate_answer(question):
|
117 |
topn_chunks = recommender(question)
|
118 |
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
|
|
|
|
127 |
|
128 |
for c in topn_chunks:
|
129 |
-
|
130 |
'role': 'assistant',
|
131 |
'content': c
|
132 |
-
}
|
133 |
-
system_message['content'] += '\n\n' + search_result_message['content']
|
134 |
|
135 |
-
|
136 |
-
|
137 |
-
answer = generate_text(messages, "gpt-3.5-turbo")
|
138 |
return answer
|
139 |
|
140 |
def question_answer(urls, file, question):
|
|
|
103 |
recommender.fit(chunks)
|
104 |
return 'Corpus Loaded.'
|
105 |
|
106 |
+
def generate_text(prompt, engine='gpt-3.5-turbo', max_tokens=2050, temperature=0.8):
    """Call the OpenAI chat-completion API and return the reply text.

    Parameters
    ----------
    prompt : str | list[dict]
        Either a plain user prompt string, or a ready-made list of chat
        messages (``{'role': ..., 'content': ...}`` dicts) that is sent to
        the API verbatim.  Accepting a list is required because
        ``generate_answer`` calls this function with a full message list;
        the previous code embedded that list inside a single user message's
        ``content``, which is not a valid API payload.
    engine : str
        Chat model name, forwarded as the ``model`` argument.
    max_tokens : int
        Completion-length cap forwarded to the API.
    temperature : float
        Sampling temperature forwarded to the API.

    Returns
    -------
    str
        The ``content`` of the first choice's assistant message.
    """
    if isinstance(prompt, list):
        # Caller supplied the complete message array (e.g. generate_answer).
        messages = prompt
    else:
        # Plain string prompt: wrap in the standard system + user pair.
        messages = [{"role": "system", "content": "You are a research assistant"},
                    {"role": "user", "content": prompt}]
    response = openai.ChatCompletion.create(
        model=engine,
        messages=messages,
        max_tokens=max_tokens,
        n=1,
        temperature=temperature,
    )
    # NOTE: the original had an unreachable `return None` after this line;
    # removed as dead code.
    return response.choices[0].message['content']
|
117 |
|
118 |
def generate_answer(question):
    """Answer *question* from the loaded corpus via the chat model.

    Pulls the top-ranked text chunks from the module-level ``recommender``,
    builds a chat-message list — system instruction, the user query, then
    one assistant message per retrieved chunk — and delegates the actual
    completion call to ``generate_text``.
    """
    relevant_chunks = recommender(question)

    system_msg = {
        'role': 'system',
        'content': 'You are a research assistant. Compose a comprehensive reply to the query using the search results given. Only include information found in the results. If the text does not relate to the query, simply state "Text Not Found in Body of Knowledge".'
    }
    query_msg = {'role': 'user', 'content': 'Query: ' + question}

    # One assistant message per retrieved chunk, appended after the query.
    chunk_msgs = [{'role': 'assistant', 'content': chunk} for chunk in relevant_chunks]

    return generate_text([system_msg, query_msg] + chunk_msgs)
|
140 |
|
141 |
def question_answer(urls, file, question):
|