gufett0 committed
Commit: a665d4d
Parent(s): 07ea5ca

added introductory prompt

Files changed (1):
backend.py +5 -2
backend.py CHANGED
@@ -12,7 +12,8 @@ from huggingface_hub import login
 from llama_index.core.memory import ChatMemoryBuffer
 from typing import Iterator, List, Any
 from llama_index.core.chat_engine import CondensePlusContextChatEngine
-from llama_index.core.llms import ChatMessage, MessageRole
+from llama_index.core.llms import ChatMessage, MessageRole, CompletionResponse
+
 
 
 
@@ -143,7 +144,9 @@ def handle_query(query_str: str,
 
         outputs.append(token)
         print(f"Generated token: {token}")
-        yield "".join(outputs)
+
+        streamed_response = "".join(outputs)
+        yield CompletionResponse(text=streamed_response, delta=token)
 
     """if sources:
         sources_str = ", ".join(sources)
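
For context, a minimal sketch of the streaming pattern this commit introduces, assuming handle_query builds its reply token by token from the chat engine: each step now yields a llama_index CompletionResponse whose text field holds the cumulative output and whose delta field holds only the newest token. The stream_tokens helper and its literal token list below are hypothetical stand-ins, not part of the actual backend.

# Minimal sketch of the CompletionResponse streaming pattern (the token list and
# stream_tokens helper are hypothetical; the real code pulls tokens from the chat engine).
from typing import Iterator, List
from llama_index.core.llms import CompletionResponse

def stream_tokens(tokens: List[str]) -> Iterator[CompletionResponse]:
    outputs: List[str] = []
    for token in tokens:
        outputs.append(token)
        streamed_response = "".join(outputs)
        # text carries everything generated so far; delta carries only the new token
        yield CompletionResponse(text=streamed_response, delta=token)

# Consumers can render the stream progressively by printing just the deltas.
for resp in stream_tokens(["Hello", ", ", "world"]):
    print(resp.delta, end="", flush=True)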