mencraft committed on
Commit
61482e0
·
1 Parent(s): 2a7b2e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -11
app.py CHANGED
@@ -34,7 +34,7 @@ documents = SimpleDirectoryReader(
34
  ).load_data()
35
 
36
  index = VectorStoreIndex.from_documents(documents)
37
-
38
 
39
  @cl.on_chat_start
40
  async def factory():
@@ -59,31 +59,51 @@ async def factory():
59
  callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()]),
60
  )
61
 
 
 
 
 
 
 
62
  query_engine = index.as_query_engine(
63
  service_context=gpt_35_context
64
  )
65
 
 
 
66
  msg.content = f"Index built!"
67
  await msg.send()
68
 
69
  cl.user_session.set("query_engine", query_engine)
70
 
 
 
71
 
72
  @cl.on_message
73
  async def main(message):
74
  query_engine = cl.user_session.get("query_engine") # type: RetrieverQueryEngine
75
  response = await cl.make_async(query_engine.query)(message)
76
- print(response)
77
-
78
- # await cl.Message(content=response).send()
79
  response_message = cl.Message(content="")
80
- response_message.content = response
81
- print(response_message)
 
 
 
 
 
82
  await response_message.send()
83
- # # for token in response.response_gen:
84
- # # await response_message.stream_token(token=token)
85
 
86
- # # if response.response_txt:
87
- # response_message.content = response
 
 
 
 
 
 
 
 
 
 
88
 
89
- # await response_message.send()
 
34
  ).load_data()
35
 
36
  index = VectorStoreIndex.from_documents(documents)
37
+ ft_model_id='ft:gpt-3.5-turbo-0613:handshake::7raTNuPU'
38
 
39
  @cl.on_chat_start
40
  async def factory():
 
59
  callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()]),
60
  )
61
 
62
+ ft_context = ServiceContext.from_defaults(
63
+ llm=OpenAI(model=ft_model_id, temperature=0.3),
64
+ context_window=2048, # limit the context window artifically to test refine process
65
+ callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()]),
66
+ )
67
+
68
  query_engine = index.as_query_engine(
69
  service_context=gpt_35_context
70
  )
71
 
72
+ finetune_query_engine = index.as_query_engine(service_context=ft_context)
73
+
74
  msg.content = f"Index built!"
75
  await msg.send()
76
 
77
  cl.user_session.set("query_engine", query_engine)
78
 
79
+ cl.user_session.set("finetune_query_engine", finetune_query_engine)
80
+
81
 
82
  @cl.on_message
83
  async def main(message):
84
  query_engine = cl.user_session.get("query_engine") # type: RetrieverQueryEngine
85
  response = await cl.make_async(query_engine.query)(message)
86
+
 
 
87
  response_message = cl.Message(content="")
88
+ response_message.content = "Base Model Response"
89
+
90
+ elements = [
91
+ cl.Text(content=response.response, display="inline")
92
+ ]
93
+ response_message.elements = elements
94
+
95
  await response_message.send()
 
 
96
 
97
+ finetune_query_engine = cl.user_session.get("finetune_query_engine") # type: RetrieverQueryEngine
98
+ finetune_response = await cl.make_async(finetune_query_engine.query)(message)
99
+
100
+ finetune_response_message = cl.Message(content="")
101
+ finetune_response_message.content = 'Finetune Model Response'
102
+
103
+ elements = [
104
+ cl.Text( content=finetune_response.response, display="inline")
105
+ ]
106
+ finetune_response_message.elements = elements
107
+
108
+ await finetune_response_message.send()
109