Jori Geysen committed
Commit 74b7fbb
1 Parent(s): a0a1f2e

initial commit

Files changed (3)
  1. Dockerfile +11 -0
  2. app.py +68 -0
  3. requirements.txt +5 -0
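Taken together, the three files add a minimal Chainlit + LlamaIndex chat app: app.py builds or reloads a vector index over hitchhikers.pdf and streams answers from a fine-tuned GPT-3.5 model, the Dockerfile packages the app, and requirements.txt pins the runtime dependencies.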
Dockerfile ADDED
@@ -0,0 +1,11 @@
+ FROM python:3.9
+
+ WORKDIR /code
+
+ COPY ./requirements.txt /code/requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r /code/requirements.txt
+
+ COPY . .
+
+ CMD ["chainlit", "run", "app.py", "--port", "7860"]
app.py ADDED
@@ -0,0 +1,68 @@
+ import os
+ import openai
+
+ from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
+ from llama_index.callbacks.base import CallbackManager
+ from llama_index import (
+     LLMPredictor,
+     ServiceContext,
+     StorageContext,
+     load_index_from_storage,
+ )
+ from llama_index.llms import OpenAI
+ import chainlit as cl
+
+
+ openai.api_key = os.environ.get("OPENAI_API_KEY")
+
+ try:
+     # rebuild storage context
+     storage_context = StorageContext.from_defaults(persist_dir="./storage")
+     # load index
+     index = load_index_from_storage(storage_context)
+ except:
+     from llama_index import GPTVectorStoreIndex, SimpleDirectoryReader
+
+     documents = SimpleDirectoryReader(input_files=["hitchhikers.pdf"]).load_data()
+     index = GPTVectorStoreIndex.from_documents(documents)
+     index.storage_context.persist()
+
+
+ @cl.on_chat_start
+ async def factory():
+     llm_predictor = LLMPredictor(
+         llm=OpenAI(
+             temperature=0,
+             model="ft:gpt-3.5-turbo-0613:personal::7sleLdbA",
+             streaming=True,
+             context_window=2048,
+         ),
+     )
+     service_context = ServiceContext.from_defaults(
+         llm_predictor=llm_predictor,
+         chunk_size=512,
+         callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()]),
+     )
+
+     query_engine = index.as_query_engine(
+         service_context=service_context,
+         streaming=True,
+     )
+
+     cl.user_session.set("query_engine", query_engine)
+
+
+ @cl.on_message
+ async def main(message):
+     query_engine = cl.user_session.get("query_engine")  # type: RetrieverQueryEngine
+     response = await cl.make_async(query_engine.query)(message)
+
+     response_message = cl.Message(content="")
+
+     for token in response.response_gen:
+         await response_message.stream_token(token=token)
+
+     if response.response_txt:
+         response_message.content = response.response_txt
+
+     await response_message.send()
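On first run the try/except above builds the index from hitchhikers.pdf and persists it to ./storage; later runs reload it from disk. As a quick sanity check of that persisted index outside Chainlit, here is a minimal sketch (not part of this commit) using the same llama_index 0.8.x API; the example question is hypothetical and assumes ./storage already exists:

import os
import openai

from llama_index import StorageContext, load_index_from_storage

openai.api_key = os.environ.get("OPENAI_API_KEY")

# Reload the index that app.py persisted to ./storage
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)

# Query synchronously, without streaming or a Chainlit session
query_engine = index.as_query_engine()
response = query_engine.query("Who is Arthur Dent?")  # hypothetical example question
print(response)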
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ chainlit==0.6.3
+ llama_index==0.8.9
+ openai==0.27.9
+ pypdf
+ nltk