Commit 3cfd42f
1 Parent(s): 937e308
Update document_questioner_app.py
document_questioner_app.py CHANGED
@@ -10,8 +10,6 @@ from langchain.chains import RetrievalQAWithSourcesChain
 from langchain.prompts import PromptTemplate
 from langchain.chat_models import ChatOpenAI
 
-os.environ["OPENAI_API_KEY"] = "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"
-
 def question_document(Document, Question):
 
     # loads a PDF document
@@ -28,7 +26,7 @@ def question_document(Document, Question):
 
     docsearch = Chroma.from_documents(docs, embeddings, ids=["page" + str(d.metadata["page"]) for d in docs])
 
     # Define LLM
-    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.2)
+    llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0.2, openai_api_key = os.environ['OpenaiKey'])
 
     # Customize map_reduce prompts
     question_template = """{context}
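
The net effect of the commit is that the OpenAI key is no longer hardcoded at module level but is read from an OpenaiKey environment variable when the chat model is constructed. Below is a minimal sketch of that pattern, assuming the variable is supplied by the deployment (for example as a repository secret or an exported shell variable); only the OpenaiKey name and the ChatOpenAI arguments come from the diff, the rest is illustrative.

import os

from langchain.chat_models import ChatOpenAI

# Read the key from the environment instead of committing it to the repo.
# os.environ[...] raises KeyError if 'OpenaiKey' is not set, so a missing
# secret fails loudly at startup rather than at request time.
llm = ChatOpenAI(
    model_name="gpt-3.5-turbo",
    temperature=0.2,
    openai_api_key=os.environ["OpenaiKey"],
)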