Using OpenAI chat model
Browse files
- .env-sample: +1 -0
- app.py: +8 -6
.env-sample
CHANGED
@@ -1 +1,2 @@
|
|
1 |
ANTHROPIC_API_KEY=""
|
|
|
|
1 |
ANTHROPIC_API_KEY=""
|
2 |
+
OPENAI_API_KEY=""
|
app.py
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
from langchain.chat_models import ChatAnthropic
|
2 |
from langchain import PromptTemplate, LLMChain, HuggingFaceHub
|
3 |
from langchain.prompts.chat import (
|
4 |
ChatPromptTemplate,
|
@@ -7,6 +7,7 @@ from langchain.prompts.chat import (
|
|
7 |
HumanMessagePromptTemplate,
|
8 |
)
|
9 |
from langchain.schema import AIMessage, HumanMessage, SystemMessage
|
|
|
10 |
import streamlit as st
|
11 |
from dotenv import load_dotenv
|
12 |
import PyPDF2
|
@@ -24,14 +25,15 @@ class LegalExpert:
|
|
24 |
[self.system_prompt, self.user_prompt]
|
25 |
)
|
26 |
|
27 |
-
# create llm
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
|
|
|
32 |
# self.chat = ChatAnthropic()
|
33 |
|
34 |
-
self.chain = LLMChain(llm=self.
|
35 |
|
36 |
def get_system_prompt(self):
|
37 |
system_prompt = """
|
|
|
1 |
+
from langchain.chat_models import ChatAnthropic, ChatOpenAI
|
2 |
from langchain import PromptTemplate, LLMChain, HuggingFaceHub
|
3 |
from langchain.prompts.chat import (
|
4 |
ChatPromptTemplate,
|
|
|
7 |
HumanMessagePromptTemplate,
|
8 |
)
|
9 |
from langchain.schema import AIMessage, HumanMessage, SystemMessage
|
10 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
11 |
import streamlit as st
|
12 |
from dotenv import load_dotenv
|
13 |
import PyPDF2
|
|
|
25 |
[self.system_prompt, self.user_prompt]
|
26 |
)
|
27 |
|
28 |
+
# create llm pipeline for huggingfaceHub model
|
29 |
+
model_name = "flan-t5-xl"
|
30 |
+
|
31 |
+
self.huggingface_llm = pipeline("text-generation", model=model_name, tokenizer=AutoTokenizer.from_pretrained(model_name))
|
32 |
|
33 |
+
self.openai_gpt4_llm = ChatOpenAI(model_name="gpt-4", temperature=0, max_tokens=256)
|
34 |
# self.chat = ChatAnthropic()
|
35 |
|
36 |
+
self.chain = LLMChain(llm=self.openai_gpt4_llm, prompt=full_prompt_template)
|
37 |
|
38 |
def get_system_prompt(self):
|
39 |
system_prompt = """
|