jchen8000 commited on
Commit
f4b6670
·
verified ·
1 Parent(s): 588f6b1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -10
app.py CHANGED
@@ -11,12 +11,8 @@ from langchain.chains.conversation.memory import ConversationBufferWindowMemory
11
  from langchain_groq import ChatGroq
12
  from langchain_google_genai import ChatGoogleGenerativeAI
13
 
14
- # Initialize Groq Langchain chat object and conversation
15
- groq_chat = ChatGroq(groq_api_key=os.environ.get("GROQ_API_KEY"))
16
-
17
- # Initialize Google Langchain chat object and conversation
18
- os.environ["GOOGLE_API_KEY"] = os.environ.get("GOOGLE_API_KEY")
19
- google_chat = ChatGoogleGenerativeAI(model="gemini-1.5-pro")
20
 
21
  # Initialize memory to manage the chat history,
22
  # ensuring the AI remembers the specified number of history messages, in this case 8.
@@ -27,11 +23,25 @@ def generate_response(user_input, history, model, temperature, max_tokens, top_p
27
  print( "Model =", model)
28
 
29
  if model.startswith("gemini"):
30
- chat = google_chat
31
- # chat.model = model
 
 
 
 
 
 
 
32
  else:
33
- chat = groq_chat
34
- chat.model_name = model
 
 
 
 
 
 
 
35
 
36
  prompt = ChatPromptTemplate.from_messages(
37
  [
 
11
  from langchain_groq import ChatGroq
12
  from langchain_google_genai import ChatGoogleGenerativeAI
13
 
14
+ # os.environ["GROQ_API_KEY"] = os.environ.get("GROQ_API_KEY")
15
+ # os.environ["GOOGLE_API_KEY"] = os.environ.get("GOOGLE_API_KEY")
 
 
 
 
16
 
17
  # Initialize memory to manage the chat history,
18
  # ensuring the AI remembers the specified number of history messages, in this case 8.
 
23
  print( "Model =", model)
24
 
25
  if model.startswith("gemini"):
26
+ chat = ChatGoogleGenerativeAI(
27
+ google_api_key = os.environ.get("GOOGLE_API_KEY"),
28
+ model = model,
29
+ temperature=temperature,
30
+ max_tokens=max_tokens,
31
+ timeout=None,
32
+ max_retries=2,
33
+ top_p = top_p
34
+ )
35
  else:
36
+ chat = ChatGroq(
37
+ groq_api_key = os.environ.get("GROQ_API_KEY"),
38
+ model_name = model,
39
+ temperature=temperature,
40
+ max_tokens=max_tokens,
41
+ request_timeout=None,
42
+ max_retries=2,
43
+ top_p = top_p
44
+ )
45
 
46
  prompt = ChatPromptTemplate.from_messages(
47
  [