Update app.py
app.py CHANGED
@@ -1,3 +1,5 @@
+import os
+import random
 import gradio as gr
 from langchain.chains import LLMChain
 from langchain_core.prompts import (
@@ -20,10 +22,40 @@ from langchain_groq import ChatGroq
 
 # Function to generate responses
 def generate_response(user_input, history, locale, model, temperature, max_tokens, top_p, seed):
-
-
-
-
+
+    # Initialize the Groq LangChain chat object for the requested model
+    groq_chat = ChatGroq(
+        groq_api_key=os.environ.get("GROQ_API_KEY"),
+        model_name=model
+    )
+
+    # Manage the chat history so the AI remembers the specified number of past messages, in this case 5
+    memory = ConversationBufferWindowMemory(k=5, memory_key="chat_history", return_messages=True)
+
+    prompt = ChatPromptTemplate.from_messages(
+        [
+            # The persistent system prompt; it sets the initial context for the AI
+            SystemMessage(content='You are a helpful AI assistant.'),
+            # This placeholder takes care of the chat history
+            MessagesPlaceholder(variable_name="chat_history"),
+            # The template where the user's current input is injected into the prompt
+            HumanMessagePromptTemplate.from_template("{human_input}"),
+        ]
+    )
+
+    # Create a conversation sequence (the LCEL pipe yields a RunnableSequence)
+    conversation = prompt | groq_chat
+
+    # Load chat_history from memory
+    chat_history = memory.load_memory_variables({})["chat_history"]
+
+    # The chatbot's answer is generated by sending the full prompt to the LLM
+    response = conversation.invoke({"human_input": user_input, "chat_history": chat_history})
+
+    # Update the memory with the new interaction
+    memory.save_context({"input": user_input}, {"output": response.content})
+
+    return response.content
 
 # Define additional inputs and examples if needed
 additional_inputs = [