Update app.py
app.py CHANGED

@@ -11,16 +11,6 @@ from langchain_core.messages import SystemMessage
 from langchain.chains.conversation.memory import ConversationBufferWindowMemory
 from langchain_groq import ChatGroq
 
-
-
-# # Initialize the language model and memory
-# llm = Groq(api_key="your_groq_api_key")
-# memory = ConversationBufferMemory()
-
-# # Define the conversation chain
-# conversation = ConversationChain(llm=llm, memory=memory)
-
-# Function to generate responses
 def generate_response(user_input, history, model, temperature, max_tokens, top_p, seed):
     print( "Model =", model)
 
@@ -68,8 +58,13 @@ additional_inputs = [
 ]
 
 example1 = [
-    ["Who are you?"],
-]
+    ["Who are you? and please introduce yourself."],
+    ["What's the distance from Tokyo to New York?"],
+    ["What to San Francisco?"],
+    ["Then what to Beijing?"],
+    ["And what to Kyoto?"],
+    ["Who are you?"],
+]
 
 # Create the Gradio interface
 interface = gr.ChatInterface(
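For orientation, here is a minimal sketch of how the pieces this commit touches could fit together, assuming classic LangChain and Gradio's tuple-style chat history. Only the two imports, the generate_response signature and its print, example1, and the gr.ChatInterface call appear in the hunks; everything else below (the GROQ_API_KEY environment variable, the model ids, the window size k=10, the contents of additional_inputs, and the body of generate_response) is a hypothetical reconstruction, not the Space's actual code.

import os

import gradio as gr
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_groq import ChatGroq

def generate_response(user_input, history, model, temperature, max_tokens, top_p, seed):
    print("Model =", model)
    # Assumed body: rebuild the model and a windowed memory on every call,
    # replaying the Gradio history (a list of [user, assistant] pairs) so
    # follow-ups like "What to San Francisco?" can resolve against the
    # earlier Tokyo-to-New-York question.
    llm = ChatGroq(
        groq_api_key=os.environ["GROQ_API_KEY"],  # assumed credential source
        model_name=model,
        temperature=temperature,
        max_tokens=max_tokens,
        model_kwargs={"top_p": top_p, "seed": seed},  # forwarded to the Groq API
    )
    memory = ConversationBufferWindowMemory(k=10)  # keep only the last 10 turns
    for user_msg, bot_msg in history:
        memory.save_context({"input": user_msg}, {"output": bot_msg})
    conversation = ConversationChain(llm=llm, memory=memory)
    return conversation.predict(input=user_input)

# Assumed controls; the diff only shows that additional_inputs is a list
# whose values feed generate_response's extra parameters.
additional_inputs = [
    gr.Dropdown(["llama3-70b-8192", "mixtral-8x7b-32768"], value="llama3-70b-8192", label="Model"),
    gr.Slider(0.0, 2.0, value=0.7, label="Temperature"),
    gr.Slider(64, 8192, value=1024, step=64, label="Max tokens"),
    gr.Slider(0.0, 1.0, value=1.0, label="Top-p"),
    gr.Number(value=42, precision=0, label="Seed"),
]

example1 = [
    ["Who are you? and please introduce yourself."],
    ["What's the distance from Tokyo to New York?"],
    ["What to San Francisco?"],
    ["Then what to Beijing?"],
    ["And what to Kyoto?"],
    ["Who are you?"],
]

# Create the Gradio interface
interface = gr.ChatInterface(
    fn=generate_response,
    additional_inputs=additional_inputs,
    examples=example1,
)

if __name__ == "__main__":
    interface.launch()

Note how the new example1 prompts exercise the memory window: "What to San Francisco?" only makes sense if the earlier distance question is still in the buffer, which is presumably why ConversationBufferWindowMemory is imported rather than the unbounded ConversationBufferMemory referenced in the deleted comments.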
|