Update app.py
app.py CHANGED
@@ -1,28 +1,42 @@
 import gradio as gr
-from langchain.chains import
-from
-
+from langchain.chains import LLMChain
+from langchain_core.prompts import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    MessagesPlaceholder,
+)
+from langchain_core.messages import SystemMessage
+from langchain.chains.conversation.memory import ConversationBufferWindowMemory
+from langchain_groq import ChatGroq
+
 
-# Initialize the language model and memory
-llm = Groq(api_key="your_groq_api_key")
-memory = ConversationBufferMemory()
-
-# Define the conversation chain
-conversation = ConversationChain(llm=llm, memory=memory)
+# # Initialize the language model and memory
+# llm = Groq(api_key="your_groq_api_key")
+# memory = ConversationBufferMemory()
+
+# # Define the conversation chain
+# conversation = ConversationChain(llm=llm, memory=memory)
 
 # Function to generate responses
 def generate_response(user_input):
-    response = conversation.run(user_input)
-    return response
+    # response = conversation.run(user_input)
+    return user_input
 
 # Define additional inputs and examples if needed
-additional_inputs = [
-
+additional_inputs = [
+    gr.Dropdown(choices=["llama-3.1-70b-versatile", "llama-3.1-8b-instant", "llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "gemma2-9b-it", "gemma-7b-it"], value="llama-3.1-70b-versatile", label="Model"),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Temperature", info="Controls diversity of the generated text. Lower is more deterministic, higher is more creative."),
+    gr.Slider(minimum=1, maximum=8000, step=1, value=8000, label="Max Tokens", info="The maximum number of tokens that the model can process in a single response.<br>Maximums: 8k for gemma 7b it, gemma2 9b it, llama3 8b & 70b; 32k for mixtral 8x7b; 132k for llama 3.1."),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Top P", info="A method of text generation where a model will only consider the most probable next tokens that make up the probability p."),
+    gr.Number(precision=0, value=0, label="Seed", info="A starting point to initiate generation; use 0 for random.")
+]
+
+example1 = [["Who are you?"]]
 
 # Create the Gradio interface
 interface = gr.ChatInterface(
     fn=generate_response,
-    theme="Nymbo/Alyx_Theme",
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
     additional_inputs=additional_inputs,
     examples=example1,
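The commit swaps the old Groq + ConversationChain setup for LLMChain, the chat prompt classes, ConversationBufferWindowMemory, and ChatGroq, but leaves generate_response as an echo stub (return user_input). Below is a minimal sketch of one way those imports could be wired together; the system prompt text, the window size k=5, and routing top_p and seed through model_kwargs are assumptions, not part of the commit. Note that gr.ChatInterface calls fn as fn(message, history, *additional_inputs), so the stub's signature has to grow to accept the dropdown, slider, and number values in order.

import gradio as gr
from langchain.chains import LLMChain
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_core.messages import SystemMessage
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_groq import ChatGroq

# Keep the last k exchanges; memory_key must match the MessagesPlaceholder
# variable name, and return_messages=True is needed for chat-style prompts.
# k=5 is an assumption, not a value from the commit.
memory = ConversationBufferWindowMemory(
    k=5, memory_key="chat_history", return_messages=True
)

prompt = ChatPromptTemplate.from_messages([
    SystemMessage(content="You are a helpful assistant."),  # assumed system prompt
    MessagesPlaceholder(variable_name="chat_history"),
    HumanMessagePromptTemplate.from_template("{human_input}"),
])

def generate_response(user_input, history, model, temperature, max_tokens, top_p, seed):
    # Build the Groq-backed chat model from the UI controls. Forwarding top_p
    # and seed through model_kwargs is an assumption about the Groq API surface.
    llm = ChatGroq(
        groq_api_key="your_groq_api_key",
        model_name=model,
        temperature=temperature,
        max_tokens=max_tokens,
        model_kwargs={"top_p": top_p, "seed": seed},
    )
    # The module-level memory persists across calls, so the chain sees the
    # running conversation even though the LLM is rebuilt per request.
    chain = LLMChain(llm=llm, prompt=prompt, memory=memory)
    return chain.predict(human_input=user_input)

Rebuilding the ChatGroq client inside fn is what lets the Model dropdown and the sampling sliders take effect per message; if the settings were fixed, the client and chain could be created once at module level instead.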