Commit · 349f4df
Parent(s): 61df9a8

Update app.py
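Removes the module-level token setup (import os and the commented-out os.environ lookup for HUGGINGFACEHUB_API_TOKEN, along with the huggingfacehub_api_token = None fallback): the token is now supplied per request through a password Textbox that defaults to None. Also wraps the HuggingFaceHub call in generate_response in a try/except so a ValueError is logged and reported as a friendly message instead of crashing the Space.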
app.py CHANGED
@@ -1,11 +1,6 @@
 import gradio as gr
 from langchain import HuggingFaceHub, PromptTemplate, LLMChain
 from langchain.memory import ConversationBufferMemory
-import os
-
-# Get the HuggingFace API token from the environment variable
-# huggingfacehub_api_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
-huggingfacehub_api_token = None
 
 repo_id = "tiiuae/falcon-7b-instruct"
 
@@ -17,17 +12,22 @@ Chatbot:"""
 prompt = PromptTemplate(template=template, input_variables=["chat_history","human_input"])
 
 def generate_response(question, huggingfacehub_api_token, temperature=0.6, max_new_tokens=500):
-    memory = ConversationBufferMemory(memory_key="chat_history")
-    llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
-                         repo_id=repo_id,
-                         model_kwargs={"temperature": temperature,
-                                       "max_new_tokens": max_new_tokens})
-    llm_chain = LLMChain(prompt=prompt, llm=llm, memory=memory)
-    return llm_chain.predict(chat_history="", human_input=question)
+    try:
+        memory = ConversationBufferMemory(memory_key="chat_history")
+        llm = HuggingFaceHub(huggingfacehub_api_token=huggingfacehub_api_token,
+                             repo_id=repo_id,
+                             model_kwargs={"temperature": temperature,
+                                           "max_new_tokens": max_new_tokens})
+        llm_chain = LLMChain(prompt=prompt, llm=llm, memory=memory)
+        response = llm_chain.predict(chat_history="", human_input=question)
+    except ValueError as e:
+        response = "An error occurred while processing your request. Please try again later."
+        print(f"Error: {str(e)}")
+    return response
 
 inputs = [
     gr.inputs.Textbox(label="Question"),
-    gr.inputs.Textbox(label="HuggingFace API Token", type="password", default=huggingfacehub_api_token),
+    gr.inputs.Textbox(label="HuggingFace API Token", type="password", default=None),
     gr.inputs.Slider(minimum=0.1, maximum=2.0, default=0.6, label="Temperature"),
     gr.inputs.Slider(minimum=100, maximum=1000, default=500, label="Max New Tokens")
 ]
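The diff ends at the inputs list, so the rest of app.py (the code that wires these inputs to generate_response and launches the app) is not shown. Below is a minimal sketch of how that wiring typically looks with the same legacy gr.inputs/gr.outputs API; the output component, title, and launch() call are assumptions for illustration, not part of this commit.

# Hypothetical continuation of app.py, not shown in this commit's diff.
# Uses the same legacy Gradio 2.x-style component namespaces as the code above.
outputs = gr.outputs.Textbox(label="Response")  # assumed output component

# gr.Interface passes the four inputs to generate_response positionally:
# (question, huggingfacehub_api_token, temperature, max_new_tokens).
demo = gr.Interface(fn=generate_response,
                    inputs=inputs,
                    outputs=outputs,
                    title="Falcon-7B-Instruct Chatbot")  # assumed title

demo.launch()

Note that gr.inputs and gr.outputs are the legacy Gradio 2.x namespaces; current Gradio releases expose these as top-level components (gr.Textbox, gr.Slider) instead.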