Update app.py
Browse files
app.py
CHANGED
@@ -1,11 +1,16 @@
|
|
1 |
import streamlit as st
|
2 |
from langchain.llms import HuggingFaceHub
|
|
|
3 |
|
4 |
|
|
|
5 |
|
6 |
#Function to return the response
|
7 |
def generate_answer(query):
|
8 |
-
|
|
|
|
|
|
|
9 |
prompt = f"""
|
10 |
<|system|>
|
11 |
You are an AI assistant that follows instruction extremely well. Please be truthful and give direct answers
|
@@ -14,7 +19,7 @@ def generate_answer(query):
|
|
14 |
{query}</s>
|
15 |
<|assistant|>
|
16 |
"""
|
17 |
-
result =
|
18 |
return result
|
19 |
|
20 |
|
|
|
import os

import streamlit as st
from langchain.llms import HuggingFaceHub
from huggingface_hub import InferenceClient

# `os` was used below but never imported in the committed file — without it
# this module raises NameError on import.
# Read the HF API token from the environment (raises KeyError if unset,
# which is preferable to silently calling the API unauthenticated).
huggingfacehub_api_token = os.environ["HUGGINGFACEHUB_API_TOKEN"]
# Function to return the response
def generate_answer(query):
    """Answer *query* with the Zephyr-7B model via the HF Inference API.

    Parameters
    ----------
    query : str
        The user's question, inserted into the Zephyr chat template.

    Returns
    -------
    str
        The generated answer text from the model.
    """
    # Fixes vs. the committed code:
    #  - InferenceClient takes the model id as `model=`, not `repo_id=`
    #    (`repo_id=` raises TypeError).
    #  - InferenceClient has no `.predict()` method; text generation is
    #    done with `.text_generation(...)`.
    client = InferenceClient(
        model="huggingfaceh4/zephyr-7b-alpha",
        token=huggingfacehub_api_token,
    )
    # NOTE(review): two template lines were elided by the diff context cut;
    # the `</s>` + `<|user|>` lines below are reconstructed from the standard
    # Zephyr chat format — confirm against the full file.
    prompt = f"""
<|system|>
You are an AI assistant that follows instruction extremely well. Please be truthful and give direct answers</s>
<|user|>
{query}</s>
<|assistant|>
"""
    result = client.text_generation(prompt, max_new_tokens=512)
    return result