Update app.py
app.py CHANGED

@@ -1,16 +1,12 @@
 import streamlit as st
 from langchain.llms import HuggingFaceHub
-from huggingface_hub import InferenceClient
-import os
-
-huggingfacehub_api_token = os.environ["HUGGINGFACEHUB_API_TOKEN"]
 
 #Function to return the response
 def generate_answer(query):
-
-
-
-    )
+    llm = HuggingFaceHub(
+        repo_id = "huggingfaceh4/zephyr-7b-alpha",
+        model_kwargs={"temperature": 0.5, "max_length": 64,"max_new_tokens":512}
+    )
     prompt = f"""
 <|system|>
 You are an AI assistant that follows instruction extremely well. Please be truthful and give direct answers
@@ -19,7 +15,7 @@ def generate_answer(query):
 {query}</s>
 <|assistant|>
 """
-    result =
+    result = llm.predict(prompt)
     return result
 
 
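The commit also drops the explicit os.environ read: LangChain's HuggingFaceHub wrapper looks up HUGGINGFACEHUB_API_TOKEN from the environment on its own, so the variable still has to be set (for example as a Space secret). A minimal sketch of the equivalent setup; passing the token explicitly is an alternative the wrapper supports, not something this commit does:

import os
from langchain.llms import HuggingFaceHub

# The wrapper reads HUGGINGFACEHUB_API_TOKEN from the environment by default;
# it can also be passed explicitly via the huggingfacehub_api_token argument.
llm = HuggingFaceHub(
    repo_id="huggingfaceh4/zephyr-7b-alpha",
    huggingfacehub_api_token=os.environ["HUGGINGFACEHUB_API_TOKEN"],
    model_kwargs={"temperature": 0.5, "max_length": 64, "max_new_tokens": 512},
)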
For reference, app.py after the change (the two prompt lines between the hunks were not shown in the diff; the standard Zephyr </s> and <|user|> turn markers are assumed):

import streamlit as st
from langchain.llms import HuggingFaceHub

#Function to return the response
def generate_answer(query):
    llm = HuggingFaceHub(
        repo_id = "huggingfaceh4/zephyr-7b-alpha",
        model_kwargs={"temperature": 0.5, "max_length": 64,"max_new_tokens":512}
    )
    prompt = f"""
<|system|>
You are an AI assistant that follows instruction extremely well. Please be truthful and give direct answers
</s>
<|user|>
{query}</s>
<|assistant|>
"""
    result = llm.predict(prompt)
    return result
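The diff defines generate_answer but contains no Streamlit calls, so the UI side is not part of this commit. A hypothetical wiring sketch for completeness; the widget labels and layout below are assumptions, not code from the repository:

import streamlit as st

st.title("Zephyr-7B Q&A")                 # hypothetical title, not in the diff
query = st.text_input("Ask a question")   # hypothetical input label
if query:
    with st.spinner("Generating..."):
        # generate_answer as defined in app.py above
        st.write(generate_answer(query))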