JamalAG committed on
Commit
12ad205
·
1 Parent(s): 4a4ea64

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -2
app.py CHANGED
@@ -1,11 +1,16 @@
1
  import streamlit as st
2
  from langchain.llms import HuggingFaceHub
 
3
 
4
 
 
5
 
6
  #Function to return the response
7
  def generate_answer(query):
8
- llm = HuggingFaceHub(repo_id="huggingfaceh4/zephyr-7b-alpha", model_kwargs={"temperature": 0.5, "max_length": 64,"max_new_tokens":512})
 
 
 
9
  prompt = f"""
10
  <|system|>
11
  You are an AI assistant that follows instruction extremely well. Please be truthful and give direct answers
@@ -14,7 +19,7 @@ def generate_answer(query):
14
  {query}</s>
15
  <|assistant|>
16
  """
17
- result = llm.predict(prompt)
18
  return result
19
 
20
 
 
import os

import streamlit as st
from huggingface_hub import InferenceClient
from langchain.llms import HuggingFaceHub
# Hugging Face Hub API token, read once at startup.
# Raises KeyError immediately if HUGGINGFACEHUB_API_TOKEN is unset,
# which fails fast instead of erroring on the first model call.
# (`os` must be imported at the top of the file — the original code
# used os.environ without ever importing os, a NameError at startup.)
huggingfacehub_api_token = os.environ["HUGGINGFACEHUB_API_TOKEN"]
# Function to return the response
def generate_answer(query):
    """Generate an answer to *query* with the zephyr-7b-alpha chat model.

    Parameters
    ----------
    query : str
        The user's question, inserted into the zephyr chat template.

    Returns
    -------
    str
        The generated answer text from the Inference API.
    """
    # BUG FIX: InferenceClient takes `model=`, not `repo_id=` — `repo_id`
    # is a kwarg of langchain's HuggingFaceHub wrapper, and passing it
    # here raises TypeError.
    client = InferenceClient(
        model="huggingfaceh4/zephyr-7b-alpha",
        token=huggingfacehub_api_token,
    )
    # zephyr-alpha chat template: <|system|> / <|user|> / <|assistant|>
    # turns, each closed with </s>.
    # NOTE(review): the middle two template lines were elided as unchanged
    # context in the diff this was recovered from; reconstructed from the
    # pre-change hunk — confirm against the deployed file.
    prompt = f"""
    <|system|>
    You are an AI assistant that follows instruction extremely well. Please be truthful and give direct answers
    </s>
    <|user|>
    {query}</s>
    <|assistant|>
    """
    # BUG FIX: InferenceClient has no `.predict()` method; text completion
    # is done with `.text_generation()`. max_new_tokens mirrors the value
    # the previous HuggingFaceHub-based version passed in model_kwargs.
    result = client.text_generation(prompt, max_new_tokens=512)
    return result