Update src/streamlit_app.py
src/streamlit_app.py (+2 -14) CHANGED
@@ -16,26 +16,14 @@ if not hf_token:
 if "response" not in st.session_state:
     st.session_state.response = ""
 
-# source: https://medium.com/@james.irving.phd/creating-your-personal-chatbot-using-hugging-face-spaces-and-streamlit-596a54b9e3ed
 def get_llm(model_id=MODEL, max_new_tokens=130, temperature=0.7):
-    """
-    Returns a language model for HuggingFace inference.
-
-    Parameters:
-    - model_id (str): The ID of the HuggingFace model repository.
-    - max_new_tokens (int): The maximum number of new tokens to generate.
-    - temperature (float): The temperature for sampling from the model.
+    os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")  # Optional but ensures it's set
 
-    Returns:
-    - llm (HuggingFaceEndpoint): The language model for HuggingFace inference.
-    """
-    llm = HuggingFaceEndpoint(
+    return HuggingFaceEndpoint(
         repo_id=model_id,
         max_new_tokens=max_new_tokens,
         temperature=temperature,
-        token=hf_token
     )
-    return llm
 
 # create llm
 llm = get_llm()
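In effect, the commit drops the tutorial docstring, stops passing `token=hf_token` to the endpoint constructor, and instead exports `HF_TOKEN` so the Hugging Face client resolves the credential from the environment. As a quick sanity check, here is a minimal sketch of the same pattern exercised outside Streamlit. It assumes `HuggingFaceEndpoint` comes from `langchain_huggingface` and uses a hypothetical `repo_id` in place of the app's `MODEL` constant; neither is shown in this hunk.

```python
# Hedged sketch, not part of the commit: exercises the pattern the diff adopts.
import os

from langchain_huggingface import HuggingFaceEndpoint  # assumed source of the class

os.environ["HF_TOKEN"] = "hf_..."  # placeholder; the app reads this from Space secrets


def get_llm(model_id="mistralai/Mistral-7B-Instruct-v0.2",  # hypothetical; the app uses MODEL
            max_new_tokens=130, temperature=0.7):
    # Mirrors the commit: the endpoint picks the token up from the environment
    # instead of receiving it as a constructor argument.
    os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")
    return HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
    )


llm = get_llm()
print(llm.invoke("Say hello in one word."))  # LangChain LLMs expose .invoke()
```

One caveat worth noting: `os.environ["HF_TOKEN"] = os.getenv("HF_TOKEN")` raises a `TypeError` when `HF_TOKEN` is unset, because environment values must be strings; the app's earlier `if not hf_token:` guard, visible in the hunk header, is what keeps that case from being reached.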