Update src/streamlit_app.py
src/streamlit_app.py (+21 −20)
src/streamlit_app.py
CHANGED
@@ -9,6 +9,27 @@ MODEL = "Qwen/Qwen3-Embedding-0.6B"
|
|
9 |
# remembers session
|
10 |
if "response" not in st.session_state:
|
11 |
st.session_state.response = ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
|
13 |
# create llm
|
14 |
llm = get_llm()
|
@@ -103,23 +124,3 @@ st.markdown(
|
|
103 |
unsafe_allow_html=True,
|
104 |
)
|
105 |
|
106 |
-
# source: https://medium.com/@james.irving.phd/creating-your-personal-chatbot-using-hugging-face-spaces-and-streamlit-596a54b9e3ed
|
107 |
-
def get_llm(model_id=MODEL, max_new_tokens=130, temperature=0.7):
|
108 |
-
"""
|
109 |
-
Returns a language model for HuggingFace inference.
|
110 |
-
|
111 |
-
Parameters:
|
112 |
-
- model_id (str): The ID of the HuggingFace model repository.
|
113 |
-
- max_new_tokens (int): The maximum number of new tokens to generate.
|
114 |
-
- temperature (float): The temperature for sampling from the model.
|
115 |
-
|
116 |
-
Returns:
|
117 |
-
- llm (HuggingFaceEndpoint): The language model for HuggingFace inference.
|
118 |
-
"""
|
119 |
-
llm = HuggingFaceEndpoint(
|
120 |
-
repo_id=model_id,
|
121 |
-
max_new_tokens=max_new_tokens,
|
122 |
-
temperature=temperature,
|
123 |
-
token = os.getenv("HF_TOKEN")
|
124 |
-
)
|
125 |
-
return llm
|
|
|
9 |
# Remembers session: Streamlit re-executes this script top-to-bottom on every
# user interaction, so the last model answer is kept in st.session_state,
# which persists across those reruns. Initialize only on the first run.
if "response" not in st.session_state:
    st.session_state.response = ""
|
12 |
+
|
13 |
+
# source: https://medium.com/@james.irving.phd/creating-your-personal-chatbot-using-hugging-face-spaces-and-streamlit-596a54b9e3ed
def get_llm(model_id=MODEL, max_new_tokens=130, temperature=0.7):
    """
    Return a language model for HuggingFace inference.

    Parameters:
    - model_id (str): The ID of the HuggingFace model repository.
    - max_new_tokens (int): The maximum number of new tokens to generate.
    - temperature (float): The temperature for sampling from the model.

    Returns:
    - llm (HuggingFaceEndpoint): The language model for HuggingFace inference.
    """
    # NOTE(review): langchain-huggingface documents the auth field as
    # `huggingfacehub_api_token`; passing `token` may only work through the
    # kwargs passthrough on some versions — confirm against the installed
    # HuggingFaceEndpoint. HF_TOKEN must be set in the environment (e.g. a
    # Spaces secret); os.getenv returns None otherwise and the endpoint
    # falls back to unauthenticated access.
    llm = HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        token=os.getenv("HF_TOKEN"),  # PEP 8: no spaces around '=' in kwargs
    )
    return llm
|
33 |
|
34 |
# Create the llm once at module level; get_llm must already be defined at
# this point in the script, since Streamlit executes the file top-to-bottom.
llm = get_llm()
|
|
|
124 |
unsafe_allow_html=True,
|
125 |
)
|
126 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|