Update src/streamlit_app.py
Browse files — src/streamlit_app.py (+7, −8)
src/streamlit_app.py
src/streamlit_app.py
CHANGED
@@ -1,18 +1,17 @@
|
|
1 |
import streamlit as st
|
2 |
from langchain_huggingface import HuggingFaceEndpoint
|
3 |
from transformers import pipeline
|
4 |
-
# import os
|
5 |
|
6 |
# constants
|
7 |
QUESTION = "Compute the integral of f(x) = x^2."
|
8 |
MODEL = "Qwen/Qwen3-Embedding-0.6B"
|
9 |
-
max_new_tokens = 130
|
10 |
-
temperature = 0.7
|
11 |
|
12 |
-
#
|
13 |
-
|
14 |
-
|
15 |
-
|
|
|
|
|
16 |
|
17 |
# Initialize session state
|
18 |
if "help_clicks" not in st.session_state:
|
@@ -96,7 +95,7 @@ st.markdown(
|
|
96 |
)
|
97 |
|
98 |
# source: https://medium.com/@james.irving.phd/creating-your-personal-chatbot-using-hugging-face-spaces-and-streamlit-596a54b9e3ed
|
99 |
-
def get_llm_hf_inference(model_id=
|
100 |
"""
|
101 |
Returns a language model for HuggingFace inference.
|
102 |
|
|
|
1 |
import streamlit as st
|
2 |
from langchain_huggingface import HuggingFaceEndpoint
|
3 |
from transformers import pipeline
|
|
|
4 |
|
5 |
# constants
|
6 |
QUESTION = "Compute the integral of f(x) = x^2."
|
7 |
MODEL = "Qwen/Qwen3-Embedding-0.6B"
|
|
|
|
|
8 |
|
9 |
+
# remembers session
|
10 |
+
if "response" not in st.session_state:
|
11 |
+
st.session_state.response = ""
|
12 |
+
|
13 |
+
# create llm
|
14 |
+
llm = get_llm()
|
15 |
|
16 |
# Initialize session state
|
17 |
if "help_clicks" not in st.session_state:
|
|
|
95 |
)
|
96 |
|
97 |
# source: https://medium.com/@james.irving.phd/creating-your-personal-chatbot-using-hugging-face-spaces-and-streamlit-596a54b9e3ed
|
98 |
+
def get_llm_hf_inference(model_id=MODEL, max_new_tokens=130, temperature=0.7):
|
99 |
"""
|
100 |
Returns a language model for HuggingFace inference.
|
101 |
|