feministmystique committed
Commit 4479fd1 · verified · 1 Parent(s): ef19f5d

Update src/streamlit_app.py

Files changed (1)
  1. src/streamlit_app.py +12 -3
src/streamlit_app.py CHANGED
@@ -1,6 +1,6 @@
 import streamlit as st
 from langchain_huggingface import HuggingFaceEndpoint
-from transformers import pipeline
+import os
 
 # constants
 QUESTION = "Compute the integral of f(x) = x^2."
@@ -59,12 +59,21 @@ with st.container():
 
     with col1:
         if st.button("📝 Explain the question"):
+            prompt = f"[INST]You are a thoughtful AI assistant.\nUser: {QUESTION} [/INST]\nAI:"
+            st.session_state.response = llm.invoke(prompt)
+
             st.session_state.button_clicked = "Explain the question"
     with col2:
         if st.button("💡 Give an example"):
+            prompt = f"[INST]You are a thoughtful AI assistant.\nUser: {QUESTION} [/INST]\nAI:"
+            st.session_state.response = llm.invoke(prompt)
+
             st.session_state.button_clicked = "Give an example"
     with col3:
         if st.button("🤔 Who cares?"):
+            prompt = f"[INST]You are a thoughtful AI assistant.\nUser: {QUESTION} [/INST]\nAI:"
+            st.session_state.response = llm.invoke(prompt)
+
             st.session_state.button_clicked = "Who cares?"
 
     st.markdown("---")
@@ -72,7 +81,7 @@ with st.container():
     # Display response text if a sub-button is clicked
     if st.session_state.button_clicked:
         with st.container():
-            st.info(f"**hello world \n{QUESTION} \n{st.session_state.button_clicked}**")
+            st.info(st.session_state.response)
 
     # Optional: Add footer or spacing
     st.markdown("<br><br>", unsafe_allow_html=True)
@@ -95,7 +104,7 @@ st.markdown(
 )
 
 # source: https://medium.com/@james.irving.phd/creating-your-personal-chatbot-using-hugging-face-spaces-and-streamlit-596a54b9e3ed
-def get_llm_hf_inference(model_id=MODEL, max_new_tokens=130, temperature=0.7):
+def get_llm(model_id=MODEL, max_new_tokens=130, temperature=0.7):
     """
     Returns a language model for HuggingFace inference.
 
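
For context, the diff shows only the renamed helper's signature and the start of its docstring. Below is a minimal sketch of how get_llm might wrap langchain_huggingface's HuggingFaceEndpoint, following the pattern in the linked Medium article; the function body, the HF_TOKEN environment variable, and the MODEL placeholder value are assumptions, not part of this commit.

import os
from langchain_huggingface import HuggingFaceEndpoint

# Placeholder: the app defines MODEL in its constants block; the exact repo id is an assumption here.
MODEL = "mistralai/Mistral-7B-Instruct-v0.2"

def get_llm(model_id=MODEL, max_new_tokens=130, temperature=0.7):
    """
    Returns a language model for HuggingFace inference.
    """
    # Read the Hugging Face API token from the environment (e.g. a Space secret);
    # presumably why `import os` replaces the unused transformers import.
    hf_token = os.environ.get("HF_TOKEN")
    # HuggingFaceEndpoint is a LangChain LLM, so llm.invoke(prompt) returns the
    # generated completion as a plain string.
    return HuggingFaceEndpoint(
        repo_id=model_id,
        max_new_tokens=max_new_tokens,
        temperature=temperature,
        huggingfacehub_api_token=hf_token,
    )

llm = get_llm()

With something like this in place, each button's llm.invoke(prompt) call returns a string that is stored in st.session_state.response and rendered directly by st.info.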