remove hf_token argument
explore.py  CHANGED  (+7 -5)
@@ -23,8 +23,8 @@ import random
 # --server.maxUploadSize 3000
 REPO_NAME = 'ncoria/llava-lora-vicuna-clip-5-epochs-merge'
 
-def load_llava_model(hf_token):
-    return load_llava_checkpoint_hf(REPO_NAME, hf_token)
+def load_llava_model():
+    return load_llava_checkpoint_hf(REPO_NAME)
 
 def get_unique_labels(label_list: list[str]):
     label_set = set()
@@ -129,7 +129,8 @@ def ask_question_with_image_llava(image, system_prompt, question,
 
 def ask_summary_question(image_array, label_array, api_key):
     # load llava model
-
+    with st.spinner("Loading LLaVA model. This can take 10 to 30 minutes. Please wait..."):
+        tokenizer, model, image_processor = load_llava_model()
 
     # global variable
     system_prompt = SYSTEM_PROMPT
@@ -172,7 +173,7 @@ if "embeddings_df" not in st.session_state:
 st.title('batik: behavior discovery and LLM-based interpretation')
 
 api_key = st.text_input("OpenAI API Key:","")
-
+
 st.subheader("generate or import embeddings")
 
 st.text("Upload files to generate embeddings.")
@@ -325,7 +326,8 @@ if uploaded_file is not None and st.session_state.embeddings_df is not None:
     "Also, designate a behavioral subtype of the given label that describes the current social interaction based on what you see about the posture of the mice and "\
     "how they are positioned with respect to each other. Usually, the body parts (i.e., tail, genitals, face, body, ears, paws)" \
     "of the mice that are closest to each other will give some clue. Please limit behavioral subtype to a 1-4 word phrase. limit your response to 4 sentences."
-
+    with st.spinner("Loading LLaVA model. This can take 10 to 30 minutes. Please wait..."):
+        tokenizer, model, image_processor = load_llava_model()
     response = ask_question_with_image_llava(image, system_prompt, question,
                                              tokenizer, model, image_processor)
     st.markdown(response)
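The core change is that load_llava_model() no longer threads a Hugging Face token into load_llava_checkpoint_hf; a token only matters when the checkpoint sits in a gated or private repo, and a public merged repo like ncoria/llava-lora-vicuna-clip-5-epochs-merge can be downloaded anonymously. Below is a rough sketch of what a token-free loader could look like, assuming the checkpoint is in the upstream haotian-liu/LLaVA format and that package's builder is installed; the Space's real load_llava_checkpoint_hf may be implemented differently.

# Sketch only: assumes the `llava` package (haotian-liu/LLaVA) is installed and
# the merged checkpoint is public, so no hf_token is needed for the download.
from llava.mm_utils import get_model_name_from_path
from llava.model.builder import load_pretrained_model

REPO_NAME = 'ncoria/llava-lora-vicuna-clip-5-epochs-merge'

def load_llava_checkpoint_hf(repo_name: str):
    # Resolve the Hub repo id and load tokenizer, model, and image processor.
    model_name = get_model_name_from_path(repo_name)
    tokenizer, model, image_processor, _context_len = load_pretrained_model(
        model_path=repo_name,  # downloaded from the Hub on first use
        model_base=None,       # LoRA weights are already merged into this checkpoint
        model_name=model_name,
    )
    return tokenizer, model, image_processor

def load_llava_model():
    return load_llava_checkpoint_hf(REPO_NAME)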
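The other half of the change wraps the slow load in st.spinner at both call sites so the Streamlit UI shows progress instead of appearing frozen. The self-contained sketch below uses a short sleep as a stand-in for the 10-30 minute checkpoint load; the st.cache_resource decorator is not part of this commit, but it is the usual way to keep Streamlit from re-loading the model on every rerun.

import time
import streamlit as st

@st.cache_resource  # assumption: not in this commit, caches the loaded model across reruns
def load_llava_model():
    time.sleep(5)  # stand-in for the real checkpoint download and load
    return "tokenizer", "model", "image_processor"

with st.spinner("Loading LLaVA model. This can take 10 to 30 minutes. Please wait..."):
    tokenizer, model, image_processor = load_llava_model()

st.success("LLaVA model loaded.")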
|