Spaces — build status: Runtime error (Runtime error)
Commit: "Update app.py" (Browse files)
File changed: app.py
@@ -165,7 +165,6 @@ def StreamLLMChatResponse(prompt):
|
|
165 |
st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
|
166 |
|
167 |
# 4. Run query with payload
|
168 |
-
@st.cache_resource
|
169 |
def query(payload):
|
170 |
response = requests.post(API_URL, headers=headers, json=payload)
|
171 |
st.markdown(response.json())
|
@@ -182,7 +181,6 @@ def generate_filename(prompt, file_type):
|
|
182 |
return f"{safe_date_time}_{safe_prompt}.{file_type}"
|
183 |
|
184 |
# 6. Speech transcription via OpenAI service
|
185 |
-
@st.cache_resource
|
186 |
def transcribe_audio(openai_key, file_path, model):
|
187 |
openai.api_key = openai_key
|
188 |
OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
|
@@ -218,7 +216,6 @@ def save_and_play_audio(audio_recorder):
|
|
218 |
return None
|
219 |
|
220 |
# 8. File creator that interprets type and creates output file for text, markdown and code
|
221 |
-
@st.cache_resource
|
222 |
def create_file(filename, prompt, response, should_save=True):
|
223 |
if not should_save:
|
224 |
return
|
|
|
165 |
st.write('Llama model is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
|
166 |
|
167 |
# 4. Run query with payload
|
|
|
168 |
def query(payload):
|
169 |
response = requests.post(API_URL, headers=headers, json=payload)
|
170 |
st.markdown(response.json())
|
|
|
181 |
return f"{safe_date_time}_{safe_prompt}.{file_type}"
|
182 |
|
183 |
# 6. Speech transcription via OpenAI service
|
|
|
184 |
def transcribe_audio(openai_key, file_path, model):
|
185 |
openai.api_key = openai_key
|
186 |
OPENAI_API_URL = "https://api.openai.com/v1/audio/transcriptions"
|
|
|
216 |
return None
|
217 |
|
218 |
# 8. File creator that interprets type and creates output file for text, markdown and code
|
|
|
219 |
def create_file(filename, prompt, response, should_save=True):
|
220 |
if not should_save:
|
221 |
return
|