MSey90 committed on
Commit
ba093b8
·
unverified ·
1 Parent(s): 8fe4b6c

applied chat template

Browse files
Files changed (1) hide show
  1. app.py +7 -2
app.py CHANGED
@@ -3,7 +3,10 @@ import streamlit as st
3
 
4
  # Access the secret token
5
  hf_token = st.secrets["default"]["hf_token"]
6
- st.write(hf_token)
 
 
 
7
 
8
  @st.cache_resource
9
  def load_model():
@@ -13,10 +16,12 @@ model = load_model()
13
 
14
  st.header("Test Environment for tiny_CaLL_r10_O1_f10_c1022")
15
  user_input = st.text_input("Enter your Prompt here:", "")
 
 
16
 
17
  if user_input:
18
  with st.spinner('Generating response...'):
19
- response = model(user_input, max_length=200, num_return_sequences=1)
20
  generated_text = response[0]['generated_text']
21
  st.write("Generated Text:")
22
  st.write(generated_text)
 
3
 
4
# Access the Hugging Face API token from Streamlit secrets.
hf_token = st.secrets["default"]["hf_token"]
# SECURITY: never echo the raw token to the page — `st.write(hf_token)`
# exposes the secret to anyone viewing the app. Confirm presence only,
# without revealing the value.
st.write("HF token loaded." if hf_token else "HF token missing!")
7
+
8
def context_text(text):
    """Wrap *text* in the prompt template the model was fine-tuned with.

    Pure string formatting, so no caching decorator is needed: the
    original ``@st.cache_resource`` is meant for heavyweight shared
    resources (models, DB connections) and would have retained every
    distinct prompt in memory for the lifetime of the process.

    Args:
        text: The raw user prompt.

    Returns:
        The prompt embedded in the ``### Context`` / ``### Answer`` template.
    """
    return f"### Context\n{text}\n\n### Answer"
10
 
11
  @st.cache_resource
12
  def load_model():
 
16
 
17
st.header("Test Environment for tiny_CaLL_r10_O1_f10_c1022")
user_input = st.text_input("Enter your Prompt here:", "")

if user_input:
    with st.spinner('Generating response...'):
        # Apply the chat template only when there is actual input
        # (the original built it unconditionally on every rerun).
        # Also fixes the misspelled local `contexted_ipnut`.
        contexted_input = context_text(user_input)
        response = model(contexted_input, max_length=200, num_return_sequences=1)
        # Pipeline returns a list of dicts; take the first generation.
        generated_text = response[0]['generated_text']
        st.write("Generated Text:")
        st.write(generated_text)