Robin Genolet committed on
Commit 2bfab33 · 1 Parent(s): 9730359

fix: prompt

Files changed (2)
  1. app.py +1 -0
  2. utils/epfl_meditron_utils.py +2 -2
app.py CHANGED
@@ -513,6 +513,7 @@ def display_llm_output():
         st.session_state["model_filename"],
         st.session_state["model_type"],
         st.session_state["gpu_layers"],
+        "You are a medical assistant",
         llm_message)
     st.write(llm_response)
     st.write('Done displaying LLM response')
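For context, the updated call site in app.py plausibly reads as follows; apart from the lines visible in the hunk, the first argument's session-state key and the Streamlit widget feeding llm_message are assumptions.

# Sketch of the call after this commit; names not shown in the hunk are assumed.
import streamlit as st
from utils.epfl_meditron_utils import get_llm_response

llm_message = st.text_area("Your question")   # assumed input widget, not part of the diff

if llm_message:
    llm_response = get_llm_response(
        st.session_state["model_repo"],        # assumed key; the repo argument is outside the hunk
        st.session_state["model_filename"],
        st.session_state["model_type"],
        st.session_state["gpu_layers"],
        "You are a medical assistant",         # new system_message argument introduced by this commit
        llm_message)
    st.write(llm_response)
    st.write('Done displaying LLM response')

Passing the persona in from app.py, rather than hard-coding it inside the utility, lets each caller choose its own system prompt.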
utils/epfl_meditron_utils.py CHANGED
@@ -1,7 +1,7 @@
 
 
 
-def get_llm_response(repo, filename, model_type, gpu_layers, prompt):
+def get_llm_response(repo, filename, model_type, gpu_layers, system_message, prompt):
     from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 
     model_name_or_path = "TheBloke/meditron-7B-GPTQ"
@@ -40,7 +40,7 @@ def get_llm_response(repo, filename, model_type, gpu_layers, prompt):
     <|im_start|>user
     {prompt}<|im_end|>
     <|im_start|>assistant
-    '''.format(system_message="You are an assistant", prompt=prompt)
+    '''
 
     response = pipe(prompt_template)[0]['generated_text']
     print(response)
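Since the commit removes the trailing .format(system_message="You are an assistant", prompt=prompt) call, the new system_message parameter is presumably interpolated elsewhere outside the visible hunks. A minimal sketch of the updated helper under that assumption is shown below; only the signature, the model id, the ChatML-style markers, and the pipe(...) call come from the diff, while the tokenizer/model loading, generation parameters, and the f-string template are assumptions.

# Minimal sketch, not the repository's actual implementation.
def get_llm_response(repo, filename, model_type, gpu_layers, system_message, prompt):
    from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

    model_name_or_path = "TheBloke/meditron-7B-GPTQ"
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto")
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512)

    # With the hard-coded .format(...) gone, an f-string template would interpolate
    # the new system_message argument directly (assumed placement).
    prompt_template = f'''<|im_start|>system
{system_message}<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
'''

    response = pipe(prompt_template)[0]['generated_text']
    print(response)
    return response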