Ferdi committed
Commit b587563 · Parent(s): 3783dce

remove prompts.json
Files changed (3):
  1. src/app.py +3 -5
  2. src/conversation.py +0 -2
  3. src/utils.py +1 -8
src/app.py CHANGED
@@ -1,8 +1,6 @@
 import gradio as gr
 from utils import *
 
-prompt_keys = load_prompts_list_from_json('prompts.json')
-
 with gr.Blocks(gr.themes.Soft(primary_hue=gr.themes.colors.slate, secondary_hue=gr.themes.colors.purple)) as demo:
     with gr.Row():
 
@@ -13,7 +11,7 @@ with gr.Blocks(gr.themes.Soft(primary_hue=gr.themes.colors.slate, secondary_hue=
             vector_index_btn = gr.Button('Create vector store', variant='primary', scale=1)
             vector_index_msg_out = gr.Textbox(show_label=False, lines=1, scale=1, placeholder="Creating vector store ...")
 
-            prompt_dropdown = gr.Dropdown(label="Select a prompt", choices=prompt_keys, value=prompt_keys[0])
+            instruction = gr.Textbox(label="System instruction", lines=3, value="Use the following pieces of context to answer the question at the end. Generate the answer based on the given context only. If you do not find any information related to the question in the given context, just say that you don't know; don't try to make up an answer. Keep your answer expressive.")
 
             with gr.Accordion(label="Text generation tuning parameters"):
                 temperature = gr.Slider(label="temperature", minimum=0.1, maximum=1, value=0.1, step=0.05)
@@ -48,9 +46,9 @@ with gr.Blocks(gr.themes.Soft(primary_hue=gr.themes.colors.slate, secondary_hue=
     model_load_btn.click(load_models, [llm], load_success_msg, api_name="load_models")
 
     txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
-        bot, [chatbot, prompt_dropdown, temperature, max_new_tokens, k_context], chatbot)
+        bot, [chatbot, instruction, temperature, max_new_tokens, k_context], chatbot)
     submit_btn.click(add_text, [chatbot, txt], [chatbot, txt]).then(
-        bot, [chatbot, prompt_dropdown, temperature, max_new_tokens, k_context], chatbot).then(
+        bot, [chatbot, instruction, temperature, max_new_tokens, k_context], chatbot).then(
        clear_cuda_cache, None, None
    )
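The net effect in app.py: the preset-prompt dropdown backed by prompts.json becomes a free-text Textbox, and Gradio passes its current value straight into bot() on every submit. A minimal, self-contained sketch of that wiring, with stub add_text/bot handlers and hypothetical slider ranges standing in for the app's real ones (the list-of-pairs chat history assumes the classic gr.Chatbot format):

import gradio as gr

def add_text(history, text):
    # Append the user turn with an empty bot slot, then clear the input box.
    return history + [[text, None]], ""

def bot(history, instruction, temperature, max_new_tokens, k_context):
    # Stub: echo the instruction to show it arrives as a plain string,
    # with no prompts.json key lookup in between.
    history[-1][1] = f"instruction={instruction!r}, temperature={temperature}"
    return history

with gr.Blocks() as demo:
    instruction = gr.Textbox(label="System instruction", lines=3,
                             value="Answer from the given context only.")
    temperature = gr.Slider(0.1, 1, value=0.1, step=0.05, label="temperature")
    max_new_tokens = gr.Slider(64, 1024, value=512, step=64, label="max_new_tokens")
    k_context = gr.Slider(1, 10, value=5, step=1, label="k_context")
    chatbot = gr.Chatbot()
    txt = gr.Textbox(show_label=False, placeholder="Ask a question")
    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
        bot, [chatbot, instruction, temperature, max_new_tokens, k_context], chatbot)

demo.launch()

Passing the component itself in the inputs list is what makes the live textbox value, rather than a fixed default, reach the handler on each call.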
src/conversation.py CHANGED
@@ -28,8 +28,6 @@ class Conversation_RAG:
         return llm
 
     def create_conversation(self, model, vectordb, k_context=5, instruction="Use the following pieces of context to answer the question at the end. Generate the answer based on the given context only. If you do not find any information related to the question in the given context, just say that you don't know; don't try to make up an answer. Keep your answer expressive."):
-
-        print(instruction)
 
         template = instruction + """
 context:\n
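The dropped print(instruction) was presumably debug output that became redundant once the instruction is visible in the UI. Only the template's first lines appear in the hunk; assuming create_conversation follows the common LangChain ConversationalRetrievalChain pattern (a sketch under that assumption, not the repo's confirmed code), the typed instruction is prepended verbatim to the retrieval prompt:

from langchain.prompts import PromptTemplate
from langchain.chains import ConversationalRetrievalChain

def create_conversation(model, vectordb, k_context=5, instruction="..."):
    # The UI-supplied instruction becomes the prompt preamble; {context}
    # and {question} are filled in by the chain at query time.
    template = instruction + """
context:
{context}

question:
{question}

answer:
"""
    prompt = PromptTemplate(template=template,
                            input_variables=["context", "question"])
    return ConversationalRetrievalChain.from_llm(
        llm=model,
        retriever=vectordb.as_retriever(search_kwargs={"k": k_context}),
        combine_docs_chain_kwargs={"prompt": prompt},
    )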
src/utils.py CHANGED
@@ -31,8 +31,6 @@ def bot(history,
         max_new_tokens=512,
         k_context=5,
         ):
-
-    instruction = load_prompt('prompts.json', instruction)
 
     model = conv_qa.create_model(max_new_tokens=max_new_tokens, temperature=temperature)
 
@@ -62,9 +60,4 @@ def clear_cuda_cache():
 def load_prompts_list_from_json(json_filepath):
     with open(json_filepath, 'r') as file:
         data = json.load(file)
-    return list(data.keys())
-
-def load_prompt(json_filepath, key):
-    with open(json_filepath, 'r') as file:
-        data = json.load(file)
-    return data.get(key, key)
+    return list(data.keys())
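Note that load_prompts_list_from_json survives the commit even though app.py no longer calls it; only the key-to-text resolver was deleted. For illustration, with a hypothetical prompts.json like the one sketched in the comments, the two helpers behaved as follows (load_prompt reproduced from the removed lines):

import json

# Hypothetical prompts.json, for illustration only:
#   {"concise": "Answer briefly.", "expressive": "Keep your answer expressive."}

def load_prompts_list_from_json(json_filepath):
    # Still in utils.py after this commit, though app.py no longer calls it.
    with open(json_filepath, 'r') as file:
        data = json.load(file)
    return list(data.keys())          # -> ["concise", "expressive"]

def load_prompt(json_filepath, key):
    # Deleted by this commit. Note the data.get(key, key) fallback: an
    # unknown key was already passed through verbatim, so once the UI sends
    # raw instruction text instead of a dropdown key, the lookup is dead code.
    with open(json_filepath, 'r') as file:
        data = json.load(file)
    return data.get(key, key)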