Spaces:
Sleeping
Sleeping
WilliamGazeley
committed on
Commit
·
47c54d0
1
Parent(s):
7aab4a8
Better state management
Browse files- src/app.py +1 -0
- src/functioncall.py +2 -1
src/app.py
CHANGED
@@ -27,6 +27,7 @@ def get_response(prompt):
|
|
27 |
|
28 |
def get_output(context, user_input):
|
29 |
try:
|
|
|
30 |
prompt_schema = llm.prompter.read_yaml_file("prompt_assets/output_sys_prompt.yml")
|
31 |
sys_prompt = llm.prompter.format_yaml_prompt(prompt_schema, dict()) + \
|
32 |
f"Information:\n{context}"
|
|
|
27 |
|
28 |
def get_output(context, user_input):
|
29 |
try:
|
30 |
+
config.status.update(label=":bulb: Preparing answer..")
|
31 |
prompt_schema = llm.prompter.read_yaml_file("prompt_assets/output_sys_prompt.yml")
|
32 |
sys_prompt = llm.prompter.format_yaml_prompt(prompt_schema, dict()) + \
|
33 |
f"Information:\n{context}"
|
src/functioncall.py
CHANGED
@@ -83,7 +83,6 @@ class ModelInference:
|
|
83 |
return results_dict
|
84 |
|
85 |
def run_inference(self, prompt: List[Dict[str, str]]):
|
86 |
-
config.status.update(label=":brain: Thinking..")
|
87 |
inputs = self.tokenizer.apply_chat_template(
|
88 |
prompt,
|
89 |
add_generation_prompt=True,
|
@@ -108,6 +107,7 @@ class ModelInference:
|
|
108 |
chat = [{"role": "user", "content": user_message}]
|
109 |
tools = functions.get_openai_tools()
|
110 |
prompt = self.prompter.generate_prompt(chat, tools, num_fewshot)
|
|
|
111 |
completion = self.run_inference(prompt)
|
112 |
|
113 |
def recursive_loop(prompt, completion, depth):
|
@@ -140,6 +140,7 @@ class ModelInference:
|
|
140 |
completion = self.run_inference(prompt)
|
141 |
return completion
|
142 |
|
|
|
143 |
completion = self.run_inference(prompt)
|
144 |
return recursive_loop(prompt, completion, depth)
|
145 |
elif error_message:
|
|
|
83 |
return results_dict
|
84 |
|
85 |
def run_inference(self, prompt: List[Dict[str, str]]):
|
|
|
86 |
inputs = self.tokenizer.apply_chat_template(
|
87 |
prompt,
|
88 |
add_generation_prompt=True,
|
|
|
107 |
chat = [{"role": "user", "content": user_message}]
|
108 |
tools = functions.get_openai_tools()
|
109 |
prompt = self.prompter.generate_prompt(chat, tools, num_fewshot)
|
110 |
+
config.status.update(label=":brain: Thinking..")
|
111 |
completion = self.run_inference(prompt)
|
112 |
|
113 |
def recursive_loop(prompt, completion, depth):
|
|
|
140 |
completion = self.run_inference(prompt)
|
141 |
return completion
|
142 |
|
143 |
+
config.status.update(label=":brain: Analysing information..")
|
144 |
completion = self.run_inference(prompt)
|
145 |
return recursive_loop(prompt, completion, depth)
|
146 |
elif error_message:
|