theosaurus committed
Commit · 582f303 · Parent(s): 2aab28e
feat/ Implemented CoT with Blocks statement
app.py CHANGED
@@ -8,6 +8,7 @@ import torch
 from typing import Optional, Iterator, Dict, Any, List
 from threading import Thread
 from types import NoneType
+import traceback


 # Initialize logging and device information
@@ -1003,66 +1004,234 @@ knowledge_textbox = gr.Textbox(
     visible=False
 )

-    (52 blank lines removed)
-    ],
-    cache_examples=False,
-    show_progress="full",
-    examples=["Donne moi les outils et matériaux utilisés dans ce texte et génère des classes ontologiques sur cette base en format Turtle."],
-    run_examples_on_click=False
-)
+with gr.Blocks() as demo:
+    gr.Markdown("# Ontology Generation with Chain-of-Thought")
+
+    # State components for maintaining conversation
+    chat_state = gr.State([])
+
+    chatbot = gr.Chatbot(type="messages")
+    message_input = gr.Textbox(
+        label="message",
+        placeholder="Ask about the elicitation text...",
+        lines=2
+    )
+
+    with gr.Row():
+        file_explorer = gr.FileExplorer(
+            glob="**/*.txt",
+            file_count="single",
+            label="Upload file",
+            show_label=True
+        )
+        knowledge_input = gr.Textbox(
+            label="Knowledge text",
+            lines=6,
+            visible=True
+        )
+    with gr.Accordion("Advanced Settings", open=False):
+        system_prompt_input = gr.Textbox(
+            label="System Prompt",
+            lines=4,
+            value=DEFAULT_SYSTEM_PROMPT
+        )
+        with gr.Row():
+            with gr.Column():
+                max_tokens_slider = gr.Slider(
+                    label="Max new tokens",
+                    minimum=1,
+                    maximum=14000,
+                    step=1,
+                    value=1024
+                )
+                temperature_slider = gr.Slider(
+                    label="Temperature",
+                    minimum=0.1,
+                    maximum=4.0,
+                    step=0.1,
+                    value=0.2
+                )
+
+            with gr.Column():
+                top_p_slider = gr.Slider(
+                    label="Top-p (nucleus sampling)",
+                    minimum=0.05,
+                    maximum=1.0,
+                    step=0.05,
+                    value=0.8
+                )
+                top_k_slider = gr.Slider(
+                    label="Top-k",
+                    minimum=1,
+                    maximum=1000,
+                    step=1,
+                    value=50
+                )
+                repetition_penalty_slider = gr.Slider(
+                    label="Repetition penalty",
+                    minimum=1.0,
+                    maximum=2.0,
+                    step=0.05,
+                    value=1.0
+                )
+
+    # Example prompts
+    examples = gr.Examples(
+        examples=[
+            ["List the main classes in the ontology"],
+            ["Summarize the object properties in the ontology"],
+            ["Create a new property to link Person and Environment"]
+        ],
+        inputs=message_input
+    )
+
+    def user_message(message: str, history: List[Dict[str, str]]):
+        """Add the user message to the chat history.
+
+        Args:
+            message (str): The user message to send.
+            history (List[Dict[str, str]]): The previous chat conversation history.
+        """
+        if message.strip() == "":
+            return history, message
+
+        history = history + [{"role": "user", "content": message}]
+        return history, ""
+
+
def bot_response(history, knowledge, system_prompt, max_tokens, temp, top_p, top_k, rep_penalty):
|
1102 |
+
"""Generate assistant response with visible thinking.
|
1103 |
+
|
1104 |
+
Args:
|
1105 |
+
history (List[Dict[str, str]]): The previous chat conversation history
|
1106 |
+
knowledge (Any): Documents to pass as knowledge to the multimodal model
|
1107 |
+
system_prompt (str): System prompt that the model follows
|
1108 |
+
max_tokens (int): Max number of allowed output tokens
|
1109 |
+
temp (float): Model's Temperature
|
1110 |
+
top_p (int): Model's Top p value
|
1111 |
+
top_k (int): Model's Top k value
|
1112 |
+
rep_penalty (float): Model's repetition penalty
|
1113 |
+
|
1114 |
+
Returns:
|
1115 |
+
history (List[Dict[str, str]]): The history of the conversation updated
|
1116 |
+
"""
|
1117 |
+
try:
|
1118 |
+
if not history or history[-1]["role"] != "user":
|
1119 |
+
return history
|
1120 |
+
|
1121 |
+
user_message = history[-1]["content"]
|
1122 |
+
# thinking message with pending status
|
1123 |
+
history.append({
|
1124 |
+
"role": "assistant",
|
1125 |
+
"content": "Je réfléchis étape par étape...",
|
1126 |
+
"metadata": {
|
1127 |
+
"title": "Réflexion",
|
1128 |
+
"status": "pending"
|
1129 |
+
}
|
1130 |
+
})
|
1131 |
+
yield history
|
1132 |
+
|
1133 |
+
thinking_conversation = []
|
1134 |
+
if system_prompt:
|
1135 |
+
thinking_conversation.append({"role": "system", "content": system_prompt})
|
1136 |
+
if knowledge:
|
1137 |
+
thinking_conversation.append({
|
1138 |
+
"role": "assistant",
|
1139 |
+
"content": f"Voici le document que je dois comprendre: {knowledge}\n\nJe vais l'analyser étape par étape."
|
1140 |
+
})
|
1141 |
+
|
1142 |
+
for msg in history[:-2]: # All msg except user message and thinking part
|
1143 |
+
thinking_conversation.append(msg)
|
1144 |
+
|
1145 |
+
thinking_prompt = user_message + "\n\nRéfléchis étape par étape.Identifie d'abord les entités, puis les relations, puis organise hiérarchiquement avant de formaliser."
|
1146 |
+
thinking_conversation.append({"role": "user", "content": thinking_prompt})
|
1147 |
+
|
1148 |
+
# GENERATE THINKING
|
1149 |
+
thinking_result = generate_llm_response(
|
1150 |
+
thinking_conversation,
|
1151 |
+
max_new_tokens=max_tokens * 2,
|
1152 |
+
temperature=temp,
|
1153 |
+
top_p=top_p,
|
1154 |
+
top_k=top_k,
|
1155 |
+
repetition_penalty=rep_penalty
|
1156 |
+
)
|
1157 |
+
|
1158 |
+
# update the thinking message
|
1159 |
+
history[-1] = {
|
1160 |
+
"role": "assistant",
|
1161 |
+
"content": thinking_result,
|
1162 |
+
"metadata": {
|
1163 |
+
"title": "Réflexion",
|
1164 |
+
"status": "done"
|
1165 |
+
}
|
1166 |
+
}
|
1167 |
+
yield history
|
1168 |
+
|
1169 |
+
final_conversation = []
|
1170 |
+
if system_prompt:
|
1171 |
+
final_conversation.append({"role": "system", "content": system_prompt})
|
1172 |
+
|
1173 |
+
if knowledge:
|
1174 |
+
final_conversation.append({
|
1175 |
+
"role": "assistant",
|
1176 |
+
"content": f"J'ai analysé ce document: {knowledge}"
|
1177 |
+
})
|
1178 |
+
|
1179 |
+
for msg in history[:-1]: # exclude thinking
|
1180 |
+
if "metadata" not in msg or "title" not in msg.get("metadata", {}):
|
1181 |
+
final_conversation.append(msg)
|
1182 |
+
|
1183 |
+
final_conversation.append({
|
1184 |
+
"role": "assistant",
|
1185 |
+
"content": f"Voici mon analyse étape par étape:\n{thinking_result}\n\nMaintenant je vais formaliser le résulat final."
|
1186 |
+
})
|
1187 |
+
final_answer = generate_llm_response(
|
1188 |
+
final_conversation,
|
1189 |
+
max_new_tokens=max_tokens,
|
1190 |
+
temperature=temp * 0.8, # Lower temperature for final answer
|
1191 |
+
top_p=top_p,
|
1192 |
+
top_k=top_k,
|
1193 |
+
repetition_penalty=rep_penalty
|
1194 |
+
)
|
1195 |
+
history.append({
|
1196 |
+
"role": "assistant",
|
1197 |
+
"content": final_answer
|
1198 |
+
})
|
1199 |
+
yield history
|
1200 |
+
|
1201 |
+
except Exception as e:
|
1202 |
+
error_traceback = traceback.format_exc()
|
1203 |
+
print(f"Error traceback:\n{error_traceback}")
|
1204 |
+
|
1205 |
+
history.append({
|
1206 |
+
"role": "assistant",
|
1207 |
+
"content": f"An error occurred: {str(e)}\n\nTraceback details:\n{error_traceback}"
|
1208 |
+
})
|
1209 |
+
yield history
|
+
+    file_explorer.change(
+        append_text_knowledge,
+        file_explorer,
+        knowledge_input
+    )
+
+    message_input.submit(
+        user_message,
+        inputs=[message_input, chatbot],
+        outputs=[chatbot, message_input]
+    ).then(
+        bot_response,
+        inputs=[
+            chatbot,
+            knowledge_input,
+            system_prompt_input,
+            max_tokens_slider,
+            temperature_slider,
+            top_p_slider,
+            top_k_slider,
+            repetition_penalty_slider
+        ],
+        outputs=chatbot
+    )
 if __name__ == "__main__":
     auth = HuggingFaceLogin()
     if auth.login():
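
Note: below is a minimal, self-contained sketch of the two-pass chain-of-thought pattern this commit wires into gr.Blocks, for readers who want the shape of it outside the full app. All names here (fake_llm, add_user, respond, box) are illustrative stand-ins, not code from app.py; the real mechanisms the commit relies on are the standard Gradio APIs: gr.Blocks, gr.Chatbot(type="messages"), Textbox.submit(...).then(...), generator handlers that yield successive chat states, and message "metadata" to render a titled thought bubble.

import gradio as gr

def fake_llm(conversation):
    # Stand-in for the app's generate_llm_response; returns a canned string.
    return "(model output)"

def add_user(message, history):
    # First step of the event chain: append the user turn, clear the textbox.
    return history + [{"role": "user", "content": message}], ""

def respond(history):
    # Pass 1: visible reasoning; "metadata" makes the Chatbot render it
    # as a collapsible, titled thought message.
    history.append({"role": "assistant", "content": fake_llm(history),
                    "metadata": {"title": "Réflexion", "status": "done"}})
    yield history
    # Pass 2: the final answer, conditioned on the reasoning above.
    history.append({"role": "assistant", "content": fake_llm(history)})
    yield history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot(type="messages")
    box = gr.Textbox()
    box.submit(add_user, [box, chatbot], [chatbot, box]).then(respond, chatbot, chatbot)

if __name__ == "__main__":
    demo.launch()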
|