Commit: update
demo.py CHANGED
@@ -213,7 +213,7 @@ def ensure_client_available():
 def model_prompting(
     llm_model: str,
     prompt: str,
-    max_token_num: Optional[int] = 2048,
+    max_token_num: Optional[int] = 1024,  # Changed from 2048 to 1024
     temperature: Optional[float] = 0.2,
     top_p: Optional[float] = 0.7,
     stream: Optional[bool] = True,
@@ -766,7 +766,7 @@ class GNNLLMSystem:
         response = model_prompting(
             llm_model=mapped_model_name,
             prompt=formatted_query,
-            max_token_num=4096,
+            max_token_num=1024,  # Changed from 4096 to 1024
             temperature=0.0,
             top_p=0.9,
             stream=True
@@ -859,7 +859,7 @@ Keep the description concise and informative. Respond with just the task descrip
         task_description = model_prompting(
             llm_model="meta/llama-3.1-8b-instruct",
             prompt=prompt,
-            max_token_num=2048,
+            max_token_num=1024,  # Changed from 2048 to 1024
             temperature=0.1,
             top_p=0.9,
             stream=True
@@ -1073,7 +1073,7 @@ def process_query(query):
         response = model_prompting(
             llm_model=api_model,
             prompt=final_prompt,
-            max_token_num=4096,
+            max_token_num=1024,  # Changed from 4096 to 1024
             temperature=0.0,
             top_p=0.9,
             stream=True
@@ -1187,7 +1187,7 @@ def process_thought_template_query(query, template_style, task_description, top_
         llama_response = model_prompting(
             llm_model="meta/llama-3.1-8b-instruct",
             prompt=enhanced_query,
-            max_token_num=4096,
+            max_token_num=1024,  # Changed from 4096 to 1024
             temperature=0.0,
             top_p=0.9,
             stream=True
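For readers skimming only this diff: the body of model_prompting is not shown here. The sketch below illustrates what a wrapper with this signature typically looks like and what the lower 1024-token default means at the call sites above. It assumes an OpenAI-compatible client via the openai Python package; the base URL, the NVIDIA_API_KEY variable, and the function body are assumptions for illustration, not taken from demo.py.

# Sketch only: the real implementation in demo.py is not part of this diff.
# Assumes an OpenAI-compatible endpoint and the `openai` Python package;
# base_url and NVIDIA_API_KEY are placeholders, not values from the repo.
import os
from typing import Optional

from openai import OpenAI

client = OpenAI(
    base_url="https://integrate.api.nvidia.com/v1",  # assumed endpoint
    api_key=os.environ.get("NVIDIA_API_KEY", ""),
)

def model_prompting(
    llm_model: str,
    prompt: str,
    max_token_num: Optional[int] = 1024,   # new default introduced by this commit
    temperature: Optional[float] = 0.2,
    top_p: Optional[float] = 0.7,
    stream: Optional[bool] = True,
) -> str:
    """Send a single-prompt chat completion and return the generated text."""
    completion = client.chat.completions.create(
        model=llm_model,
        messages=[{"role": "user", "content": prompt}],
        max_tokens=max_token_num,
        temperature=temperature,
        top_p=top_p,
        stream=stream,
    )
    if stream:
        # Concatenate streamed chunks into a single string.
        return "".join(chunk.choices[0].delta.content or "" for chunk in completion)
    return completion.choices[0].message.content

# Example call mirroring the updated call sites (the prompt text is illustrative):
# answer = model_prompting("meta/llama-3.1-8b-instruct", "Describe the task in one sentence.")

Under this reading, capping max_token_num at 1024 halves or quarters the per-request generation budget relative to the previous 2048/4096 values, presumably trading maximum answer length for lower latency and API cost.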