Sync
Browse files
examples/baseline.py +5 -3
examples/baseline.py
CHANGED
@@ -35,6 +35,8 @@ max_input_tokens = int(os.environ.get("LAB_LLM_CONTEXT_WINDOW_SIZE", 65536))
|
|
35 |
enable_tooling = os.environ.get("LAB_LLM_ENABLE_TOOLING", "OFF") == "ON"
|
36 |
enable_streaming = os.environ.get("LAB_LLM_ENABLE_STREAMING", "OFF") == "ON"
|
37 |
max_log_size = int(os.environ.get("LAB_LLM_MAX_LOG_SIZE", 1000000000))
|
|
|
|
|
38 |
fix_dir = os.environ["LAB_FIX_DIR"]
|
39 |
os.makedirs(fix_dir, exist_ok=True)
|
40 |
|
@@ -290,7 +292,7 @@ def chat_with_streaming(env, messages, full_messages):
|
|
290 |
is_thinking = True
|
291 |
print(delta.reasoning_content, end="", flush=True)
|
292 |
reasoning_content += delta.reasoning_content
|
293 |
-
|
294 |
if delta.content != "" and is_answering is False:
|
295 |
print("\nAnswer:")
|
296 |
is_answering = True
|
@@ -386,7 +388,7 @@ def get_issue_desc(env: Env) -> str:
|
|
386 |
if issue is None:
|
387 |
return ""
|
388 |
title = issue["title"]
|
389 |
- body = issue["body"]
|
390 |
return f"Issue title: {title}\nIssue body: {body}\n"
|
391 |
|
392 |
|
@@ -462,7 +464,7 @@ def fix_issue(issue_id):
|
|
462 |
context_requirement = f"Please make sure the answer includes the prefix:\n```cpp\n{prefix}\n```\nand the suffix:\n```cpp\n{suffix}\n```\n"
|
463 |
desc += format_requirement + context_requirement
|
464 |
append_message(messages, full_messages, {"role": "user", "content": desc})
|
465 |
- for idx in range(4):
|
466 |
print(f"Round {idx + 1}")
|
467 |
if estimate_input_tokens(messages) > max_input_tokens:
|
468 |
return
|
|
|
35 |
enable_tooling = os.environ.get("LAB_LLM_ENABLE_TOOLING", "OFF") == "ON"
|
36 |
enable_streaming = os.environ.get("LAB_LLM_ENABLE_STREAMING", "OFF") == "ON"
|
37 |
max_log_size = int(os.environ.get("LAB_LLM_MAX_LOG_SIZE", 1000000000))
|
38 |
+ max_sample_count = int(os.environ.get("LAB_LLM_MAX_SAMPLE_COUNT", 4))
|
39 |
+ omit_issue_body = os.environ.get("LAB_LLM_OMIT_ISSUE_BODY", "OFF") == "ON"
|
40 |
fix_dir = os.environ["LAB_FIX_DIR"]
|
41 |
os.makedirs(fix_dir, exist_ok=True)
|
42 |
|
|
|
292 |
is_thinking = True
|
293 |
print(delta.reasoning_content, end="", flush=True)
|
294 |
reasoning_content += delta.reasoning_content
|
295 |
+ elif delta.content is not None:
|
296 |
if delta.content != "" and is_answering is False:
|
297 |
print("\nAnswer:")
|
298 |
is_answering = True
|
|
|
388 |
if issue is None:
|
389 |
return ""
|
390 |
title = issue["title"]
|
391 |
+ body = "<omitted>" if omit_issue_body else issue["body"]
|
392 |
return f"Issue title: {title}\nIssue body: {body}\n"
|
393 |
|
394 |
|
|
|
464 |
context_requirement = f"Please make sure the answer includes the prefix:\n```cpp\n{prefix}\n```\nand the suffix:\n```cpp\n{suffix}\n```\n"
|
465 |
desc += format_requirement + context_requirement
|
466 |
append_message(messages, full_messages, {"role": "user", "content": desc})
|
467 |
+ for idx in range(max_sample_count):
|
468 |
print(f"Round {idx + 1}")
|
469 |
if estimate_input_tokens(messages) > max_input_tokens:
|
470 |
return
|