Attempt to add more resilience against failure
bench-TriLMs.py (+3 -3)
@@ -74,7 +74,7 @@ def quantize(types: Sequence[str] = ALL_TYPES, sizes: Sequence[str] = MODEL_SIZE
         )
         logger.info("Running: %s", command)
         ret = os.system(command)
-        if ret != 0:
+        if ret != 0 or target.is_file() and target.stat().st_size == 0:
             logger.error("Failed to quantize to %s", target)
             # Should it still continue?

@@ -113,8 +113,8 @@ def llama_bench(
         command = [str(LLAMA_CPP_PATH / "build" / "bin" / "llama-bench")] + args
         logger.info("Running: %s", " ".join(command))
         result = subprocess.run(command, capture_output=True)
-        logger.debug(result.stderr.decode())
-        if result.returncode != 0:
+        logger.debug(result.stderr.decode(errors="ignore"))
+        if result.returncode != 0 or len(result.stdout) == 0:
             logger.error("Failed to run %s", " ".join(command))
             break;

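Both hunks apply the same resilience pattern: a successful exit status is not taken at face value, and the step's actual output is checked as well. The sketch below restates that pattern outside the script; the helper names quantize_with_check and bench_with_check are hypothetical and only illustrate the checks the commit adds inline to quantize() and llama_bench().

import logging
import os
import subprocess
from pathlib import Path

logger = logging.getLogger(__name__)


def quantize_with_check(command: str, target: Path) -> bool:
    # Sketch of the first hunk: a zero exit status alone is not trusted;
    # an existing but empty output file is also treated as a failure.
    ret = os.system(command)
    if ret != 0 or (target.is_file() and target.stat().st_size == 0):
        logger.error("Failed to quantize to %s", target)
        return False
    return True


def bench_with_check(command: list[str]) -> bytes | None:
    # Sketch of the second hunk: stderr is decoded with errors="ignore"
    # so stray bytes cannot raise UnicodeDecodeError, and empty stdout
    # counts as a failure even when the return code is 0.
    result = subprocess.run(command, capture_output=True)
    logger.debug(result.stderr.decode(errors="ignore"))
    if result.returncode != 0 or len(result.stdout) == 0:
        logger.error("Failed to run %s", " ".join(command))
        return None
    return result.stdout

Note that in the script itself the quantize loop only logs the error (hence the "# Should it still continue?" comment), while the llama-bench loop breaks out of its loop on failure.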