Fix bug in function evaluation
Browse files
__pycache__/prompt.cpython-311.pyc
ADDED
Binary file (9.24 kB). View file
|
|
src/evaluation/__pycache__/compare_result.cpython-311.pyc
ADDED
Binary file (3.85 kB). View file
|
|
src/prompts/__pycache__/prompt.cpython-311.pyc
ADDED
Binary file (9.25 kB). View file
|
|
test_pretrained.ipynb
CHANGED
@@ -256,7 +256,7 @@
|
|
256 |
},
|
257 |
{
|
258 |
"cell_type": "code",
|
259 |
-
"execution_count":
|
260 |
"metadata": {},
|
261 |
"outputs": [],
|
262 |
"source": [
|
@@ -275,7 +275,7 @@
|
|
275 |
" query_output = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)\n",
|
276 |
"\n",
|
277 |
" # Evaluate model result\n",
|
278 |
-
" valid, sql_matched, result_matched = compare_result(row[\"sql_query\"], row[\"result\"], query_output)\n",
|
279 |
" if valid:\n",
|
280 |
" num_valid += 1\n",
|
281 |
" if sql_matched:\n",
|
|
|
256 |
},
|
257 |
{
|
258 |
"cell_type": "code",
|
259 |
+
"execution_count": null,
|
260 |
"metadata": {},
|
261 |
"outputs": [],
|
262 |
"source": [
|
|
|
275 |
" query_output = tokenizer.decode(outputs[0][len(inputs[0]):], skip_special_tokens=True)\n",
|
276 |
"\n",
|
277 |
" # Evaluate model result\n",
|
278 |
+
" valid, sql_matched, result_matched = compare_result(cursor, row[\"sql_query\"], row[\"result\"], query_output)\n",
|
279 |
" if valid:\n",
|
280 |
" num_valid += 1\n",
|
281 |
" if sql_matched:\n",
|