pect("failed to execute process");
assert!(!status.success());
} |
fn run_js_tests(test_dir: &str, example_name: String, js_test: &str, vk: bool) {
let example = format!("--example={}", example_name);
let dir = format!("--dir={}", test_dir);
let mut args = vec!["run", "test", js_test, &example, &dir];
let vk_string: String;
if vk {
vk_string = format!("--vk={}", vk);
args.push(&vk_string);
};
let status = Command::new("pnpm")
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
}
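// End-to-end data-attestation test: compile the circuit and generate a witness,
// rewrite input.json into its on-chain form, run setup and prove, deploy the
// verifier and data-attestation contracts to anvil, verify the proof on chain,
// and finally check that a known-bad proof (PF_FAILURE) is rejected.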
fn kzg_evm_on_chain_input_prove_and_verify(
test_dir: &str,
example_name: String,
input_source: &str,
output_source: &str,
input_visibility: &str,
output_visibility: &str,
) {
gen_circuit_settings_and_witness(
test_dir,
example_name.clone(),
input_visibility,
"private",
output_visibility,
1,
"resources",
Some(vec![4]),
1,
false,
&mut 0.0,
Commitments::KZG,
2,
);
let model_path = format!("{}/{}/network.compiled", test_dir, example_name);
let settings_path = format!("{}/{}/settings.json", test_dir, example_name);
init_params(settings_path.clone().into());
let data_path = format!("{}/{}/input.json", test_dir, example_name);
let witness_path = format!("{}/{}/witness.json", test_dir, example_name);
let test_on_chain_data_path = format!("{}/{}/on_chain_input.json", test_dir, example_name);
let rpc_arg = format!("--rpc-url={}", LIMITLESS_ANVIL_URL.as_str());
let private_key = format!("--private-key={}", *ANVIL_DEFAULT_PRIVATE_KEY);
let test_input_source = format!("--input-source={}", input_source);
let test_output_source = format!("--output-source={}", output_source);
let witness: GraphWitness = GraphWitness::from_path(witness_path.clone().into()).unwrap();
let mut input: GraphData = GraphData::from_path(data_path.clone().into()).unwrap();
if input_visibility == "hashed" {
let hashes = witness.processed_inputs.unwrap().poseidon_hash.unwrap();
input.input_data = DataSource::File(
hashes
.iter()
.map(|h| vec![FileSourceInner::Field(*h)])
.collect(),
);
}
if output_visibility == "hashed" {
        let hashes = witness.processed_outputs.unwrap().poseidon_hash.unwrap();
input.output_data = Some(DataSource::File(
hashes
.iter()
.map(|h| vec![FileSourceInner::Field(*h)])
.collect(),
));
} else {
input.output_data = Some(DataSource::File(
witness
.pretty_elements
.unwrap()
.rescaled_outputs
.iter()
.map(|o| {
o.iter()
.map(|f| FileSourceInner::Float(f.parse().unwrap()))
.collect()
})
.collect(),
));
}
input.save(data_path.clone().into()).unwrap();
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"setup-test-evm-data",
"-D",
data_path.as_str(),
"-M",
&model_path,
"--test-data",
test_on_chain_data_path.as_str(),
rpc_arg.as_str(),
test_input_source.as_str(),
test_output_source.as_str(),
])
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"setup",
"-M",
&model_path,
"--pk-path",
&format!("{}/{}/key.pk", test_dir, example_name),
"--vk-path",
&format!("{}/{}/key.vk", test_dir, example_name),
])
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"prove",
"-W",
&witness_path, |
"-M",
&model_path,
"--proof-path",
&format!("{}/{}/proof.pf", test_dir, example_name),
"--pk-path",
&format!("{}/{}/key.pk", test_dir, example_name),
])
.status()
.expect("failed to execute process");
assert!(status.success());
let vk_arg = format!("{}/{}/key.vk", test_dir, example_name);
let settings_arg = format!("--settings-path={}", settings_path);
let mut args = vec!["create-evm-verifier", "--vk-path", &vk_arg, &settings_arg];
let sol_arg = format!("{}/{}/kzg.sol", test_dir, example_name);
args.push("--sol-code-path");
args.push(sol_arg.as_str());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
let addr_path_verifier_arg = format!(
"--addr-path={}/{}/addr_verifier.txt",
test_dir, example_name
);
let mut args = vec![
"deploy-evm-verifier",
rpc_arg.as_str(),
addr_path_verifier_arg.as_str(),
];
args.push("--sol-code-path");
args.push(sol_arg.as_str());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
let sol_arg = format!("{}/{}/kzg.sol", test_dir, example_name);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"create-evm-da",
&settings_arg,
"--sol-code-path",
sol_arg.as_str(),
"-D",
test_on_chain_data_path.as_str(),
])
.status()
.expect("failed to execute process");
    assert!(status.success());
let addr_path_da_arg = format!("--addr-path={}/{}/addr_da.txt", test_dir, example_name);
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"deploy-evm-da",
format!("--settings-path={}", settings_path).as_str(),
"-D",
test_on_chain_data_path.as_str(),
"--sol-code-path",
sol_arg.as_str(),
rpc_arg.as_str(),
addr_path_da_arg.as_str(),
private_key.as_str(),
])
.status()
.expect("failed to execute process");
assert!(status.success());
let pf_arg = format!("{}/{}/proof.pf", test_dir, example_name);
let addr_verifier =
std::fs::read_to_string(format!("{}/{}/addr_verifier.txt", test_dir, example_name))
.expect("failed to read address file");
let deployed_addr_verifier_arg = format!("--addr-verifier={}", addr_verifier);
let addr_da = std::fs::read_to_string(format!("{}/{}/addr_da.txt", test_dir, example_name))
.expect("failed to read address file");
let deployed_addr_da_arg = format!("--addr-da={}", addr_da);
let args = vec![
"verify-evm",
"--proof-path",
pf_arg.as_str(),
deployed_addr_verifier_arg.as_str(),
deployed_addr_da_arg.as_str(),
rpc_arg.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args([
"setup-test-evm-data",
"-D",
data_path.as_str(),
"-M",
&model_path,
"--test-data",
            test_on_chain_data_path.as_str(),
rpc_arg.as_str(),
test_input_source.as_str(),
test_output_source.as_str(),
])
.status()
.expect("failed to execute process");
assert!(status.success());
let deployed_addr_arg = format!("--addr={}", addr_da);
let args = vec![
"test-update-account-calls",
deployed_addr_arg.as_str(),
"-D",
test_on_chain_data_path.as_str(),
rpc_arg.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(&args)
.status()
.expect("failed to execute process");
assert!(status.success());
let args = vec![
"verify-evm",
"--proof-path",
PF_FAILURE,
deployed_addr_verifier_arg.as_str(),
deployed_addr_da_arg.as_str(),
rpc_arg.as_str(),
];
let status = Command::new(format!("{}/release/ezkl", *CARGO_TARGET_DIR))
.args(args)
.status()
.expect("failed to execute process");
assert!(!status.success());
}
fn build_ezkl() {
    // Alternative cargo invocations for other build configurations (an
    // icicle-feature build and a default-features build); only the final
    // `args` binding is used below.
    let _args = [
        "build",
        "--release",
        "--bin",
        "ezkl",
        "--features",
        "icicle",
    ];
    let _args = ["build", "--release", "--bin", "ezkl"];
    let args = [
        "build",
        "--release",
        "--bin",
        "ezkl",
        "--no-default-features",
        "--features",
        "ezkl",
    ];
    let status = Command::new("cargo")
        .args(args)
        .status()
        .expect("failed to execute process");
    assert!(status.success());
}
fn build_wasm_ezkl() {
let status = Command::new("wasm-pack")
.args([
"build",
"--release",
"--target",
"nodejs",
"--out-dir",
"./tests/wasm/nodejs",
".",
"--",
"-Z",
"build-std=panic_abort,std",
])
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new("sed")
.args([
"-i",
"3s|.*|imports['env'] = {memory: new WebAssembly.Memory({initial:20,maximum:65536,shared:true})}|",
"./tests/wasm/nodejs/ezkl.js",
])
.status()
.expect("failed to execute process");
assert!(status.success());
}
}
import ezkl
import json
import onnx
import onnxruntime
import numpy as np
import sys
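# Compares the rescaled outputs recorded in an ezkl witness file against a
# reference onnxruntime execution of the same ONNX model, failing if the mean
# percentage difference exceeds the target passed on the command line.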
def get_ezkl_output(witness_file, settings_file):
witness_output = json.load(open(witness_file))
outputs = witness_output['outputs']
with open(settings_file) as f:
settings = json.load(f)
ezkl_outputs = [[ezkl.felt_to_float(
outputs[i][j], settings['model_output_scales'][i]) for j in range(len(outputs[i]))] for i in range(len(outputs))]
return ezkl_outputs
def get_onnx_output(model_file, input_file):
onnx_model = onnx.load(model_file)
onnx.checker.check_model(onnx_model)
with open(input_file) as f:
inputs = json.load(f)
num_inputs = len(onnx_model.graph.input)
onnx_input = dict()
for i in range(num_inputs):
input_node = onnx_model.graph.input[i]
dims = []
elem_type = input_node.type.tensor_type.elem_type
print("elem_type: ", elem_type)
for dim in input_node.type.tensor_type.shape.dim:
if dim.dim_value == 0:
dims.append(1)
else:
dims.append(dim.dim_value)
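        # ONNX TensorProto element types: 6 = INT32, 7 = INT64, 9 = BOOL;
        # anything else falls through to float32 below.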
if elem_type == 6:
inputs_onnx = np.array(inputs['input_data'][i]).astype(
np.int32).reshape(dims)
elif elem_type == 7:
inputs_onnx = np.array(inputs['input_data'][i]).astype(
np.int64).reshape(dims)
elif elem_type == 9:
inputs_onnx = np.array(inputs['input_data'][i]).astype(
bool).reshape(dims)
else:
inputs_onnx = np.array(inputs['input_data'][i]).astype(
np.float32).reshape(dims)
onnx_input[input_node.name] = inputs_onnx
try:
onnx_session = onnxruntime.InferenceSession(model_file)
onnx_output = onnx_session.run(None, onnx_input)
except Exception as e:
print("error: ", e)
onnx_output = inputs['output_data']
print("onnx ", onnx_output)
return onnx_output[0]
def compare_outputs(zk_output, onnx_output):
res = []
    contains_sublist = any(isinstance(sub, list) for sub in zk_output)
print("zk ", zk_output)
if contains_sublist:
try:
if len(onnx_output) == 1:
zk_output = zk_output[0]
except Exception as e:
zk_output = zk_output[0]
print("zk ", zk_output)
zip_object = zip(np.array(zk_output).flatten(),
np.array(onnx_output).flatten())
for (i, (list1_i, list2_i)) in enumerate(zip_object):
if list1_i == 0.0 and list2_i == 0.0:
res.append(0)
else:
diff = list1_i - list2_i
res.append(100 * (diff) / (list2_i))
if abs(diff) > 0.0:
print("------- index: ", i)
print("------- diff: ", diff)
print("------- zk_output: ", list1_i)
print("------- onnx_output: ", list2_i)
return res
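# Worked example (hypothetical values): compare_outputs([[1.0, 2.0]], [1.02, 2.0])
# flattens both sides and returns approximately [-1.96, 0.0], the per-element
# percentage differences relative to the ONNX output.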
if __name__ == '__main__':
model_file = sys.argv[1]
input_file = sys.argv[2]
witness_file = sys.argv[3]
settings_file = sys.argv[4]
target = float(sys.argv[5])
ezkl_output = get_ezkl_output(witness_file, settings_file)
onnx_output = get_onnx_output(model_file, input_file)
percentage_difference = compare_outputs(ezkl_output, onnx_output)
mean_percentage_difference = np.mean(np.abs(percentage_difference))
max_percentage_difference = np.max(np.abs(percentage_difference))
print("mean percent diff: ", mean_percentage_difference)
print("max percent diff: ", max_percentage_difference)
    assert mean_percentage_difference < target, "Percentage difference is too high"
mod py_tests {
use lazy_static::lazy_static;
use std::env::var;
use std::process::{Child, Command};
use std::sync::Once;
use tempdir::TempDir;
static COMPILE: Once = Once::new();
static ENV_SETUP: Once = Once::new();
static DOWNLOAD_VOICE_DATA: Once = Once::new();
lazy_static! {
static ref CARGO_TARGET_DIR: String =
var("CARGO_TARGET_DIR").unwrap_or_else(|_| "./target".to_string());
        static ref ANVIL_URL: String = "http://localhost:3030".to_string();
}
fn start_anvil(limitless: bool) -> Child {
let mut args = vec!["-p", "3030"];
if limitless {
args.push("--code-size-limit=41943040");
args.push("--disable-block-gas-limit");
}
let child = Command::new("anvil")
.args(args)
.spawn()
.expect("failed to start anvil process");
std::thread::sleep(std::time::Duration::from_secs(3));
child
}
fn download_voice_data() {
let voice_data_dir = shellexpand::tilde("~/data/voice_data");
DOWNLOAD_VOICE_DATA.call_once(|| {
let status = Command::new("bash")
.args(["examples/notebooks/voice_data.sh", &voice_data_dir])
.status()
.expect("failed to execute process");
assert!(status.success());
});
    std::env::set_var("VOICE_DATA_DIR", voice_data_dir.to_string());
}
fn setup_py_env() {
ENV_SETUP.call_once(|| {
let status = Command::new("pip")
.args(["install", "tf2onnx==1.16.1"])
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new("pip")
.args([
"install",
"torch-geometric==2.5.2",
"torch==2.2.2",
"torchvision==0.17.2",
"pandas==2.2.1",
"numpy==1.26.4",
"seaborn==0.13.2",
"notebook==7.1.2",
"nbconvert==7.16.3",
"onnx==1.16.0",
"kaggle==1.6.8",
"py-solc-x==2.0.2",
"web3==6.16.0",
"librosa==0.10.1",
"keras==3.1.1",
"tensorflow==2.16.1",
"tensorflow-datasets==4.9.4",
"pytorch-lightning==2.2.1",
"sk2torch==1.2.0",
"scikit-learn==1.4.1.post1",
"xgboost==2.0.3",
"hummingbird-ml==0.4.11",
"lightgbm==4.3.0",
])
.status()
.expect("failed to execute process");
assert!(status.success());
let status = Command::new("pip")
.args(["install", "numpy==1.23"])
.status()
.expect("failed to execute process");
assert!(status.success());
});
}
fn init_binary() {
COMPILE.call_once(|| {
println!("using cargo target dir: {}", *CARGO_TARGET_DIR);
setup_py_env();
});
}
fn mv_test_(test_dir: &str, test: &str) {
let path: std::path::PathBuf = format!("{}/{}", test_dir, test).into();
if !path.exists() {
let status = Command::new("cp")
.args([
"-R",
&format!("./examples/notebooks/{}", test),
&format!("{}/{}", test_dir, test),
])
.status()
.expect("failed to execute process");
assert!(status.success());
}
}
const TESTS: [&str; 32] = [
"proof_splitting.ipynb",
"variance.ipynb",
"mnist_gan.ipynb",
"keras_simple_demo.ipynb",
"mnist_gan_proof_splitting.ipynb",
"hashed_vis.ipynb",
"simple_demo_all_public.ipynb",
"data_attest.ipynb",
"little_transformer.ipynb",
"simple_demo_aggregated_proofs.ipynb",
"ezkl_demo.ipynb",
"lstm.ipynb",
"set_membership.ipynb",
"decision_tree.ipynb",
"random_forest.ipynb",
"gradient_boosted_trees.ipynb",
"xgboost.ipynb",
"lightgbm.ipynb",
"svm.ipynb",
"simple_demo_public_input_output.ipynb",
"simple_demo_public_network_output.ipynb",
"gcn.ipynb",
"linear_regression.ipynb",
"stacked_regression.ipynb",
"data_attest_hashed.ipynb",
"kzg_vis.ipynb",
"kmeans.ipynb",
"solvency.ipynb",
"sklearn_mlp.ipynb",
"generalized_inverse.ipynb",
"mnist_classifier.ipynb",
"world_rotation.ipynb",
];
macro_rules! test_func {
() => {
mod tests {
use seq_macro::seq;
use crate::py_tests::TESTS;
use test_case::test_case;
use super::*;
seq!(N in 0..=31 {
fn run_notebook_(test: &str) {
crate::py_tests::init_binary();
let mut limitless = false;
if test == TESTS[5] {
limitless = true;
}
let mut anvil_child = crate::py_tests::start_anvil(limitless);
let test_dir: TempDir = TempDir::new("nb").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, test);
run_notebook(path, test);
test_dir.close().unwrap();
anvil_child.kill().unwrap();
}
});
fn voice_notebook_() {
crate::py_tests::init_binary();
let mut anvil_child = crate::py_tests::start_anvil(false);
crate::py_tests::download_voice_data();
let test_dir: TempDir = TempDir::new("voice_judge").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, "voice_judge.ipynb");
run_notebook(path, "voice_judge.ipynb");
test_dir.close().unwrap();
anvil_child.kill().unwrap();
}
fn postgres_notebook_() {
crate::py_tests::init_binary();
let test_dir: TempDir = TempDir::new("mean_postgres").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, "mean_postgres.ipynb");
run_notebook(path, "mean_postgres.ipynb");
test_dir.close().unwrap();
}
fn tictactoe_autoencoder_notebook_() {
crate::py_tests::init_binary();
let test_dir: TempDir = TempDir::new("tictactoe_autoencoder").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, "tictactoe_autoencoder.ipynb");
run_notebook(path, "tictactoe_autoencoder.ipynb");
test_dir.close().unwrap();
}
fn tictactoe_binary_classification_notebook_() {
crate::py_tests::init_binary();
let test_dir: TempDir = TempDir::new("tictactoe_binary_classification").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, "tictactoe_binary_classification.ipynb");
run_notebook(path, "tictactoe_binary_classification.ipynb");
test_dir.close().unwrap();
}
fn nbeats_notebook_() {
crate::py_tests::init_binary();
let test_dir: TempDir = TempDir::new("nbeats").unwrap();
let path = test_dir.path().to_str().unwrap();
crate::py_tests::mv_test_(path, "nbeats_timeseries_forecasting.ipynb");
crate::py_tests::mv_test_(path, "eth_price.csv");
run_notebook(path, "nbeats_timeseries_forecasting.ipynb");
test_dir.close().unwrap();
}
}
};
}
fn run_notebook(test_dir: &str, test: &str) {
let status = Command::new("bash")
.arg("-c")
.arg("source .env/bin/activate")
.status()
.expect("failed to execute process");
assert!(status.success());
let path: std::path::PathBuf = format!("{}/{}", test_dir, test).into();
let status = Command::new("jupyter")
.args([
"nbconvert",
"--to",
"notebook",
"--execute",
(path.to_str().unwrap()),
])
.status()
.expect("failed to execute process");
assert!(status.success());
}
test_func!();
}
import ezkl
import os
import pytest
import json
import subprocess
import time
folder_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'.',
)
)
examples_path = os.path.abspath(
os.path.join(
folder_path,
'..',
'..',
'examples',
)
)
srs_path = os.path.join(folder_path, 'kzg_test.params')
params_k17_path = os.path.join(folder_path, 'kzg_test_k17.params')
params_k20_path = os.path.join(folder_path, 'kzg_test_k20.params')
anvil_url = "http:
def setup_module(module):
"""setup anvil."""
global proc
proc = subprocess.Popen(["anvil", "-p", "3030"])
time.sleep(1)
def teardown_module(module):
"""teardown anvil.
"""
proc.terminate()
def test_py_run_args():
"""
Test for PyRunArgs
"""
run_args = ezkl.PyRunArgs()
run_args.input_visibility = "hashed"
run_args.output_visibility = "hashed"
run_args.tolerance = 1.5
def test_poseidon_hash():
"""
Test for poseidon_hash
"""
message = [1.0, 2.0, 3.0, 4.0]
message = [ezkl.float_to_felt(x, 7) for x in message]
res = ezkl.poseidon_hash(message)
assert ezkl.felt_to_big_endian(
res[0]) == "0x0da7e5e5c8877242fa699f586baf770d731defd54f952d4adeb85047a0e32f45"
def test_field_serialization():
"""
Test field element serialization
"""
input = 890
scale = 7
felt = ezkl.float_to_felt(input, scale)
roundtrip_input = ezkl.felt_to_float(felt, scale)
assert input == roundtrip_input
input = -700
scale = 7
felt = ezkl.float_to_felt(input, scale)
roundtrip_input = ezkl.felt_to_float(felt, scale)
assert input == roundtrip_input
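# ezkl's fixed-point encoding multiplies by 2**scale before mapping into the
# field, so 890 at scale 7 round-trips through 890 * 128 = 113920; negative
# values wrap around the field modulus, and felt_to_float reverses both steps.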
def test_buffer_to_felts():
"""
Test buffer_to_felt
"""
buffer = bytearray("a sample string!", 'utf-8')
felts = ezkl.buffer_to_felts(buffer)
ref_felt_1 = "0x0000000000000000000000000000000021676e6972747320656c706d61732061"
assert ezkl.felt_to_big_endian(felts[0]) == ref_felt_1
buffer = bytearray("a sample string!"+"high", 'utf-8')
    felts = ezkl.buffer_to_felts(buffer)
ref_felt_2 = "0x0000000000000000000000000000000000000000000000000000000068676968"
assert [ezkl.felt_to_big_endian(felts[0]), ezkl.felt_to_big_endian(felts[1])] == [ref_felt_1, ref_felt_2]
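# buffer_to_felts packs the bytes into 16-byte little-endian chunks, one field
# element per chunk: "a sample string!" is exactly 16 bytes, so appending
# "high" produces a second element containing just those four bytes
# (0x68676968 little-endian, as the reference value above shows).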
def test_gen_srs():
"""
test for gen_srs() with 17 logrows and 20 logrows.
You may want to comment this test as it takes a long time to run
"""
ezkl.gen_srs(params_k17_path, 17)
assert os.path.isfile(params_k17_path)
ezkl.gen_srs(params_k20_path, 20)
assert os.path.isfile(params_k20_path)
def test_calibrate_over_user_range():
data_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'input.json'
)
model_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'network.onnx'
)
output_path = os.path.join(
folder_path,
'settings.json'
)
run_args = ezkl.PyRunArgs()
run_args.input_visibility = "hashed"
run_args.output_visibility = "hashed"
res = ezkl.gen_settings(
model_path, output_path, py_run_args=run_args)
assert res == True
res = ezkl.calibrate_settings(
data_path, model_path, output_path, "resources", 1, [0, 1, 2])
assert res == True
assert os.path.isfile(output_path)
def test_calibrate():
data_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'input.json'
)
model_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'network.onnx'
)
output_path = os.path.join(
folder_path,
'settings.json'
)
run_args = ezkl.PyRunArgs()
run_args.input_visibility = "hashed"
run_args.output_visibility = "hashed"
res = ezkl.gen_settings(
model_path, output_path, py_run_args=run_args)
assert res == True
res = ezkl.calibrate_settings(
data_path, model_path, output_path, "resources")
assert res == True
assert os.path.isfile(output_path)
def test_model_compile():
"""
    Test for model compilation/serialization
"""
model_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'network.onnx'
)
compiled_model_path = os.path.join(
folder_path,
'model.compiled'
)
settings_path = os.path.join(
folder_path,
'settings.json'
)
res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)
assert res == True
def test_forward():
"""
Test for vanilla forward pass
"""
data_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'input.json'
)
model_path = os.path.join(
folder_path,
'model.compiled'
)
output_path = os.path.join(
folder_path,
'witness.json'
)
res = ezkl.gen_witness(data_path, model_path, output_path)
with open(output_path, "r") as f:
data = json.load(f)
assert data["inputs"] == res["inputs"]
assert data["outputs"] == res["outputs"]
assert data["processed_inputs"]["poseidon_hash"] == res["processed_inputs"]["poseidon_hash"]
assert data["processed_outputs"]["poseidon_hash"] == res["processed_outputs"]["poseidon_hash"]
def test_get_srs():
"""
Test for get_srs
"""
settings_path = os.path.join(folder_path, 'settings.json')
res = ezkl.get_srs(settings_path, srs_path=srs_path)
assert res == True
assert os.path.isfile(srs_path)
another_srs_path = os.path.join(folder_path, "kzg_test_k8.params")
res = ezkl.get_srs(logrows=8, srs_path=another_srs_path, commitment=ezkl.PyCommitments.KZG)
assert os.path.isfile(another_srs_path)
def test_mock():
"""
Test for mock
"""
data_path = os.path.join(
folder_path,
'witness.json'
)
model_path = os.path.join(
folder_path,
'model.compiled'
)
settings_path = os.path.join(folder_path, 'settings.json')
res = ezkl.mock(data_path, model_path)
assert res == True
def test_setup():
"""
    Test for setup
"""
data_path = os.path.join(
folder_path,
'witness.json'
)
model_path = os.path.join(
folder_path,
'model.compiled'
)
pk_path = os.path.join(folder_path, 'test.pk')
vk_path = os.path.join(folder_path, 'test.vk')
settings_path = os.path.join(folder_path, 'settings.json')
res = ezkl.setup(
model_path,
vk_path,
pk_path,
srs_path=srs_path,
)
assert res == True
assert os.path.isfile(vk_path)
assert os.path.isfile(pk_path)
assert os.path.isfile(settings_path)
res = ezkl.gen_vk_from_pk_single(pk_path, settings_path, vk_path)
assert res == True
assert os.path.isfile(vk_path)
def test_setup_evm():
"""
Test for setup
"""
model_path = os.path.join(
folder_path,
'model.compiled'
)
pk_path = os.path.join(folder_path, 'test_evm.pk')
vk_path = os.path.join(folder_path, 'test_evm.vk')
res = ezkl.setup(
model_path,
vk_path,
pk_path,
srs_path=srs_path,
)
assert res == True
assert os.path.isfile(vk_path)
assert os.path.isfile(pk_path)
def test_prove_and_verify():
"""
Test for prove and verify
"""
data_path = os.path.join(
folder_path,
'witness.json'
)
model_path = os.path.join(
folder_path,
'model.compiled'
)
pk_path = os.path.join(folder_path, 'test.pk')
proof_path = os.path.join(folder_path, 'test.pf')
res = ezkl.prove(
data_path,
model_path,
pk_path,
proof_path,
"for-aggr",
srs_path=srs_path,
)
assert res['transcript_type'] == 'Poseidon'
assert os.path.isfile(proof_path)
settings_path = os.path.join(folder_path, 'settings.json')
vk_path = os.path.join(folder_path, 'test.vk')
res = ezkl.verify(proof_path, settings_path,
vk_path, srs_path)
assert res == True
assert os.path.isfile(vk_path)
def test_prove_evm():
"""
Test for prove using evm transcript
"""
data_path = os.path.join(
folder_path,
'witness.json'
)
model_path = os.path.join(
folder_path,
'model.compiled'
)
pk_path = os.path.join(folder_path, 'test_evm.pk')
proof_path = os.path.join(folder_path, 'test_evm.pf')
res = ezkl.prove(
data_path,
model_path,
pk_path,
proof_path,
"single",
srs_path=srs_path,
)
assert res['transcript_type'] == 'EVM'
assert os.path.isfile(proof_path)
def test_create_evm_verifier():
"""
Create EVM verifier with solidity code
In order to run this test you will need to install solc in your environment
"""
vk_path = os.path.join(folder_path, 'test_evm.vk')
settings_path = os.path.join(folder_path, 'settings.json')
sol_code_path = os.path.join(folder_path, 'test.sol')
abi_path = os.path.join(folder_path, 'test.abi')
res = ezkl.create_evm_verifier(
vk_path,
settings_path,
sol_code_path,
abi_path,
srs_path=srs_path,
)
assert res == True
assert os.path.isfile(sol_code_path)
def test_deploy_evm():
"""
Test deployment of the verifier smart contract
In order to run this you will need to install solc in your environment
"""
addr_path = os.path.join(folder_path, 'address.json')
sol_code_path = os.path.join(folder_path, 'test.sol')
res = ezkl.deploy_evm(
addr_path,
sol_code_path,
rpc_url=anvil_url,
)
assert res == True
def test_deploy_evm_with_private_key():
"""
Test deployment of the verifier smart contract using a custom private key
In order to run this you will need to install solc in your environment
"""
addr_path = os.path.join(folder_path, 'address.json')
sol_code_path = os.path.join(folder_path, 'test.sol')
    anvil_default_private_key = "ac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80"
res = ezkl.deploy_evm(
addr_path,
sol_code_path,
rpc_url=anvil_url,
private_key=anvil_default_private_key
)
assert res == True
custom_zero_balance_private_key = "ff9dfe0b6d31e93ba13460a4d6f63b5e31dd9532b1304f1cbccea7092a042aa4"
with pytest.raises(RuntimeError, match="Failed to run deploy_evm"):
res = ezkl.deploy_evm(
addr_path,
sol_code_path,
rpc_url=anvil_url,
private_key=custom_zero_balance_private_key
)
def test_verify_evm():
"""
Verifies an evm proof
In order to run this you will need to install solc in your environment
"""
proof_path = os.path.join(folder_path, 'test_evm.pf')
addr_path = os.path.join(folder_path, 'address.json')
with open(addr_path, 'r') as file:
addr = file.read().rstrip()
print(addr)
res = ezkl.verify_evm(
addr,
proof_path,
rpc_url=anvil_url,
)
assert res == True
def test_aggregate_and_verify_aggr():
data_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'input.json'
)
model_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'network.onnx'
)
compiled_model_path = os.path.join(
folder_path,
'compiled_relu.onnx'
)
pk_path = os.path.join(folder_path, '1l_relu.pk')
vk_path = os.path.join(folder_path, '1l_relu.vk')
settings_path = os.path.join(
folder_path, '1l_relu_aggr_settings.json')
res = ezkl.gen_settings(model_path, settings_path)
assert res == True
res = ezkl.calibrate_settings(
data_path, model_path, settings_path, "resources")
assert res == True
assert os.path.isfile(settings_path)
res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)
assert res == True
ezkl.setup(
compiled_model_path,
vk_path,
pk_path,
srs_path=srs_path,
    )
proof_path = os.path.join(folder_path, '1l_relu.pf')
output_path = os.path.join(
folder_path,
'1l_relu_aggr_witness.json'
)
res = ezkl.gen_witness(data_path, compiled_model_path,
output_path)
ezkl.prove(
output_path,
compiled_model_path,
pk_path,
proof_path,
"for-aggr",
srs_path=srs_path,
)
res = ezkl.mock_aggregate([proof_path], 20)
assert res == True
aggregate_proof_path = os.path.join(folder_path, 'aggr_1l_relu.pf')
aggregate_vk_path = os.path.join(folder_path, 'aggr_1l_relu.vk')
aggregate_pk_path = os.path.join(folder_path, 'aggr_1l_relu.pk')
res = ezkl.setup_aggregate(
[proof_path],
aggregate_vk_path,
aggregate_pk_path,
20,
srs_path=params_k20_path,
)
res = ezkl.gen_vk_from_pk_aggr(aggregate_pk_path, aggregate_vk_path)
assert res == True
assert os.path.isfile(vk_path)
res = ezkl.aggregate(
[proof_path],
aggregate_proof_path,
aggregate_pk_path,
"poseidon",
20,
"unsafe",
srs_path=params_k20_path,
)
assert res == True
assert os.path.isfile(aggregate_proof_path)
assert os.path.isfile(aggregate_vk_path)
res = ezkl.verify_aggr(
aggregate_proof_path,
aggregate_vk_path,
20,
srs_path=params_k20_path,
)
assert res == True
def test_evm_aggregate_and_verify_aggr():
data_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'input.json'
)
model_path = os.path.join(
examples_path,
'onnx',
'1l_relu',
'network.onnx'
)
pk_path = os.path.join(folder_path, '1l_relu.pk')
vk_path = os.path.join(folder_path, '1l_relu.vk')
settings_path = os.path.join(
folder_path, '1l_relu_evm_aggr_settings.json')
ezkl.gen_settings(
model_path,
settings_path,
)
    ezkl.calibrate_settings(
data_path,
model_path,
settings_path,
"resources",
)
compiled_model_path = os.path.join(
folder_path,
'compiled_relu.onnx'
)
res = ezkl.compile_circuit(model_path, compiled_model_path, settings_path)
assert res == True
ezkl.setup(
compiled_model_path,
vk_path,
pk_path,
srs_path=srs_path,
)
proof_path = os.path.join(folder_path, '1l_relu.pf')
output_path = os.path.join(
folder_path,
'1l_relu_aggr_evm_witness.json'
)
res = ezkl.gen_witness(data_path, compiled_model_path,
output_path)
ezkl.prove(
output_path,
compiled_model_path,
pk_path,
proof_path,
"for-aggr",
srs_path=srs_path,
)
aggregate_proof_path = os.path.join(folder_path, 'aggr_evm_1l_relu.pf')
aggregate_vk_path = os.path.join(folder_path, 'aggr_evm_1l_relu.vk')
aggregate_pk_path = os.path.join(folder_path, 'aggr_evm_1l_relu.pk')
res = ezkl.setup_aggregate(
[proof_path],
aggregate_vk_path,
aggregate_pk_path,
20,
srs_path=params_k20_path,
)
res = ezkl.aggregate(
[proof_path],
aggregate_proof_path,
aggregate_pk_path,
"evm",
20,
"unsafe",
srs_path=params_k20_path,
)
assert res == True
assert os.path.isfile(aggregate_proof_path)
assert os.path.isfile(aggregate_vk_path)
sol_code_path = os.path.join(folder_path, 'aggr_evm_1l_relu.sol')
abi_path = os.path.join(folder_path, 'aggr_evm_1l_relu.abi')
res = ezkl.create_evm_verifier_aggr(
[settings_path],
aggregate_vk_path,
sol_code_path,
abi_path,
logrows=20,
srs_path=params_k20_path,
)
assert res == True
assert os.path.isfile(sol_code_path)
addr_path = os.path.join(folder_path, 'address_aggr.json')
res = ezkl.deploy_evm(
addr_path,
sol_code_path,
        rpc_url=anvil_url,
)
res = ezkl.verify_aggr(
aggregate_proof_path,
aggregate_vk_path,
20,
srs_path=params_k20_path,
)
assert res == True
def get_examples():
EXAMPLES_OMIT = [
'mobilenet_large',
'mobilenet',
'doodles',
'nanoGPT',
"self_attention",
'multihead_attention',
'large_op_graph',
'1l_instance_norm',
'variable_cnn',
'accuracy',
'linear_regression',
"mnist_gan",
]
examples = []
for subdir, _, _ in os.walk(os.path.join(examples_path, "onnx")):
name = subdir.split('/')[-1]
if name in EXAMPLES_OMIT or name == "onnx":
continue
else:
examples.append((
os.path.join(subdir, "network.onnx"),
os.path.join(subdir, "input.json"),
))
return examples
@pytest.mark.parametrize("model_file, input_file", get_examples())
def test_all_examples(model_file, input_file):
"""Tests all examples in the examples folder"""
settings_path = os.path.join(folder_path, "settings.json")
compiled_model_path = os.path.join(folder_path, 'network.ezkl')
pk_path = os.path.join(folder_path, 'test.pk')
vk_path = os.path.join(folder_path, 'test.vk')
witness_path = os.path.join(folder_path, 'witness.json')
proof_path = os.path.join(folder_path, 'proof.json')
print("Testing example: ", model_file)
res = ezkl.gen_settings(model_file, settings_path)
assert res
res = ezkl.calibrate_settings(
input_file, model_file, settings_path, "resources")
assert res
print("Compiling example: ", model_file)
res = ezkl.compile_circuit(model_file, compiled_model_path, settings_path)
assert res
with open(settings_path, 'r') as f:
data = json.load(f)
logrows = data["run_args"]["logrows"]
srs_path = os.path.join(folder_path, f"srs_{logrows}")
    if not os.path.exists(srs_path):
print("Generating srs file: ", srs_path)
ezkl.gen_srs(os.path.join(folder_path, srs_path), logrows)
print("Setting up example: ", model_file)
res = ezkl.setup(
compiled_model_path,
vk_path,
pk_path,
srs_path
)
assert res == True
assert os.path.isfile(vk_path)
assert os.path.isfile(pk_path)
print("Generating witness for example: ", model_file)
res = ezkl.gen_witness(input_file, compiled_model_path, witness_path)
assert os.path.isfile(witness_path)
print("Proving example: ", model_file)
ezkl.prove(
witness_path,
compiled_model_path,
pk_path,
proof_path,
"single",
srs_path=srs_path,
)
assert os.path.isfile(proof_path)
print("Verifying example: ", model_file)
res = ezkl.verify(
proof_path,
settings_path,
vk_path,
srs_path=srs_path,
)
    assert res == True
"""
This is meant to be used locally for development.
Generating the SRS is costly so we run this instead of creating a new SRS each
time we run tests.
"""
import ezkl
import os
srs_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'.',
'kzg_test.params',
)
)
def gen_test_srs(logrows=17):
"""Generates a test srs with 17 log rows"""
ezkl.gen_srs(srs_path, logrows)
def delete_test_srs():
"""Deletes test srs after tests are done"""
os.remove(srs_path)
if __name__ == "__main__":
# gen_test_srs()
path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'..',
'examples',
'onnx',
'1l_average',
'network.onnx'
)
)
print(ezkl.table(path))
mod wasm32 {
use ezkl::circuit::modules::polycommit::PolyCommitChip;
use ezkl::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use ezkl::circuit::modules::poseidon::PoseidonChip;
use ezkl::circuit::modules::Module;
use ezkl::graph::modules::POSEIDON_LEN_GRAPH;
use ezkl::graph::GraphCircuit;
use ezkl::graph::{GraphSettings, GraphWitness};
use ezkl::pfsys;
use ezkl::wasm::{
bufferToVecOfFelt, compiledCircuitValidation, encodeVerifierCalldata, feltToBigEndian,
feltToFloat, feltToInt, feltToLittleEndian, genPk, genVk, genWitness, inputValidation,
kzgCommit, pkValidation, poseidonHash, proofValidation, prove, settingsValidation,
srsValidation, u8_array_to_u128_le, verify, verifyAggr, vkValidation, witnessValidation,
};
use halo2_proofs::plonk::VerifyingKey;
use halo2_proofs::poly::commitment::CommitmentScheme;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::kzg::commitment::ParamsKZG;
use halo2_solidity_verifier::encode_calldata;
use halo2curves::bn256::Bn256;
use halo2curves::bn256::{Fr, G1Affine};
use snark_verifier::util::arithmetic::PrimeField;
use wasm_bindgen::JsError;
pub use wasm_bindgen_rayon::init_thread_pool;
use wasm_bindgen_test::*;
wasm_bindgen_test::wasm_bindgen_test_configure!(run_in_browser);
pub const WITNESS: &[u8] = include_bytes!("../tests/wasm/witness.json");
pub const NETWORK_COMPILED: &[u8] = include_bytes!("../tests/wasm/model.compiled");
pub const NETWORK: &[u8] = include_bytes!("../tests/wasm/network.onnx");
pub const INPUT: &[u8] = include_bytes!("../tests/wasm/input.json");
pub const PROOF: &[u8] = include_bytes!("../tests/wasm/proof.json");
pub const PROOF_AGGR: &[u8] = include_bytes!("../tests/wasm/proof_aggr.json");
pub const SETTINGS: &[u8] = include_bytes!("../tests/wasm/settings.json");
pub const PK: &[u8] = include_bytes!("../tests/wasm/pk.key");
    pub const VK: &[u8] = include_bytes!("../tests/wasm/vk.key");
pub const VK_AGGR: &[u8] = include_bytes!("../tests/wasm/vk_aggr.key");
pub const SRS: &[u8] = include_bytes!("../tests/wasm/kzg");
pub const SRS1: &[u8] = include_bytes!("../tests/wasm/kzg1.srs");
    async fn can_verify_aggr() {
let value = verifyAggr(
wasm_bindgen::Clamped(PROOF_AGGR.to_vec()),
wasm_bindgen::Clamped(VK_AGGR.to_vec()),
21,
wasm_bindgen::Clamped(SRS1.to_vec()),
"kzg",
)
.map_err(|_| "failed")
.unwrap();
assert!(value);
}
    async fn verify_encode_verifier_calldata() {
let ser_proof = wasm_bindgen::Clamped(PROOF.to_vec());
let calldata = encodeVerifierCalldata(ser_proof.clone(), None)
.map_err(|_| "failed")
.unwrap();
let snark: pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&PROOF).unwrap();
let flattened_instances = snark.instances.into_iter().flatten();
let reference_calldata = encode_calldata(
None,
&snark.proof,
&flattened_instances.clone().collect::<Vec<_>>(),
);
assert_eq!(calldata, reference_calldata);
let vk_address = hex::decode("0000000000000000000000000000000000000000").unwrap();
let vk_address: [u8; 20] = {
let mut array = [0u8; 20];
array.copy_from_slice(&vk_address);
array
};
let serialized = serde_json::to_vec(&vk_address).unwrap();
let calldata = encodeVerifierCalldata(ser_proof, Some(serialized))
.map_err(|_| "failed")
.unwrap();
let reference_calldata = encode_calldata(
Some(vk_address),
&snark.proof,
&flattened_instances.collect::<Vec<_>>(),
);
assert_eq!(calldata, reference_calldata);
    }
fn verify_kzg_commit() {
let mut message: Vec<Fr> = vec![];
for i in 0..32 {
message.push(Fr::from(i as u64));
}
let message_ser = serde_json::to_vec(&message).unwrap();
let settings: GraphSettings = serde_json::from_slice(&SETTINGS).unwrap();
let mut reader = std::io::BufReader::new(SRS);
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader).unwrap();
let mut reader = std::io::BufReader::new(VK);
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
settings.clone(),
)
.unwrap();
let commitment_ser = kzgCommit(
wasm_bindgen::Clamped(message_ser),
wasm_bindgen::Clamped(VK.to_vec()),
wasm_bindgen::Clamped(SETTINGS.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
)
.map_err(|_| "failed")
.unwrap();
let commitment: Vec<halo2curves::bn256::G1Affine> =
serde_json::from_slice(&commitment_ser[..]).unwrap();
let reference_commitment = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
message,
(vk.cs().blinding_factors() + 1) as u32,
¶ms,
);
assert_eq!(commitment, reference_commitment);
}
    async fn verify_field_serialization_roundtrip() {
for i in 0..32 {
let field_element = Fr::from(i);
let serialized = serde_json::to_vec(&field_element).unwrap();
let clamped = wasm_bindgen::Clamped(serialized);
let scale = 2;
let floating_point = feltToFloat(clamped.clone(), scale)
.map_err(|_| "failed")
.unwrap();
assert_eq!(floating_point, (i as f64) / 4.0);
let integer: i128 =
serde_json::from_slice(&feltToInt(clamped.clone()).map_err(|_| "failed").unwrap())
.unwrap();
assert_eq!(integer, i as i128);
let hex_string = format!("{:?}", field_element.clone());
let returned_string: String = feltToBigEndian(clamped.clone())
.map_err(|_| "failed")
.unwrap();
assert_eq!(hex_string, returned_string);
let repr = serde_json::to_string(&field_element).unwrap();
let little_endian_string: String = serde_json::from_str(&repr).unwrap();
let returned_string: String =
feltToLittleEndian(clamped).map_err(|_| "failed").unwrap();
assert_eq!(little_endian_string, returned_string);
}
}
    async fn verify_buffer_to_field_elements() {
let string_high = String::from("high");
let mut buffer = string_high.clone().into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
buffer.resize(16, 0);
let reference_int = u8_array_to_u128_le(buffer.try_into().unwrap());
let reference_field_element_high = PrimeField::from_u128(reference_int);
assert_eq!(field_elements[0], reference_field_element_high);
let string_sample = String::from("a sample string!");
let buffer = string_sample.clone().into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
let reference_int = u8_array_to_u128_le(buffer.try_into().unwrap());
let reference_field_element_sample = PrimeField::from_u128(reference_int);
assert_eq!(field_elements[0], reference_field_element_sample);
let string_concat = string_sample + &string_high;
let buffer = string_concat.into_bytes();
let clamped = wasm_bindgen::Clamped(buffer.clone());
let field_elements_ser = bufferToVecOfFelt(clamped).map_err(|_| "failed").unwrap();
let field_elements: Vec<Fr> = serde_json::from_slice(&field_elements_ser[..]).unwrap();
assert_eq!(field_elements[0], reference_field_element_sample);
assert_eq!(field_elements[1], reference_field_element_high);
}
    async fn verify_hash() {
let mut message: Vec<Fr> = vec![];
for i in 0..32 {
message.push(Fr::from(i as u64));
}
let message_ser = serde_json::to_vec(&message).unwrap();
let hash = poseidonHash(wasm_bindgen::Clamped(message_ser))
.map_err(|_| "failed")
.unwrap();
let hash: Vec<Vec<Fr>> = serde_json::from_slice(&hash[..]).unwrap();
let reference_hash =
PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
message.clone(),
)
.map_err(|_| "failed")
.unwrap();
assert_eq!(hash, reference_hash)
}
    async fn verify_gen_witness() {
let witness = genWitness(
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(INPUT.to_vec()),
)
.map_err(|_| "failed")
.unwrap();
let witness: GraphWitness = serde_json::from_slice(&witness[..]).unwrap();
let reference_witness: GraphWitness = serde_json::from_slice(&WITNESS).unwrap();
assert_eq!(witness, reference_witness);
}
    async fn gen_pk_test() {
let vk = genVk(
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
true,
)
.map_err(|_| "failed")
.unwrap();
let pk = genPk(
wasm_bindgen::Clamped(vk),
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
)
.map_err(|_| "failed")
.unwrap();
assert!(pk.len() > 0);
}
    async fn gen_vk_test() {
let vk = genVk(
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
true,
)
.map_err(|_| "failed")
.unwrap();
assert!(vk.len() > 0);
}
    async fn pk_is_valid_test() {
let vk = genVk(
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
true,
)
.map_err(|_| "failed")
.unwrap();
let pk = genPk(
wasm_bindgen::Clamped(vk.clone()),
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
)
.map_err(|_| "failed")
.unwrap();
let proof = prove(
wasm_bindgen::Clamped(WITNESS.to_vec()),
wasm_bindgen::Clamped(pk.clone()),
wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
)
.map_err(|_| "failed")
.unwrap();
assert!(proof.len() > 0);
let value = verify(
wasm_bindgen::Clamped(proof.to_vec()),
wasm_bindgen::Clamped(vk),
wasm_bindgen::Clamped(SETTINGS.to_vec()),
wasm_bindgen::Clamped(SRS.to_vec()),
)
.map_err(|_| "failed")
.unwrap();
assert!(value);
}
    async fn verify_validations() {
let witness = witnessValidation(wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()));
assert!(witness.is_err());
let witness = witnessValidation(wasm_bindgen::Clamped(WITNESS.to_vec()));
assert!(witness.is_ok());
let circuit = compiledCircuitValidation(wasm_bindgen::Clamped(NETWORK.to_vec()));
assert!(circuit.is_err());
let circuit = compiledCircuitValidation(wasm_bindgen::Clamped(NETWORK_COMPILED.to_vec()));
assert!(circuit.is_ok());
let input = inputValidation(wasm_bindgen::Clamped(WITNESS.to_vec()));
assert!(input.is_err());
let input = inputValidation(wasm_bindgen::Clamped(INPUT.to_vec()));
assert!(input.is_ok());
let proof = proofValidation(wasm_bindgen::Clamped(WITNESS.to_vec()));
assert!(proof.is_err());
let proof = proofValidation(wasm_bindgen::Clamped(PROOF.to_vec()));
assert!(proof.is_ok());
let vk = vkValidation(
wasm_bindgen::Clamped(VK.to_vec()),
wasm_bindgen::Clamped(SETTINGS.to_vec()),
);
assert!(vk.is_ok());
let pk = pkValidation(
wasm_bindgen::Clamped(PK.to_vec()),
wasm_bindgen::Clamped(SETTINGS.to_vec()),
);
assert!(pk.is_ok());
let settings = settingsValidation(wasm_bindgen::Clamped(PROOF.to_vec()));
assert!(settings.is_err());
let settings = settingsValidation(wasm_bindgen::Clamped(SETTINGS.to_vec()));
assert!(settings.is_ok());
let srs = srsValidation(wasm_bindgen::Clamped(SRS.to_vec()));
assert!(srs.is_ok());
}
}
from __future__ import annotations
import typing
import os
from os import path
import json
from dataclasses import dataclass
import re
import numpy as np
class SafeDict(dict):
def __missing__(self, key):
return '{' + key + '}'
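# SafeDict lets str.format_map leave unknown placeholders intact, e.g.
# '{include}{main}'.format_map(SafeDict(main='x')) == '{include}x', so a
# template can be filled in stages.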
circom_template_string = '''pragma circom 2.0.0;
{include}
template Model() {brace_left}
{signal}
{component}
{main}
{brace_right}
component main = Model();
'''
templates: typing.Dict[str, Template] = {
}
def parse_shape(shape: typing.List[int]) -> str:
'''parse shape to integers enclosed by []'''
shape_str = ''
for dim in shape:
shape_str += '[{}]'.format(dim)
return shape_str
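# e.g. parse_shape([2, 3]) == '[2][3]'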
def parse_index(shape: typing.List[int]) -> str:
'''parse shape to indices enclosed by []'''
index_str = ''
for i in range(len(shape)):
index_str += '[i{}]'.format(i)
return index_str
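# e.g. parse_index([2, 3]) == '[i0][i1]'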
@dataclass
class Template:
op_name: str
fpath: str
    args: typing.Dict[str, typing.Any]
input_names: typing.List[str] = None
input_dims: typing.List[int] = None
output_names: typing.List[str] = None
output_dims: typing.List[int] = None
def __str__(self) -> str:
args_str = ', '.join(self.args)
args_str = '(' + args_str + ')'
return '{:>20}{:30} {}{}{}{} \t<-- {}'.format(
self.op_name, args_str,
self.input_names, self.input_dims,
self.output_names, self.output_dims,
self.fpath)
def file_parse(fpath):
'''parse circom file and register templates'''
with open(fpath, 'r') as f:
lines = f.read().split('\n')
    lines = [l for l in lines if not l.strip().startswith('//')]
lines = ' '.join(lines)
    lines = re.sub(r'/\*.*?\*/', 'IGN', lines)
    funcs = re.findall(r'template (\w+) ?\((.*?)\) ?\{(.*?)\}', lines)
for func in funcs:
op_name = func[0].strip()
args = func[1].split(',')
main = func[2].strip()
assert op_name not in templates, \
'duplicated template: {} in {} vs. {}'.format(
op_name, templates[op_name].fpath, fpath)
        signals = re.findall(r'signal (\w+) (\w+)(.*?);', main)
infos = [[] for i in range(4)]
        for sig in signals:
sig_types = ['input', 'output']
assert sig[0] in sig_types, sig[1] + ' | ' + main
idx = sig_types.index(sig[0])
infos[idx*2+0].append(sig[1])
sig_dim = sig[2].count('[')
infos[idx*2+1].append(sig_dim)
templates[op_name] = Template(
op_name, fpath,
[a.strip() for a in args],
*infos)
def dir_parse(dir_path, skips=[]):
'''parse circom files in a directory'''
names = os.listdir(dir_path)
for name in names:
if name in skips:
continue
fpath = path.join(dir_path, name)
if os.path.isdir(fpath):
dir_parse(fpath)
elif os.path.isfile(fpath):
if fpath.endswith('.circom'):
file_parse(fpath)
@dataclass
class Signal:
name: str
shape: typing.List[int]
value: typing.Any = None
def inject_signal(self, comp_name: str) -> str:
'''inject signal into the beginning of the circuit'''
if self.value is not None or self.name == 'out' or self.name == 'remainder':
return 'signal input {}_{}{};\n'.format(
comp_name, self.name, parse_shape(self.shape))
return ''
def inject_main(self, comp_name: str, prev_comp_name: str = None, prev_signal: Signal = None) -> str:
'''inject signal into main'''
inject_str = ''
if self.value is not None or self.name == 'out' or self.name == 'remainder':
if comp_name.endswith('softmax') and self.name == 'out':
inject_str += '{}.out <== {}_out[0];\n'.format(
comp_name, comp_name)
return inject_str
for i in range(len(self.shape)):
inject_str += '{}for (var i{} = 0; i{} < {}; i{}++) {{\n'.format(
' '*i*4, i, i, self.shape[i], i)
if 'activation' in comp_name or 're_lu' in comp_name:
                inject_str += '{}{}{}.{} <== {}_{}{};\n'.format(' '*(i+1)*4,
                    comp_name, parse_index(self.shape), self.name,
                    comp_name, self.name, parse_index(self.shape))
else:
inject_str += '{}{}.{}{} <== {}_{}{};\n'.format(' '*(i+1)*4,
comp_name, self.name, parse_index(self.shape),
comp_name, self.name, parse_index(self.shape))
inject_str += '}'*len(self.shape)+'\n'
return inject_str
if self.shape != prev_signal.shape:
raise ValueError('shape mismatch: {} vs. {}'.format(self.shape, prev_signal.shape))
for i in range(len(self.shape)):
inject_str += '{}for (var i{} = 0; i{} < {}; i{}++) {{\n'.format(
' '*i*4, i, i, self.shape[i], i)
if 'activation' in comp_name or 're_lu' in comp_name:
inject_str += '{}{}{}.{} <== {}.{}{};\n'.format(' '*(i+1)*4,
comp_name, parse_index(self.shape), self.name,
prev_comp_name, prev_signal.name, parse_index(self.shape))
elif 'activation' in prev_comp_name or 're_lu' in prev_comp_name:
inject_str += '{}{}.{}{} <== {}{}.{};\n'.format(' '*(i+1)*4,
comp_name, self.name, parse_index(self.shape),
prev_comp_name, parse_index(self.shape), prev_signal.name)
else:
inject_str += '{}{}.{}{} <== {}.{}{};\n'.format(' '*(i+1)*4,
comp_name, self.name, parse_index(self.shape),
prev_comp_name, prev_signal.name, parse_index(self.shape))
inject_str += '}'*len(self.shape)+'\n'
return inject_str
def inject_input_signal(self) -> str:
'''inject the circuit input signal'''
if self.value is not None:
raise ValueError('input signal should not have value')
return 'signal input in{};\n'.format(parse_shape(self.shape))
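    # e.g. a shape of [2, 3] yields: signal input in[2][3];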
    def inject_output_signal(self) -> str:
'''inject the circuit output signal'''
if self.value is not None:
raise ValueError('output signal should not have value')
return 'signal output out{};\n'.format(parse_shape(self.shape))
def inject_input_main(self, comp_name: str) -> str:
'''inject the circuit input signal into main'''
if self.value is not None:
raise ValueError('input signal should not have value')
inject_str = ''
for i in range(len(self.shape)):
inject_str += '{}for (var i{} = 0; i{} < {}; i{}++) {{\n'.format(
' '*i*4, i, i, self.shape[i], i)
inject_str += '{}{}.{}{} <== in{};\n'.format(' '*(i+1)*4,
comp_name, self.name, parse_index(self.shape),
parse_index(self.shape))
inject_str += '}'*len(self.shape)+'\n'
return inject_str
def inject_output_main(self, prev_comp_name: str, prev_signal: Signal) -> str:
'''inject the circuit output signal into main'''
if self.value is not None:
raise ValueError('output signal should not have value')
if self.shape != prev_signal.shape:
raise ValueError('shape mismatch: {} vs. {}'.format(self.shape, prev_signal.shape))
if 'softmax' in prev_comp_name:
return 'out[0] <== {}.out;\n'.format(prev_comp_name)
inject_str = ''
for i in range(len(self.shape)):
inject_str += '{}for (var i{} = 0; i{} < {}; i{}++) {{\n'.format(
' '*i*4, i, i, self.shape[i], i)
if 're_lu' in prev_comp_name:
inject_str += '{}out{} <== {}{}.{};\n'.format(' '*(i+1)*4,
parse_index(self.shape),
prev_comp_name, parse_index(self.shape), prev_signal.name)
else:
inject_str += '{}out{} <== {}.{}{};\n'.format(' '*(i+1)*4,
parse_index(self.shape),
                    prev_comp_name, prev_signal.name, parse_index(self.shape))
inject_str += '}'*len(self.shape)+'\n'
return inject_str
@dataclass
class Component:
name: str
template: Template
inputs: typing.List[Signal]
outputs: typing.List[Signal]
args: typing.Dict[str, typing.Any] = None
def inject_include(self) -> str:
        '''include the component template'''
        return 'include "../{}";\n'.format(self.template.fpath)
def inject_signal(self, prev_comp: Component = None, last_comp: bool = False) -> str:
'''inject the component signals'''
inject_str = ''
for signal in self.inputs:
if signal.name == 'out' or signal.name == 'remainder':
inject_str += signal.inject_signal(self.name)
if last_comp is True and signal.name == 'out':
inject_str += signal.inject_output_signal()
elif signal.value is None and prev_comp is None:
inject_str += signal.inject_input_signal()
elif signal.value is not None:
inject_str += signal.inject_signal(self.name)
return inject_str
def inject_component(self) -> str:
'''inject the component declaration'''
if self.template.op_name == 'ReLU':
for signal in self.inputs:
if signal.name == 'out':
output_signal = signal
break
inject_str = 'component {}{};\n'.format(self.name, parse_shape(output_signal.shape))
for i in range(len(output_signal.shape)):
inject_str += '{}for (var i{} = 0; i{} < {}; i{}++) {{\n'.format(
' '*i*4, i, i, output_signal.shape[i], i)
inject_str += '{}{}{} = ReLU();\n'.format(' '*(i+1)*4,
self.name, parse_index(output_signal.shape))
inject_str += '}'*len(output_signal.shape)+'\n'
return inject_str
return 'component {} = {}({});\n'.format(
self.name, self.template.op_name, self.parse_args(self.template.args, self.args))
def inject_main(self, prev_comp: Component = None, last_comp: bool = False) -> str:
'''inject the component main'''
inject_str = ''
for signal in self.inputs:
if signal.value is not None or signal.name == 'out' or signal.name == 'remainder':
                inject_str += signal.inject_main(self.name)
elif prev_comp is None:
inject_str += signal.inject_input_main(self.name)
else:
                output_signal = None
                for sig in prev_comp.inputs:
if sig.name == 'out':
output_signal = sig
break
if output_signal is None:
output_signal = prev_comp.outputs[0]
inject_str += signal.inject_main(self.name, prev_comp.name, output_signal)
if last_comp:
for signal in self.inputs:
if signal.name == 'out':
inject_str += signal.inject_output_main(self.name, signal)
break
for signal in self.outputs:
inject_str += signal.inject_output_main(self.name, signal)
return inject_str
def to_json(self, dec: int) -> typing.Dict[str, typing.Any]:
'''convert the component params to json format'''
json_dict = {}
for signal in self.inputs:
if signal.value is not None:
if signal.name == 'bias' or signal.name == 'b':
scaling = float(10**(2*dec))
else:
scaling = float(10**dec)
value = [str(int(v*scaling)) for v in signal.value.flatten().tolist()]
if len(signal.shape) > 1:
value = np.array(value).reshape(signal.shape).tolist()
json_dict.update({f'{self.name}_{signal.name}': value})
return json_dict
@staticmethod
def parse_args(template_args: typing.List[str], args: typing.Dict[str, typing.Any]) -> str:
'''parse the args to a format string, ready to be injected'''
args_str = '{'+'}, {'.join(template_args)+'}'
return args_str.format(**args)
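    # e.g. parse_args(['nRows', 'nCols'], {'nRows': 2, 'nCols': 3}) == '2, 3'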
@dataclass
class Circuit:
components: typing.List[Component]
def __init__(self):
self.components = []
def add_component(self, component: Component):
        self.components.append(component)
def add_components(self, components: typing.List[Component]):
self.components.extend(components)
def inject_include(self) -> str:
        '''inject the include statements'''
inject_str = []
for component in self.components:
inject_str.append(component.inject_include())
return ''.join(set(inject_str))
def inject_signal(self) -> str:
'''inject the signal declarations'''
inject_str = self.components[0].inject_signal()
for i in range(1, len(self.components)):
inject_str += self.components[i].inject_signal(self.components[i-1], i==len(self.components)-1)
return inject_str
def inject_component(self) -> str:
'''inject the component declarations'''
inject_str = ''
for component in self.components:
inject_str += component.inject_component()
return inject_str
def inject_main(self) -> str:
'''inject the main template'''
inject_str = self.components[0].inject_main()
for i in range(1, len(self.components)):
inject_str += self.components[i].inject_main(self.components[i-1], i==len(self.components)-1)
return inject_str
def to_circom(self) -> str:
'''convert the circuit to a circom file'''
return circom_template_string.format(**{
'include': self.inject_include(),
'brace_left': '{',
'signal': self.inject_signal(),
'component': self.inject_component(),
'main': self.inject_main(),
'brace_right': '}',
})
def to_json(self, dec: int) -> str:
'''convert the model weights to json format'''
json_dict = {}
for component in self.components:
json_dict.update(component.to_json(dec))
        return json.dumps(json_dict)
# Read keras model into list of parameters like op, input, output, weight, bias
from __future__ import annotations
from dataclasses import dataclass
import typing
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Layer as KerasLayer
import numpy as np
supported_ops = [
'Activation',
'AveragePooling2D',
'BatchNormalization',
'Conv2D',
'Dense',
'Flatten',
'GlobalAveragePooling2D',
'GlobalMaxPooling2D',
'MaxPooling2D',
'ReLU',
'Softmax',
]
skip_ops = [
'Dropout',
'InputLayer',
]
# read each layer in a model and convert it to a class called Layer
@dataclass
class Layer:
''' A single layer in a Keras model. '''
op: str
name: str
input: typing.List[int]
output: typing.List[int]
config: typing.Dict[str, typing.Any]
weights: typing.List[np.ndarray]
def __init__(self, layer: KerasLayer):
self.op = layer.__class__.__name__
self.name = layer.name
self.input = layer.input_shape[1:]
self.output = layer.output_shape[1:]
self.config = layer.get_config()
self.weights = layer.get_weights()
class Model:
layers: typing.List[Layer]
def __init__(self, filename: str, raw: bool = False):
''' Load a Keras model from a file. '''
model = load_model(filename)
self.layers = [Layer(layer) for layer in model.layers if self._for_transpilation(layer.__class__.__name__)]
@staticmethod
def _for_transpilation(op: str) -> bool:
if op in skip_ops:
return False
if op in supported_ops:
return True
raise NotImplementedError(f'Unsupported op: {op}') |
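# Illustrative usage sketch (assumes a trained Keras file 'model.h5' exists):
#
#   model = Model('model.h5')
#   for layer in model.layers:
#       print(layer.op, layer.name, layer.input, layer.output)
#
# Because _for_transpilation raises on unknown ops, a model either loads with every
# layer supported (skip_ops are silently dropped) or fails before transpilation starts.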
from .circom import Circuit, Component
python_template_string = '''""" Make an integer-only circuit of the corresponding CIRCOM circuit.
Usage:
circuit.py <circuit.json> <input.json> [-o <output>]
circuit.py (-h | --help)
Options:
-h --help Show this screen.
-o <output> --output=<output> Output directory [default: output].
"""
from docopt import docopt
import json
try:
    from keras2circom.util import *
except ImportError:
    import sys
    import os
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    from keras2circom.util import *
def inference(input, circuit):
out = input['in']
output = {brackets}
{components}
return out, output
def main():
""" Main entry point of the app """
args = docopt(__doc__)
with open(args['<input.json>']) as f:
input = json.load(f)
with open(args['<circuit.json>']) as f:
circuit = json.load(f)
out, output = inference(input, circuit)
with open(args['--output'] + '/output.json', 'w') as f:
json.dump(output, f)
if __name__ == "__main__":
""" This is executed when run from the command line """
main()
'''
def to_py(circuit: Circuit, dec: int) -> str:
comp_str = ""
for component in circuit.components:
comp_str += transpile_component(component, dec)
return python_template_string.format(
brackets="{}",
components=comp_str,
)
def transpile_component(component: Component, dec: int) -> str:
comp_str = ""
if component.template.op_name == "AveragePooling2D":
comp_str += " out, remainder = AveragePooling2DInt({nRows}, {nCols}, {nChannels}, {poolSize}, {strides}, {input})\n".format(
nRows=component.args["nRows"],
nCols=component.args["nCols"],
nChannels=component.args["nChannels"],
poolSize=component.args["poolSize"],
strides=component.args["strides"],
input="out"
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
comp_str += " output['{name}_remainder'] = remainder\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "BatchNormalization2D":
comp_str += " out, remainder = BatchNormalizationInt({nRows}, {nCols}, {nChannels}, {n}, {input}, {a}, {b})\n".format(
nRows=component.args["nRows"],
nCols=component.args["nCols"],
nChannels=component.args["nChannels"],
n=component.args["n"], |
input="out",
a="circuit['{name}_a']".format(name=component.name),
b="circuit['{name}_b']".format(name=component.name),
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
comp_str += " output['{name}_remainder'] = remainder\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "Conv1D":
comp_str += " out, remainder = Conv1DInt({nInputs}, {nChannels}, {nFilters}, {kernelSize}, {strides}, {n}, {input}, {weights}, {bias})\n".format(
nInputs=component.args["nInputs"],
nChannels=component.args["nChannels"],
nFilters=component.args["nFilters"],
kernelSize=component.args["kernelSize"],
strides=component.args["strides"],
n=component.args["n"],
input="out",
weights="circuit['{name}_weights']".format(name=component.name),
bias="circuit['{name}_bias']".format(name=component.name),
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
comp_str += " output['{name}_remainder'] = remainder\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "Conv2D":
comp_str += " out, remainder = Conv2DInt({nRows}, {nCols}, {nChannels}, {nFilters}, {kernelSize}, {strides}, {n}, {input}, {weights}, {bias})\n".format(
nRows=component.args["nRows"],
nCols=component.args["nCols"],
nChannels=component.args["nChannels"],
nFilters=component.args["nFilters"],
kernelSize=component.args["kernelSize"],
strides=component.args["strides"],
n=component.args["n"],
input="out",
weights="circuit['{name}_weights']".format(name=component.name),
        bias="circuit['{name}_bias']".format(name=component.name),
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
comp_str += " output['{name}_remainder'] = remainder\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "Dense":
comp_str += " out, remainder = DenseInt({nInputs}, {nOutputs}, {n}, {input}, {weights}, {bias})\n".format(
nInputs=component.args["nInputs"],
nOutputs=component.args["nOutputs"],
n=component.args["n"],
input="out",
weights="circuit['{name}_weights']".format(name=component.name),
bias="circuit['{name}_bias']".format(name=component.name),
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
comp_str += " output['{name}_remainder'] = remainder\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "GlobalAveragePooling2D":
comp_str += " out, remainder = GlobalAveragePooling2DInt({nRows}, {nCols}, {nChannels}, {input})\n".format(
nRows=component.args["nRows"],
nCols=component.args["nCols"],
nChannels=component.args["nChannels"],
input="out"
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
comp_str += " output['{name}_remainder'] = remainder\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "GlobalMaxPooling2D":
comp_str += " out = GlobalMaxPooling2DInt({nRows}, {nCols}, {nChannels}, {input})\n".format(
nRows=component.args["nRows"],
nCols=component.args["nCols"],
nChannels=component.args["nChannels"],
input="out"
)
comp_str += " output['{name}_out'] = out\n".format(
            name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "MaxPooling2D":
comp_str += " out = MaxPooling2DInt({nRows}, {nCols}, {nChannels}, {poolSize}, {strides}, {input})\n".format(
nRows=component.args["nRows"],
nCols=component.args["nCols"],
nChannels=component.args["nChannels"],
poolSize=component.args["poolSize"],
strides=component.args["strides"],
input="out"
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "Flatten2D":
comp_str += " out = Flatten2DInt({nRows}, {nCols}, {nChannels}, {input})\n".format(
nRows=component.args["nRows"],
nCols=component.args["nCols"],
nChannels=component.args["nChannels"],
input="out"
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "ReLU":
nRows, nCols, nChannels = component.inputs[0].shape
comp_str += " out = ReLUInt({nRows}, {nCols}, {nChannels}, {input})\n".format(
nRows=nRows,
nCols=nCols,
nChannels=nChannels,
input="out"
)
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
return comp_str+"\n"
elif component.template.op_name == "ArgMax":
comp_str += " out = ArgMaxInt(out)\n"
comp_str += " output['{name}_out'] = out\n".format(
name=component.name,
)
return comp_str+"\n"
else:
raise ValueError("Unknown component type: {}".format(component.template.op_name)) |
from .circom |
import *
from .model |
import *
from .script |
import * |
import os
def transpile(filename: str, output_dir: str = 'output', raw: bool = False, dec: int = 18) -> Circuit:
''' Transpile a Keras model to a CIRCOM circuit.'''
model = Model(filename, raw)
circuit = Circuit()
for layer in model.layers[:-1]:
circuit.add_components(transpile_layer(layer, dec))
circuit.add_components(transpile_layer(model.layers[-1], dec, True))
if raw:
if circuit.components[-1].template.op_name == 'ArgMax':
circuit.components.pop()
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_dir + '/circuit.circom', 'w') as f:
f.write(circuit.to_circom())
with open(output_dir + '/circuit.json', 'w') as f:
f.write(circuit.to_json(int(dec)))
with open(output_dir + '/circuit.py', 'w') as f:
f.write(to_py(circuit, int(dec)))
return circuit
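# Illustrative usage sketch (assumes 'model.h5' exists): transpile writes
# circuit.circom, circuit.json and circuit.py into output_dir and returns the
# Circuit for inspection:
#
#   circuit = transpile('model.h5', output_dir='output', raw=False, dec=18)
#   print(len(circuit.components))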
def transpile_layer(layer: Layer, dec: int = 18, last: bool = False) -> typing.List[Component]:
''' Transpile a Keras layer to CIRCOM component(s).'''
if layer.op == 'Activation':
if layer.config['activation'] == 'softmax':
if last:
return transpile_ArgMax(layer)
raise ValueError('Softmax must be the last layer')
if layer.config['activation'] == 'relu':
return transpile_ReLU(layer)
if layer.config['activation'] == 'linear':
return []
raise NotImplementedError(f'Activation {layer.config["activation"]} not implemented')
if layer.op == 'Softmax':
if last:
return transpile_ArgMax(layer)
raise ValueError('Softmax must be the last layer')
if layer.op == 'ReLU':
return transpile_ReLU(layer)
if layer.op == 'AveragePooling2D':
return transpile_AveragePooling2D(layer)
if layer.op == 'BatchNormalization':
return transpile_BatchNormalization2D(layer, dec)
if layer.op == 'Conv2D':
return transpile_Conv2D(layer, dec) |
if layer.op == 'Dense':
return transpile_Dense(layer, dec, last)
if layer.op == 'Flatten':
return transpile_Flatten2D(layer)
if layer.op == 'GlobalAveragePooling2D':
return transpile_GlobalAveragePooling2D(layer)
if layer.op == 'GlobalMaxPooling2D':
return transpile_GlobalMaxPooling2D(layer)
if layer.op == 'MaxPooling2D':
return transpile_MaxPooling2D(layer)
raise NotImplementedError(f'Layer {layer.op} is not supported yet.')
def transpile_ArgMax(layer: Layer) -> typing.List[Component]:
return [Component(layer.name, templates['ArgMax'], [Signal('in', layer.output), Signal('out', (1,))], [], {'n': layer.output[0]})]
def transpile_ReLU(layer: Layer) -> typing.List[Component]:
return [Component(layer.name, templates['ReLU'], [Signal('in', layer.output), Signal('out', layer.output)], [])]
def transpile_AveragePooling2D(layer: Layer) -> typing.List[Component]:
if layer.config['data_format'] != 'channels_last':
raise NotImplementedError('Only data_format="channels_last" is supported')
if layer.config['padding'] != 'valid':
raise NotImplementedError('Only padding="valid" is supported')
if layer.config['pool_size'][0] != layer.config['pool_size'][1]:
raise NotImplementedError('Only pool_size[0] == pool_size[1] is supported')
if layer.config['strides'][0] != layer.config['strides'][1]:
raise NotImplementedError('Only strides[0] == strides[1] is supported')
return [Component(layer.name, templates['AveragePooling2D'], [Signal('in', layer.input), Signal('out', layer.output), Signal('remainder', layer.output)],[],{
'nRows': layer.input[0],
'nCols': layer.input[1],
'nChannels': layer.input[2],
'poolSize': layer.config['pool_size'][0],
'strides': layer.config['strides'][0],
})]
def transpile_BatchNormalization2D(layer: Layer, dec: int) -> typing.List[Component]:
    if len(layer.input) != 3:
        raise NotImplementedError('Only 2D inputs are supported')
if layer.config['axis'][0] != 3:
raise NotImplementedError('Only axis=3 is supported')
    if not layer.config['center']:
        raise NotImplementedError('Only center=True is supported')
    if not layer.config['scale']:
        raise NotImplementedError('Only scale=True is supported')
gamma = layer.weights[0]
beta = layer.weights[1]
moving_mean = layer.weights[2]
moving_var = layer.weights[3]
epsilon = layer.config['epsilon']
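    # Fold the four BatchNorm parameters into one affine pair so the circuit does a
    # single multiply-add per element: y = gamma*(x-mean)/sqrt(var+eps) + beta
    # rewrites as y = a*x + b with a = gamma/sqrt(var+eps) and b = beta - a*mean.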
a = gamma/(moving_var+epsilon)**.5
b = beta-gamma*moving_mean/(moving_var+epsilon)**.5
return [Component(layer.name, templates['BatchNormalization2D'], [
Signal('in', layer.input),
Signal('a', a.shape, a),
Signal('b', b.shape, b),
Signal('out', layer.output),
Signal('remainder', layer.output),
],[],{
'nRows': layer.input[0],
'nCols': layer.input[1],
'nChannels': layer.input[2],
        'n': '10**'+str(dec),
})]
def transpile_Conv2D(layer: Layer, dec: int) -> typing.List[Component]:
if layer.config['data_format'] != 'channels_last':
raise NotImplementedError('Only data_format="channels_last" is supported')
if layer.config['padding'] != 'valid':
raise NotImplementedError('Only padding="valid" is supported')
if layer.config['strides'][0] != layer.config['strides'][1]:
raise NotImplementedError('Only strides[0] == strides[1] is supported')
if layer.config['kernel_size'][0] != layer.config['kernel_size'][1]:
raise NotImplementedError('Only kernel_size[0] == kernel_size[1] is supported')
if layer.config['dilation_rate'][0] != 1:
raise NotImplementedError('Only dilation_rate[0] == 1 is supported')
if layer.config['dilation_rate'][1] != 1:
raise NotImplementedError('Only dilation_rate[1] == 1 is supported')
if layer.config['groups'] != 1:
raise NotImplementedError('Only groups == 1 is supported')
    if layer.config['activation'] not in ['linear', 'relu']:
raise NotImplementedError(f'Activation {layer.config["activation"]} is not supported')
    if not layer.config['use_bias']:
layer.weights.append(np.zeros(layer.weights[0].shape[-1]))
conv = Component(layer.name, templates['Conv2D'], [
Signal('in', layer.input),
Signal('weights', layer.weights[0].shape, layer.weights[0]),
Signal('bias', layer.weights[1].shape, layer.weights[1]),
Signal('out', layer.output),
Signal('remainder', layer.output),
],[],{
'nRows': layer.input[0],
'nCols': layer.input[1],
'nChannels': layer.input[2],
'nFilters': layer.config['filters'],
'kernelSize': layer.config['kernel_size'][0],
'strides': layer.config['strides'][0],
        'n': '10**'+str(dec),
})
if layer.config['activation'] == 'relu':
activation = Component(layer.name+'_re_lu', templates['ReLU'], [Signal('in', layer.output), Signal('out', layer.output)], [])
return [conv, activation]
return [conv]
def transpile_Dense(layer: Layer, dec: int, last: bool = False) -> typing.List[Component]:
if not last and layer.config['activation'] == 'softmax':
raise NotImplementedError('Softmax is only supported as last layer')
if layer.config['activation'] not in ['linear', 'relu', 'softmax']:
raise NotImplementedError(f'Activation {layer.config["activation"]} is not supported')
    if not layer.config['use_bias']:
layer.weights.append(np.zeros(layer.weights[0].shape[-1]))
dense = Component(layer.name, templates['Dense'], [
Signal('in', layer.input),
Signal('weights', layer.weights[0].shape, layer.weights[0]),
Signal('bias', layer.weights[1].shape, layer.weights[1]),
Signal('out', layer.output),
Signal('remainder', layer.output),
],[],{
'nInputs': layer.input[0],
'nOutputs': layer.output[0],
        'n': '10**'+str(dec),
})
    if layer.config['activation'] == 'relu':
activation = Component(layer.name+'_re_lu', templates['ReLU'], [Signal('in', layer.output), Signal('out', layer.output)], [])
return [dense, activation]
if layer.config['activation'] == 'softmax':
activation = Component(layer.name+'_softmax', templates['ArgMax'], [Signal('in', layer.output), Signal('out', (1,))], [], {'n': layer.output[0]})
return [dense, activation]
return [dense]
def transpile_Flatten2D(layer: Layer) -> typing.List[Component]:
    if len(layer.input) != 3:
raise NotImplementedError('Only 2D inputs are supported')
return [Component(layer.name, templates['Flatten2D'], [
Signal('in', layer.input),
Signal('out', layer.output),
],[],{
'nRows': layer.input[0],
'nCols': layer.input[1],
'nChannels': layer.input[2],
})]
def transpile_GlobalAveragePooling2D(layer: Layer) -> typing.List[Component]:
if layer.config['data_format'] != 'channels_last':
raise NotImplementedError('Only data_format="channels_last" is supported')
if layer.config['keepdims']:
raise NotImplementedError('Only keepdims=False is supported')
return [Component(layer.name, templates['GlobalAveragePooling2D'], [
Signal('in', layer.input),
Signal('out', layer.output),
Signal('remainder', layer.output),
],[],{
'nRows': layer.input[0],
'nCols': layer.input[1],
'nChannels': layer.input[2],
})]
def transpile_GlobalMaxPooling2D(layer: Layer) -> typing.List[Component]:
if layer.config['data_format'] != 'channels_last':
raise NotImplementedError('Only data_format="channels_last" is supported')
if layer.config['keepdims']:
raise NotImplementedError('Only keepdims=False is supported')
return [Component(layer.name, templates['GlobalMaxPooling2D'], [
Signal('in', layer.input),
Signal('out', layer.output),
],[],{
'nRows': layer.input[0],
        'nCols': layer.input[1],
'nChannels': layer.input[2],
})]
def transpile_MaxPooling2D(layer: Layer) -> typing.List[Component]:
if layer.config['data_format'] != 'channels_last':
raise NotImplementedError('Only data_format="channels_last" is supported')
if layer.config['padding'] != 'valid':
raise NotImplementedError('Only padding="valid" is supported')
if layer.config['pool_size'][0] != layer.config['pool_size'][1]:
raise NotImplementedError('Only pool_size[0] == pool_size[1] is supported')
if layer.config['strides'][0] != layer.config['strides'][1]:
raise NotImplementedError('Only strides[0] == strides[1] is supported')
return [Component(layer.name, templates['MaxPooling2D'], [Signal('in', layer.input), Signal('out', layer.output)], [],{
'nRows': layer.input[0],
'nCols': layer.input[1],
'nChannels': layer.input[2],
'poolSize': layer.config['pool_size'][0],
'strides': layer.config['strides'][0],
})] |
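# Integer-only reference implementations of the circuit ops. Ops that divide return
# the quotient and the remainder separately (the remainder signals above suggest the
# circom side checks the division as a multiplication plus remainder), and results
# are stringified so arbitrarily large integers survive JSON round-trips.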
def AveragePooling2DInt(nRows, nCols, nChannels, poolSize, strides, input):
    out = [[[0 for _ in range(nChannels)] for _ in range((nCols-poolSize)//strides + 1)] for _ in range((nRows-poolSize)//strides + 1)]
    remainder = [[[None for _ in range(nChannels)] for _ in range((nCols-poolSize)//strides + 1)] for _ in range((nRows-poolSize)//strides + 1)]
    for i in range((nRows-poolSize)//strides + 1):
        for j in range((nCols-poolSize)//strides + 1):
            for k in range(nChannels):
                for x in range(poolSize):
                    for y in range(poolSize):
                        out[i][j][k] += int(input[i*strides+x][j*strides+y][k])
                remainder[i][j][k] = str(out[i][j][k] % poolSize**2)
                out[i][j][k] = str(out[i][j][k] // poolSize**2)
    return out, remainder
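# Worked example: a 2x2 average pool with poolSize=2, strides=2 on one channel sums
# four cells and splits the sum modulo poolSize**2 = 4:
#
#   out, rem = AveragePooling2DInt(2, 2, 1, 2, 2, [[[1], [2]], [[3], [5]]])
#   # sum = 11 -> out == [[['2']]], rem == [[['3']]]  since 11 == 2*4 + 3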
def BatchNormalizationInt(nRows, nCols, nChannels, n, X_in, a_in, b_in):
out = [[[None for _ in range(nChannels)] for _ in range(nCols)] for _ in range(nRows)]
remainder = [[[None for _ in range(nChannels)] for _ in range(nCols)] for _ in range(nRows)]
for i in range(nRows):
for j in range(nCols):
for k in range(nChannels):
out[i][j][k] = int(X_in[i][j][k])*int(a_in[k]) + int(b_in[k])
remainder[i][j][k] = str(out[i][j][k] % n)
                out[i][j][k] = str(out[i][j][k] // n)
return out, remainder
def Conv1DInt(nInputs, nChannels, nFilters, kernelSize, strides, n, input, weights, bias):
    out = [[0 for _ in range(nFilters)] for _ in range((nInputs - kernelSize)//strides + 1)]
    remainder = [[None for _ in range(nFilters)] for _ in range((nInputs - kernelSize)//strides + 1)]
    for i in range((nInputs - kernelSize)//strides + 1):
        for j in range(nFilters):
            for k in range(kernelSize):
                for l in range(nChannels):
                    out[i][j] += int(input[i*strides + k][l])*int(weights[k][l][j])
            out[i][j] += int(bias[j])
            remainder[i][j] = str(out[i][j] % n)
            out[i][j] = str(out[i][j] // n)
    return out, remainder
def Conv2DInt(nRows, nCols, nChannels, nFilters, kernelSize, strides, n, input, weights, bias):
    out = [[[0 for _ in range(nFilters)] for _ in range((nCols - kernelSize)//strides + 1)] for _ in range((nRows - kernelSize)//strides + 1)]
    remainder = [[[None for _ in range(nFilters)] for _ in range((nCols - kernelSize)//strides + 1)] for _ in range((nRows - kernelSize)//strides + 1)]
    for i in range((nRows - kernelSize)//strides + 1):
        for j in range((nCols - kernelSize)//strides + 1):
            for m in range(nFilters):
                for k in range(nChannels):
                    for x in range(kernelSize):
                        for y in range(kernelSize):
                            out[i][j][m] += int(input[i*strides+x][j*strides+y][k])*int(weights[x][y][k][m])
                out[i][j][m] += int(bias[m])
                remainder[i][j][m] = str(out[i][j][m] % n)
                out[i][j][m] = str(out[i][j][m] // n)
    return out, remainder
def DenseInt(nInputs, nOutputs, n, input, weights, bias):
out = [0 for _ in range(nOutputs)]
remainder = [None for _ in range(nOutputs)]
for j in range(nOutputs):
for i in range(nInputs):
out[j] += int(input[i])*int(weights[i][j])
out[j] += int(bias[j])
remainder[j] = str(out[j] % n)
        out[j] = str(out[j] // n)
return out, remainder
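# Worked example: with fixed-point scale n, DenseInt returns (acc // n, acc % n) so
# that int(out)*n + int(remainder) reconstructs the exact accumulator:
#
#   out, rem = DenseInt(2, 1, 10, [3, 4], [[2], [5]], [7])
#   # acc = 3*2 + 4*5 + 7 = 33 -> out == ['3'], rem == ['3']  since 33 == 3*10 + 3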
def GlobalAveragePooling2DInt(nRows, nCols, nChannels, input):
out = [0 for _ in range(nChannels)]
remainder = [None for _ in range(nChannels)]
for k in range(nChannels):
for i in range(nRows):
for j in range(nCols):
out[k] += int(input[i][j][k])
remainder[k] = str(out[k] % (nRows * nCols))
        out[k] = str(out[k] // (nRows * nCols))
return out, remainder
def GlobalMaxPooling2DInt(nRows, nCols, nChannels, input):
out = [max(int(input[i][j][k]) for i in range(nRows) for j in range(nCols)) for k in range(nChannels)]
return out
def MaxPooling2DInt(nRows, nCols, nChannels, poolSize, strides, input):
    out = [[[str(max(int(input[i*strides + x][j*strides + y][k]) for x in range(poolSize) for y in range(poolSize))) for k in range(nChannels)] for j in range((nCols - poolSize)//strides + 1)] for i in range((nRows - poolSize)//strides + 1)]
return out
def Flatten2DInt(nRows, nCols, nChannels, input):
    out = [str(int(input[i][j][k])) for i in range(nRows) for j in range(nCols) for k in range(nChannels)]
    return out