text
stringlengths 1
2.05k
|
---|
/// Python-facing mirror of the core [`RunArgs`] configuration.
/// Converted to/from `RunArgs` by the `From` impls below before any
/// core-library call; fields intentionally match `RunArgs` one-to-one.
struct PyRunArgs {
    /// Output tolerance; converted into the core `Tolerance` type.
    pub tolerance: f32,
    /// Fixed-point scale applied to model inputs.
    pub input_scale: crate::Scale,
    /// Fixed-point scale applied to model parameters.
    pub param_scale: crate::Scale,
    /// Multiplier used when rebasing scales.
    pub scale_rebase_multiplier: u32,
    /// (min, max) range covered by the lookup table.
    pub lookup_range: crate::circuit::table::Range,
    /// log2 of the number of circuit rows.
    pub logrows: u32,
    /// Number of inner columns used per allocation.
    pub num_inner_cols: usize,
    /// Visibility of model inputs.
    pub input_visibility: Visibility,
    /// Visibility of model outputs.
    pub output_visibility: Visibility,
    /// Visibility of model parameters.
    pub param_visibility: Visibility,
    /// Named symbolic variables and their values.
    pub variables: Vec<(String, usize)>,
    /// Whether division is rebased.
    pub div_rebasing: bool,
    /// Whether constants with fractional part zero are rebased.
    pub rebase_frac_zero_constants: bool,
    /// Constraint-checking mode used during proving.
    pub check_mode: CheckMode,
    /// Commitment scheme (KZG or IPA); `RunArgs` stores this as an `Option`.
    pub commitment: PyCommitments,
}
impl PyRunArgs {
    /// Build a `PyRunArgs` populated from the library's default `RunArgs`.
    fn new() -> Self {
        RunArgs::default().into()
    }
}
impl From<PyRunArgs> for RunArgs {
    /// Convert the Python-facing arguments into core [`RunArgs`].
    fn from(py_run_args: PyRunArgs) -> Self {
        RunArgs {
            // The raw f32 is wrapped into the core `Tolerance` type.
            tolerance: Tolerance::from(py_run_args.tolerance),
            input_scale: py_run_args.input_scale,
            param_scale: py_run_args.param_scale,
            num_inner_cols: py_run_args.num_inner_cols,
            scale_rebase_multiplier: py_run_args.scale_rebase_multiplier,
            lookup_range: py_run_args.lookup_range,
            logrows: py_run_args.logrows,
            input_visibility: py_run_args.input_visibility,
            output_visibility: py_run_args.output_visibility,
            param_visibility: py_run_args.param_visibility,
            variables: py_run_args.variables,
            div_rebasing: py_run_args.div_rebasing,
            rebase_frac_zero_constants: py_run_args.rebase_frac_zero_constants,
            check_mode: py_run_args.check_mode,
            // Core stores an optional commitment; the Python side always has one.
            commitment: Some(py_run_args.commitment.into()),
        }
    }
}
impl From<RunArgs> for PyRunArgs {
    /// Convert core [`RunArgs`] into the Python-facing wrapper.
    ///
    /// Implemented as `From` (rather than the previous hand-written
    /// `Into`) so the matching `Into<PyRunArgs> for RunArgs` comes from
    /// the standard blanket impl; existing `.into()` call sites are
    /// unaffected.
    fn from(run_args: RunArgs) -> Self {
        PyRunArgs {
            // Only the numeric tolerance value is exposed to Python.
            tolerance: run_args.tolerance.val,
            input_scale: run_args.input_scale,
            param_scale: run_args.param_scale,
            num_inner_cols: run_args.num_inner_cols,
            scale_rebase_multiplier: run_args.scale_rebase_multiplier,
            lookup_range: run_args.lookup_range,
            logrows: run_args.logrows,
            input_visibility: run_args.input_visibility,
            output_visibility: run_args.output_visibility,
            param_visibility: run_args.param_visibility,
            variables: run_args.variables,
            div_rebasing: run_args.div_rebasing,
            rebase_frac_zero_constants: run_args.rebase_frac_zero_constants,
            check_mode: run_args.check_mode,
            // `From<Option<Commitments>>` maps `None` to KZG (see below in file).
            commitment: run_args.commitment.into(),
        }
    }
}
/// Commitment-scheme selector exposed to Python.
pub enum PyCommitments {
    /// KZG polynomial commitments.
    KZG,
    /// Inner-product-argument commitments.
    IPA,
}
impl From<Option<Commitments>> for PyCommitments {
    /// Convert an optional core commitment into the Python enum.
    /// A `None` deliberately falls back to KZG, the default scheme.
    fn from(commitment: Option<Commitments>) -> Self {
        match commitment {
            Some(Commitments::KZG) => PyCommitments::KZG,
            Some(Commitments::IPA) => PyCommitments::IPA,
            None => PyCommitments::KZG,
        }
    }
}
impl From<PyCommitments> for Commitments {
    /// Convert the Python enum into the core commitment type.
    fn from(py_commitments: PyCommitments) -> Self {
        match py_commitments {
            PyCommitments::KZG => Commitments::KZG,
            PyCommitments::IPA => Commitments::IPA,
        }
    }
}
impl From<Commitments> for PyCommitments {
    /// Convert a core [`Commitments`] into the Python-facing enum.
    ///
    /// Written as `From` instead of the previous manual `Into` impl so
    /// the blanket `Into` implementation is derived automatically;
    /// existing `.into()` call sites keep working.
    fn from(commitments: Commitments) -> Self {
        match commitments {
            Commitments::KZG => PyCommitments::KZG,
            Commitments::IPA => PyCommitments::IPA,
        }
    }
}
impl FromStr for PyCommitments {
    type Err = String;

    /// Parse `"kzg"` or `"ipa"` (case-insensitively); any other input
    /// yields an error string.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let lowered = s.to_lowercase();
        if lowered == "kzg" {
            Ok(PyCommitments::KZG)
        } else if lowered == "ipa" {
            Ok(PyCommitments::IPA)
        } else {
            Err("Invalid value for Commitments".to_string())
        }
    }
}
felt,
))]
/// Convert a field-element string into its big-endian representation.
///
/// NOTE(review): relies on `Fr`'s `Debug` formatting producing the
/// big-endian hex form — confirm against the halo2curves version in use.
fn felt_to_big_endian(felt: PyFelt) -> PyResult<String> {
    let felt = crate::pfsys::string_to_field::<Fr>(&felt);
    Ok(format!("{:?}", felt))
}
felt,
))]
fn felt_to_int(felt: PyFelt) -> PyResult<i128> {
let felt = crate::pfsys::string_to_field::<Fr>(&felt);
let in |
t_rep = felt_to_i128(felt);
Ok(int_rep)
}
felt,
scale
))]
/// Convert a field-element string into a float by interpreting it as a
/// fixed-point integer at the given `scale`.
fn felt_to_float(felt: PyFelt, scale: crate::Scale) -> PyResult<f64> {
    let felt = crate::pfsys::string_to_field::<Fr>(&felt);
    let int_rep = felt_to_i128(felt);
    // Dividing by the scale multiplier undoes the fixed-point quantization.
    let multiplier = scale_to_multiplier(scale);
    let float_rep = int_rep as f64 / multiplier;
    Ok(float_rep)
}
input,
scale
))]
/// Quantize a float at the given `scale` and return the resulting field
/// element as a string.
fn float_to_felt(input: f64, scale: crate::Scale) -> PyResult<PyFelt> {
    // Shift of 0.0: quantization is purely multiplicative here.
    let int_rep = quantize_float(&input, 0.0, scale)
        .map_err(|_| PyIOError::new_err("Failed to quantize input"))?;
    let felt = i128_to_felt(int_rep);
    Ok(crate::pfsys::field_to_string::<Fr>(&felt))
}
buffer
))]
fn buffer_to_felts(buffer: Vec<u8>) -> PyResult<Vec<String>> {
fn u8_array_to_u128_le(arr: [u8; 16]) -> u128 {
let mut n: u128 = 0;
for &b in arr.iter().rev() {
n <<= 8;
n |= b as u128;
}
n
}
let buffer = &buffer[..];
let chunks = buffer.chunks_exact(16);
let remainder = chunks.remainder();
let mut remainder = remainder.to_vec();
let chunks: Result<Vec<[u8; 16]>, PyErr> = chunks
.map(|slice| {
let array: [u8; 16] = slice
.try_into()
.map_err(|_| PyIOError::new_err("Failed to slice input buffer"))?;
Ok(array)
})
.collect();
let mut chunks = chunks?;
if !remainder.is_empty() {
remainder.resize(16, 0);
let remainder_array: [u8; 16] = remainder
.try_into()
.map_err(|_| PyIOError::new_err("Failed to slice remainder"))?;
chunks.push(remainder_array);
}
let field_elements: Vec<Fr> = chunks
.iter()
.map(|x| PrimeField::from_u128(u8_array_to_u128_le(*x)))
.collect();
let field_elements: Vec<String> = field_elements
.iter()
.map(|x| crate::pfsys::field_to_string::<Fr>(x))
.collect( |
);
Ok(field_elements)
}
message,
))]
/// Poseidon-hash a message of field-element strings, returning the hash
/// as field-element strings.
fn poseidon_hash(message: Vec<PyFelt>) -> PyResult<Vec<PyFelt>> {
    let message: Vec<Fr> = message
        .iter()
        .map(crate::pfsys::string_to_field::<Fr>)
        .collect::<Vec<_>>();
    // Runs the Poseidon chip outside a circuit to compute the digest.
    let output =
        PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
            message.clone(),
        )
        .map_err(|_| PyIOError::new_err("Failed to run poseidon"))?;
    // Only the first output row is returned to Python.
    let hash = output[0]
        .iter()
        .map(crate::pfsys::field_to_string::<Fr>)
        .collect::<Vec<_>>();
    Ok(hash)
}
message,
vk_path=PathBuf::from(DEFAULT_VK),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
srs_path=None
))]
/// Produce KZG commitments to a message of field elements, using the
/// SRS and verifying key referenced by the given paths.
fn kzg_commit(
    message: Vec<PyFelt>,
    vk_path: PathBuf,
    settings_path: PathBuf,
    srs_path: Option<PathBuf>,
) -> PyResult<Vec<PyG1Affine>> {
    let message: Vec<Fr> = message
        .iter()
        .map(crate::pfsys::string_to_field::<Fr>)
        .collect::<Vec<_>>();
    let settings = GraphSettings::load(&settings_path)
        .map_err(|_| PyIOError::new_err("Failed to load circuit settings"))?;
    // Resolve an explicit SRS path or fall back to the default location
    // for this logrows/commitment combination.
    let srs_path =
        crate::execute::get_srs_path(settings.run_args.logrows, srs_path, Commitments::KZG);
    let srs = load_srs_prover::<KZGCommitmentScheme<Bn256>>(srs_path)
        .map_err(|_| PyIOError::new_err("Failed to load srs"))?;
    let vk = load_vk::<KZGCommitmentScheme<Bn256>, GraphCircuit>(vk_path, settings)
        .map_err(|_| PyIOError::new_err("Failed to load vk"))?;
    let output = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
        message,
        // The vk's blinding-factor count determines the usable rows.
        (vk.cs().blinding_factors() + 1) as u32,
        &srs,
    );
    Ok(output.iter().map(|x| (*x).into()).collect::<Vec<_>>())
}
message,
vk_path=PathBuf::from(DEFAULT_VK),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
srs_path=None
))]
fn ipa_commit(
message: Vec<PyFelt>,
vk_path: PathBuf,
settings_path: PathBuf,
sr |
s_path: Option<PathBuf>,
) -> PyResult<Vec<PyG1Affine>> {
let message: Vec<Fr> = message
.iter()
.map(crate::pfsys::string_to_field::<Fr>)
.collect::<Vec<_>>();
let settings = GraphSettings::load(&settings_path)
.map_err(|_| PyIOError::new_err("Failed to load circuit settings"))?;
let srs_path =
crate::execute::get_srs_path(settings.run_args.logrows, srs_path, Commitments::KZG);
let srs = load_srs_prover::<IPACommitmentScheme<G1Affine>>(srs_path)
.map_err(|_| PyIOError::new_err("Failed to load srs"))?;
let vk = load_vk::<IPACommitmentScheme<G1Affine>, GraphCircuit>(vk_path, settings)
.map_err(|_| PyIOError::new_err("Failed to load vk"))?;
let output = PolyCommitChip::commit::<IPACommitmentScheme<G1Affine>>(
message,
(vk.cs().blinding_factors() + 1) as u32,
&srs,
);
Ok(output.iter().map(|x| (*x).into()).collect::<Vec<_>>())
}
proof_path=PathBuf::from(DEFAULT_PROOF),
witness_path=PathBuf::from(DEFAULT_WITNESS),
))]
/// Swap the commitments in a proof file with those from a witness file.
fn swap_proof_commitments(proof_path: PathBuf, witness_path: PathBuf) -> PyResult<()> {
    crate::execute::swap_proof_commitments_cmd(proof_path, witness_path)
        .map_err(|_| PyIOError::new_err("Failed to swap commitments"))?;
    Ok(())
}
path_to_pk=PathBuf::from(DEFAULT_PK),
circuit_settings_path=PathBuf::from(DEFAULT_SETTINGS),
vk_output_path=PathBuf::from(DEFAULT_VK),
))]
/// Extract the verifying key from a single-circuit proving key and save
/// it to `vk_output_path`. Returns `true` on success.
fn gen_vk_from_pk_single(
    path_to_pk: PathBuf,
    circuit_settings_path: PathBuf,
    vk_output_path: PathBuf,
) -> PyResult<bool> {
    let settings = GraphSettings::load(&circuit_settings_path)
        .map_err(|_| PyIOError::new_err("Failed to load circuit settings"))?;
    let pk = load_pk::<KZGCommitmentScheme<Bn256>, GraphCircuit>(path_to_pk, settings)
        .map_err(|_| PyIOError::new_err("Failed to load pk"))?;
    // The vk is embedded in the pk; no re-derivation is needed.
    let vk = pk.get_vk();
    save_vk::<G1Affine>(&vk_output_path, vk)
        .map_err(|_| PyIOError::new_err("Failed to save vk"))?;
    Ok(true)
}
path_to_pk=PathBuf::from(DEFAULT_PK_AGGREGATED),
vk_output_path=PathBuf::from(DEFAULT_VK_AGGREGATED),
))]
/// Extract the verifying key from an aggregation-circuit proving key and
/// save it to `vk_output_path`. Returns `true` on success.
fn gen_vk_from_pk_aggr(path_to_pk: PathBuf, vk_output_path: PathBuf) -> PyResult<bool> {
    // The aggregation circuit takes unit `()` params instead of settings.
    let pk = load_pk::<KZGCommitmentScheme<Bn256>, AggregationCircuit>(path_to_pk, ())
        .map_err(|_| PyIOError::new_err("Failed to load pk"))?;
    let vk = pk.get_vk();
    save_vk::<G1Affine>(&vk_output_path, vk)
        .map_err(|_| PyIOError::new_err("Failed to save vk"))?;
    Ok(true)
}
model = PathBuf::from(DEFAULT_MODEL),
py_run_args = None
))]
/// Render a model's node table as a string, using default run args when
/// none are supplied.
fn table(model: PathBuf, py_run_args: Option<PyRunArgs>) -> PyResult<String> {
    let run_args: RunArgs = py_run_args.unwrap_or_else(PyRunArgs::new).into();
    let mut reader = File::open(model).map_err(|_| PyIOError::new_err("Failed to open model"))?;
    Model::new(&mut reader, &run_args)
        .map(|m| m.table_nodes())
        .map_err(|_| PyIOError::new_err("Failed to import model"))
}
srs_path,
logrows,
))]
/// Generate a (non-production) KZG structured reference string of the
/// given size and write it to `srs_path`.
fn gen_srs(srs_path: PathBuf, logrows: usize) -> PyResult<()> {
    let params = ezkl_gen_srs::<KZGCommitmentScheme<Bn256>>(logrows as u32);
    save_params::<KZGCommitmentScheme<Bn256>>(&srs_path, &params)?;
    Ok(())
}
settings_path=PathBuf::from(DEFAULT_SETTINGS),
logrows=None,
srs_path=None,
commitment=None,
))]
/// Fetch a structured reference string, sized either from explicit
/// `logrows` or from the circuit settings file. Returns `true` on
/// success.
fn get_srs(
    settings_path: Option<PathBuf>,
    logrows: Option<u32>,
    srs_path: Option<PathBuf>,
    commitment: Option<PyCommitments>,
) -> PyResult<bool> {
    // `Option::map` replaces the previous manual Some/None match.
    let commitment: Option<Commitments> = commitment.map(Into::into);
    // The underlying command is async; drive it on a throwaway runtime.
    Runtime::new()
        .unwrap()
        .block_on(crate::execute::get_srs_cmd(
            srs_path,
            settings_path,
            logrows,
            commitment,
        ))
        .map_err(|e| {
            let err_str = format!("Failed to get srs: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
    Ok(true)
}
model=PathBuf::from(DEFAULT_MODEL),
output=PathBuf::from(DEFAULT_SETTINGS),
py_run_args = None,
))]
/// Generate circuit settings for a model and write them to `output`.
/// Returns `true` on success.
fn gen_settings(
    model: PathBuf,
    output: PathBuf,
    py_run_args: Option<PyRunArgs>,
) -> Result<bool, PyErr> {
    // Fall back to the library defaults when no args are given.
    let run_args: RunArgs = py_run_args.unwrap_or_else(PyRunArgs::new).into();
    crate::execute::gen_circuit_settings(model, output, run_args).map_err(|e| {
        let err_str = format!("Failed to generate settings: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
data = PathBuf::from(DEFAULT_CALIBRATION_FILE),
model = PathBuf::from(DEFAULT_MODEL),
settings = PathBuf::from(DEFAULT_SETTINGS),
target = CalibrationTarget::default(),
lookup_safety_margin = DEFAULT_LOOKUP_SAFETY_MARGIN.parse().unwrap(),
scales = None,
scale_rebase_multiplier = DEFAULT_SCALE_REBASE_MULTIPLIERS.split(",").map(|x| x.parse().unwrap()).collect(),
max_lo |
grows = None,
only_range_check_rebase = DEFAULT_ONLY_RANGE_CHECK_REBASE.parse().unwrap(),
))]
/// Calibrate circuit settings against sample data, searching over scales
/// and rebase multipliers for the given target. Returns `true` on
/// success.
fn calibrate_settings(
    data: PathBuf,
    model: PathBuf,
    settings: PathBuf,
    target: CalibrationTarget,
    lookup_safety_margin: i128,
    scales: Option<Vec<crate::Scale>>,
    scale_rebase_multiplier: Vec<u32>,
    max_logrows: Option<u32>,
    only_range_check_rebase: bool,
) -> Result<bool, PyErr> {
    // Note the argument order of `calibrate` differs from this wrapper's
    // parameter order (model before data, max_logrows last).
    crate::execute::calibrate(
        model,
        data,
        settings,
        target,
        lookup_safety_margin,
        scales,
        scale_rebase_multiplier,
        only_range_check_rebase,
        max_logrows,
    )
    .map_err(|e| {
        let err_str = format!("Failed to calibrate settings: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
data=PathBuf::from(DEFAULT_DATA),
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
output=PathBuf::from(DEFAULT_WITNESS),
vk_path=None,
srs_path=None,
))]
/// Generate a witness for a compiled circuit from input data and return
/// it as a Python object (also written to `output` when given).
fn gen_witness(
    data: PathBuf,
    model: PathBuf,
    output: Option<PathBuf>,
    vk_path: Option<PathBuf>,
    srs_path: Option<PathBuf>,
) -> PyResult<PyObject> {
    let output =
        crate::execute::gen_witness(model, data, output, vk_path, srs_path).map_err(|e| {
            let err_str = format!("Failed to run generate witness: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
    // Serialize the witness back into a Python object under the GIL.
    Python::with_gil(|py| Ok(output.to_object(py)))
}
witness=PathBuf::from(DEFAULT_WITNESS),
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
))]
/// Run a mock proof (constraint check without a real proof) for the
/// given witness and compiled circuit. Returns `true` on success.
fn mock(witness: PathBuf, model: PathBuf) -> PyResult<bool> {
    crate::execute::mock(model, witness).map_err(|e| {
        let err_str = format!("Failed to run mock: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
aggregation_snarks=vec![PathBuf::from(DEFAULT_PROOF)],
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
split_proofs = false,
))]
/// Run a mock aggregation over a set of proof snarks. Returns `true` on
/// success.
fn mock_aggregate(
    aggregation_snarks: Vec<PathBuf>,
    logrows: u32,
    split_proofs: bool,
) -> PyResult<bool> {
    crate::execute::mock_aggregate(aggregation_snarks, logrows, split_proofs).map_err(|e| {
        let err_str = format!("Failed to run mock: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
vk_path=PathBuf::from(DEFAULT_VK),
pk_path=PathBuf::from(DEFAULT_PK),
srs_path=None,
witness_path = None,
disable_selector_compression=DEFAULT_DISABLE_SELECTOR_COMPRESSION.parse().unwrap(),
))]
/// Run circuit setup: derive and save the proving and verifying keys
/// for a compiled circuit. Returns `true` on success.
fn setup(
    model: PathBuf,
    vk_path: PathBuf,
    pk_path: PathBuf,
    srs_path: Option<PathBuf>,
    witness_path: Option<PathBuf>,
    disable_selector_compression: bool,
) -> Result<bool, PyErr> {
    crate::execute::setup(
        model,
        srs_path,
        vk_path,
        pk_path,
        witness_path,
        disable_selector_compression,
    )
    .map_err(|e| {
        let err_str = format!("Failed to run setup: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
witness=PathBuf::from(DEFAULT_WITNESS),
model=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
pk_path=PathBuf::from(DEFAULT_PK),
proof_path=None,
proof_type=ProofType::default(),
srs_path=None,
))]
/// Generate a proof for a witness and compiled circuit, returning the
/// snark as a Python object (and writing it to `proof_path` when given).
fn prove(
    witness: PathBuf,
    model: PathBuf,
    pk_path: PathBuf,
    proof_path: Option<PathBuf>,
    proof_type: ProofType,
    srs_path: Option<PathBuf>,
) -> PyResult<PyObject> {
    let snark = crate::execute::prove(
        witness,
        model,
        pk_path,
        proof_path,
        srs_path,
        proof_type,
        // Check mode is fixed to UNSAFE for Python callers.
        CheckMode::UNSAFE,
    )
    .map_err(|e| {
        let err_str = format!("Failed to run prove: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Python::with_gil(|py| Ok(snark.to_object(py)))
}
proof_path=PathBuf::from(DEFAULT_PROOF),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
vk_path=PathBuf::from(DEFAULT_VK), |
srs_path=None,
reduced_srs=DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION.parse::<bool>().unwrap(),
))]
/// Verify a proof against settings and a verifying key. Returns `true`
/// when verification succeeds.
fn verify(
    proof_path: PathBuf,
    settings_path: PathBuf,
    vk_path: PathBuf,
    srs_path: Option<PathBuf>,
    reduced_srs: bool,
) -> Result<bool, PyErr> {
    crate::execute::verify(proof_path, settings_path, vk_path, srs_path, reduced_srs).map_err(
        |e| {
            let err_str = format!("Failed to run verify: {}", e);
            PyRuntimeError::new_err(err_str)
        },
    )?;
    Ok(true)
}
sample_snarks=vec![PathBuf::from(DEFAULT_PROOF)],
vk_path=PathBuf::from(DEFAULT_VK_AGGREGATED),
pk_path=PathBuf::from(DEFAULT_PK_AGGREGATED),
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
split_proofs = false,
srs_path = None,
disable_selector_compression=DEFAULT_DISABLE_SELECTOR_COMPRESSION.parse().unwrap(),
commitment=DEFAULT_COMMITMENT.parse().unwrap(),
))]
/// Run setup for an aggregation circuit over sample snarks, saving its
/// proving and verifying keys. Returns `true` on success.
fn setup_aggregate(
    sample_snarks: Vec<PathBuf>,
    vk_path: PathBuf,
    pk_path: PathBuf,
    logrows: u32,
    split_proofs: bool,
    srs_path: Option<PathBuf>,
    disable_selector_compression: bool,
    commitment: PyCommitments,
) -> Result<bool, PyErr> {
    crate::execute::setup_aggregate(
        sample_snarks,
        vk_path,
        pk_path,
        srs_path,
        logrows,
        split_proofs,
        disable_selector_compression,
        commitment.into(),
    )
    .map_err(|e| {
        let err_str = format!("Failed to setup aggregate: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
model=PathBuf::from(DEFAULT_MODEL),
compiled_circuit=PathBuf::from(DEFAULT_COMPILED_CIRCUIT),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
))]
/// Compile a model into a circuit artifact ready for setup/proving.
/// Returns `true` on success.
fn compile_circuit(
    model: PathBuf,
    compiled_circuit: PathBuf,
    settings_path: PathBuf,
) -> Result<bool, PyErr> {
    crate::execute::compile_circuit(model, compiled_circuit, settings_path).map_err(|e| {
        // Bug fix: the previous message read "Failed to setup aggregate",
        // copy-pasted from `setup_aggregate`.
        let err_str = format!("Failed to compile circuit: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
aggregation_snarks=vec![PathBuf::from(DEFAULT_PROOF)],
proof_path=PathBuf::from(DEFAULT_PROOF_AGGREGATED),
vk_path=PathBuf::from(DEFAULT_VK_AGGREGATED),
transcript=TranscriptType::default(),
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
check_mode=CheckMode::UNSAFE,
split_proofs = false,
srs_path=None,
commitment=DEFAULT_COMMITMENT.parse().unwrap(),
))]
/// Aggregate multiple snarks into a single proof. Returns `true` on
/// success.
fn aggregate(
    aggregation_snarks: Vec<PathBuf>,
    proof_path: PathBuf,
    vk_path: PathBuf,
    transcript: TranscriptType,
    logrows: u32,
    check_mode: CheckMode,
    split_proofs: bool,
    srs_path: Option<PathBuf>,
    commitment: PyCommitments,
) -> Result<bool, PyErr> {
    // Note: `aggregate` takes proof_path before the snark list.
    crate::execute::aggregate(
        proof_path,
        aggregation_snarks,
        vk_path,
        srs_path,
        transcript,
        logrows,
        check_mode,
        split_proofs,
        commitment.into(),
    )
    .map_err(|e| {
        let err_str = format!("Failed to run aggregate: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
proof_path=PathBuf::from(DEFAULT_PROOF_AGGREGATED),
vk_path=PathBuf::from(DEFAULT_VK),
logrows=DEFAULT_AGGREGATED_LOGROWS.parse().unwrap(),
commitment=DEFAULT_COMMITMENT.parse().unwrap(),
reduced_srs=DEFAULT_USE_REDUCED_SRS_FOR_VERIFICATION.parse().unwrap(),
srs_path=None,
))]
/// Verify an aggregated proof. Returns `true` when verification
/// succeeds.
fn verify_aggr(
    proof_path: PathBuf,
    vk_path: PathBuf,
    logrows: u32,
    commitment: PyCommitments,
    reduced_srs: bool,
    srs_path: Option<PathBuf>,
) -> Result<bool, PyErr> {
    crate::execute::verify_aggr(
        proof_path,
        vk_path,
        srs_path,
        logrows,
        reduced_srs,
        commitment.into(),
    )
    .map_err(|e| {
        let err_str = format!("Failed to run verify_aggr: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
vk_path=PathBuf::from(DEFAULT_VK),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
srs_path=None,
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
))]
/// Generate a Solidity verifier contract (and its ABI) for a circuit.
/// Returns `true` on success.
fn create_evm_verifier(
    vk_path: PathBuf,
    settings_path: PathBuf,
    sol_code_path: PathBuf,
    abi_path: PathBuf,
    srs_path: Option<PathBuf>,
    render_vk_seperately: bool,
) -> Result<bool, PyErr> {
    crate::execute::create_evm_verifier(
        vk_path,
        srs_path,
        settings_path,
        sol_code_path,
        abi_path,
        render_vk_seperately,
    )
    .map_err(|e| {
        let err_str = format!("Failed to run create_evm_verifier: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
input_data=PathBuf::from(DEFAULT_DATA),
settings_path=PathBuf::from(DEFAULT_SETTINGS),
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE_DA),
abi_path=PathBuf::from(DEFAULT_VERIFIER_DA_ABI),
))]
/// Generate a Solidity data-attestation contract (and its ABI) for
/// on-chain input/output attestation. Returns `true` on success.
fn create_evm_data_attestation(
    input_data: PathBuf,
    settings_path: PathBuf,
    sol_code_path: PathBuf,
    abi_path: PathBuf,
) -> Result<bool, PyErr> {
    crate::execute::create_evm_data_attestation(settings_path, sol_code_path, abi_path, input_data)
        .map_err(|e| {
            let err_str = format!("Failed to run create_evm_data_attestation: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
    Ok(true)
}
data_path,
compiled_circuit_path,
test_data,
input_source,
output_source,
rpc_url=None,
))]
/// Prepare test witness data sourced from (or destined for) an EVM
/// chain. Returns `true` on success.
fn setup_test_evm_witness(
    data_path: PathBuf,
    compiled_circuit_path: PathBuf,
    test_data: PathBuf,
    input_source: PyTestDataSource,
    output_source: PyTestDataSource,
    rpc_url: Option<String>,
) -> Result<bool, PyErr> {
    // Async command; driven on a throwaway runtime.
    Runtime::new()
        .unwrap()
        .block_on(crate::execute::setup_test_evm_witness(
            data_path,
            compiled_circuit_path,
            test_data,
            rpc_url,
            input_source.into(),
            output_source.into(),
        ))
        .map_err(|e| {
            let err_str = format!("Failed to run setup_test_evm_witness: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
    Ok(true)
}
addr_path,
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
rpc_url=None,
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
/// Deploy the generated verifier contract ("Halo2Verifier") to an EVM
/// chain, writing its address to `addr_path`. Returns `true` on
/// success.
fn deploy_evm(
    addr_path: PathBuf,
    sol_code_path: PathBuf,
    rpc_url: Option<String>,
    optimizer_runs: usize,
    private_key: Option<String>,
) -> Result<bool, PyErr> {
    Runtime::new()
        .unwrap()
        .block_on(crate::execute::deploy_evm(
            sol_code_path,
            rpc_url,
            addr_path,
            optimizer_runs,
            private_key,
            // Contract name selects which artifact is deployed.
            "Halo2Verifier",
        ))
        .map_err(|e| {
            let err_str = format!("Failed to run deploy_evm: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
    Ok(true)
}
addr_path,
sol_code_path=PathBuf::from(DEFAULT_VK_SOL),
rpc_url=None,
optimizer_runs=DEFAULT_OPTIMIZER_RUNS.parse().unwrap(),
private_key=None,
))]
/// Deploy the separately-rendered verifying-key contract
/// ("Halo2VerifyingKey") to an EVM chain. Returns `true` on success.
fn deploy_vk_evm(
    addr_path: PathBuf,
    sol_code_path: PathBuf,
    rpc_url: Option<String>,
    optimizer_runs: usize,
    private_key: Option<String>,
) -> Result<bool, PyErr> {
    Runtime::new()
        .unwrap()
        .block_on(crate::execute::deploy_evm(
            sol_code_path,
            rpc_url,
            addr_path,
            optimizer_runs,
            private_key,
            // Same deploy path as `deploy_evm`, different contract name.
            "Halo2VerifyingKey",
        ))
        .map_err(|e| {
            let err_str = format!("Failed to run deploy_evm: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
    Ok(true)
}
addr_path,
input_data,
settings_path=PathBuf::from(DEFAULT_SETTINGS),
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE_DA),
rpc_url=None,
optimizer_runs=DEFAULT_OPTIMIZER_RUNS. |
parse().unwrap(),
private_key=None
))]
/// Deploy the data-attestation contract to an EVM chain, writing its
/// address to `addr_path`. Returns `true` on success.
fn deploy_da_evm(
    addr_path: PathBuf,
    input_data: PathBuf,
    settings_path: PathBuf,
    sol_code_path: PathBuf,
    rpc_url: Option<String>,
    optimizer_runs: usize,
    private_key: Option<String>,
) -> Result<bool, PyErr> {
    Runtime::new()
        .unwrap()
        .block_on(crate::execute::deploy_da_evm(
            input_data,
            settings_path,
            sol_code_path,
            rpc_url,
            addr_path,
            optimizer_runs,
            private_key,
        ))
        .map_err(|e| {
            let err_str = format!("Failed to run deploy_da_evm: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
    Ok(true)
}
addr_verifier,
proof_path=PathBuf::from(DEFAULT_PROOF),
rpc_url=None,
addr_da = None,
addr_vk = None,
))]
/// Verify a proof against an on-chain verifier contract (optionally with
/// separate data-attestation and verifying-key contracts). Returns
/// `true` when verification succeeds.
fn verify_evm(
    addr_verifier: &str,
    proof_path: PathBuf,
    rpc_url: Option<String>,
    addr_da: Option<&str>,
    addr_vk: Option<&str>,
) -> Result<bool, PyErr> {
    let addr_verifier = H160Flag::from(addr_verifier);
    // `Option::map` replaces the previous if-let/else Some/None plumbing.
    let addr_da = addr_da.map(H160Flag::from);
    let addr_vk = addr_vk.map(H160Flag::from);
    Runtime::new()
        .unwrap()
        .block_on(crate::execute::verify_evm(
            proof_path,
            addr_verifier,
            rpc_url,
            addr_da,
            addr_vk,
        ))
        .map_err(|e| {
            let err_str = format!("Failed to run verify_evm: {}", e);
            PyRuntimeError::new_err(err_str)
        })?;
    Ok(true)
}
aggregation_settings=vec![PathBuf::from(DEFAULT_PROOF)],
vk_path=PathBuf::from(DEFAULT_VK_AGGREGATED),
sol_code_path=PathBuf::from(DEFAULT_SOL_CODE),
abi_path=PathBuf::from(DEFAULT_VERIFIER_ABI),
logrows=DEFAULT_ |
AGGREGATED_LOGROWS.parse().unwrap(),
srs_path=None,
render_vk_seperately = DEFAULT_RENDER_VK_SEPERATELY.parse().unwrap(),
))]
/// Generate a Solidity verifier contract (and ABI) for an aggregated
/// proof. Returns `true` on success.
fn create_evm_verifier_aggr(
    aggregation_settings: Vec<PathBuf>,
    vk_path: PathBuf,
    sol_code_path: PathBuf,
    abi_path: PathBuf,
    logrows: u32,
    srs_path: Option<PathBuf>,
    render_vk_seperately: bool,
) -> Result<bool, PyErr> {
    crate::execute::create_evm_aggregate_verifier(
        vk_path,
        srs_path,
        sol_code_path,
        abi_path,
        aggregation_settings,
        logrows,
        render_vk_seperately,
    )
    .map_err(|e| {
        let err_str = format!("Failed to run create_evm_verifier_aggr: {}", e);
        PyRuntimeError::new_err(err_str)
    })?;
    Ok(true)
}
/// Python module entry point: registers every class and function exposed
/// by the `ezkl` extension module.
fn ezkl(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
    // Route Rust `log` output through Python's logging machinery.
    pyo3_log::init();
    m.add_class::<PyRunArgs>()?;
    m.add_class::<PyG1Affine>()?;
    m.add_class::<PyG1>()?;
    m.add_class::<PyTestDataSource>()?;
    m.add_class::<PyCommitments>()?;
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;
    m.add_function(wrap_pyfunction!(felt_to_big_endian, m)?)?;
    m.add_function(wrap_pyfunction!(felt_to_int, m)?)?;
    m.add_function(wrap_pyfunction!(felt_to_float, m)?)?;
    m.add_function(wrap_pyfunction!(kzg_commit, m)?)?;
    m.add_function(wrap_pyfunction!(ipa_commit, m)?)?;
    m.add_function(wrap_pyfunction!(swap_proof_commitments, m)?)?;
    m.add_function(wrap_pyfunction!(poseidon_hash, m)?)?;
    m.add_function(wrap_pyfunction!(float_to_felt, m)?)?;
    m.add_function(wrap_pyfunction!(buffer_to_felts, m)?)?;
    m.add_function(wrap_pyfunction!(gen_vk_from_pk_aggr, m)?)?;
    m.add_function(wrap_pyfunction!(gen_vk_from_pk_single, m)?)?;
    m.add_function(wrap_pyfunction!(table, m)?)?;
    m.add_function(wrap_pyfunction!(mock, m)?)?;
    m.add_function(wrap_pyfunction!(setup, m)?)?;
    m.add_function(wrap_pyfunction!(prove, m)?)?;
    m.add_function(wrap_pyfunction!(verify, m)?)?;
    m.add_function(wrap_pyfunction!(gen_srs, m)?)?;
    m.add_function(wrap_pyfunction!(get_srs, m)?)?;
    m.add_function(wrap_pyfunction!(gen_witness, m)?)?;
    m.add_function(wrap_pyfunction!(gen_settings, m)?)?;
    m.add_function(wrap_pyfunction!(calibrate_settings, m)?)?;
    m.add_function(wrap_pyfunction!(aggregate, m)?)?;
    m.add_function(wrap_pyfunction!(mock_aggregate, m)?)?;
    m.add_function(wrap_pyfunction!(setup_aggregate, m)?)?;
    m.add_function(wrap_pyfunction!(compile_circuit, m)?)?;
    m.add_function(wrap_pyfunction!(verify_aggr, m)?)?;
    m.add_function(wrap_pyfunction!(create_evm_verifier, m)?)?;
    m.add_function(wrap_pyfunction!(deploy_evm, m)?)?;
    m.add_function(wrap_pyfunction!(deploy_vk_evm, m)?)?;
    m.add_function(wrap_pyfunction!(deploy_da_evm, m)?)?;
    m.add_function(wrap_pyfunction!(verify_evm, m)?)?;
    m.add_function(wrap_pyfunction!(setup_test_evm_witness, m)?)?;
    m.add_function(wrap_pyfunction!(create_evm_verifier_aggr, m)?)?;
    m.add_function(wrap_pyfunction!(create_evm_data_attestation, m)?)?;
    Ok(())
}
use lazy_static::lazy_static;
use std::collections::HashMap;
/// SHA256 hashes of the publicly hosted SRS files, keyed by the
/// `logrows` size of each SRS (1..=26); used to verify downloads.
///
/// Uses `std::sync::LazyLock` (stable since Rust 1.80) instead of the
/// `lazy_static!` macro; `Deref` keeps existing `.get(...)` call sites
/// working unchanged.
pub static PUBLIC_SRS_SHA256_HASHES: std::sync::LazyLock<HashMap<u32, &'static str>> =
    std::sync::LazyLock::new(|| {
        HashMap::from_iter([
            (
                1,
                "cafb2aa72c200ddc4e28aacabb8066e829207e2484b8d17059a566232f8a297b",
            ),
            (
                2,
                "8194ec51da5d332d2e17283ade34920644774452c2fadf33742e8c739e275d8e",
            ),
            (
                3,
                "0729e815bce2ac4dfad7819982c6479c3b22c32b71f64dca05e8fdd90e8535ef",
            ),
            (
                4,
                "2c0785da20217fcafd3b12cc363a95eb2529037cc8a9bddf8fb15025cbc8cdc9",
            ),
            (
                5,
                "5b950e3b76e7a9923d69f6d6585ce6b5f9458e5ec57a71c9de5005d32d544692",
            ),
            (
                6,
                "85030b2924111fc60acaf4fb8a7bad89531fbe0271aeab0c21e545f71eee273d",
            ),
            (
                7,
                "e65f95150519fe01c2bedf8f832f5249822ef84c9c017307419e10374ff9eeb1",
            ),
            (
                8,
                "446092fd1d6030e5bb2f2a8368267d5ed0fbdb6a766f6c5e4a4841827ad3106f",
            ),
            (
                9,
                "493d088951882ad81af11e08c791a38a37c0ffff14578cf2c7fb9b7bca654d8b",
            ),
            (
                10,
                "9705d450e5dfd06adb673705f7bc34418ec86339203198beceb2ae7f1ffefedb",
            ),
            (
                11,
                "257fa566ed9bc0767d3e63e92b5e966829fa3347d320a32055dc31ee7d33f8a4",
            ),
            (
                12,
                "28b151069f41abc121baa6d2eaa8f9e4c4d8326ddbefee2bd9c0776b80ac6fad",
            ),
            (
                13,
                "d5d94bb25bdc024f649213593027d861042ee807cafd94b49b54f1663f8f267d",
            ),
            (
                14,
                "c09129f064c08ecb07ea3689a2247dcc177de6837e7d2f5f946e30453abbccef",
            ),
            (
                15,
                "90807800a1c3b248a452e1732c45ee5099f38b737356f5542c0584ec9c3ebb45",
            ),
            (
                16,
                "2a1a494630e71bc026dd5c0eab4c1b9a5dbc656228c1f0d48f5dbd3909b161d3",
            ),
            (
                17,
                "41509f380362a8d14401c5ae92073154922fe23e45459ce6f696f58607655db7",
            ),
            (
                18,
                "d0148475717a2ba269784a178cb0ab617bc77f16c58d4a3cbdfe785b591c7034",
            ),
            (
                19,
                "d1a1655b4366a766d1578beb257849a92bf91cb1358c1a2c37ab180c5d3a204d",
            ),
            (
                20,
                "54ef75911da76d7a6b7ea341998aaf66cb06c679c53e0a88a4fe070dd3add963",
            ),
            (
                21,
                "486e044cf98704e07f41137d2b89698dc03d1fbf34d13b60902fea19a6013b4b",
            ),
            (
                22,
                "1ee9b4396db3e4e2516ac5016626ab6ba967f091d5d23afbdb7df122a0bb9d0c",
            ),
            (
                23,
                "748e48b9b6d06f9c82d26bf551d0af43ee2e801e4be56d7ccb20312e267fd1d6",
            ),
            (
                24,
                "f94fa4afa2f5147680f907d4dd96a8826206c26bd3328cd379feaed614b234de",
            ),
            (
                25,
                "dec49a69893fbcd66cd06296b2d936a6aceb431c130b2e52675fe4274b504f57",
            ),
            (
                26,
                "b198a51d48b88181508d8e4ea9dea39db285e4585663b29b7e4ded0c22a94875",
            ),
        ])
    });
pub mod ops;
pub mod val;
pub mod var;
use halo2curves::ff::PrimeField;
use maybe_rayon::{
prelude::{
IndexedParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator,
ParallelIterator,
},
slice::ParallelSliceMut,
};
use serde::{Deserialize, Serialize};
pub use val::*;
pub use var::*;
use crate::{
circuit::utils,
fieldutils::{felt_to_i32, i128_to_felt, i32_to_felt},
graph::Visibility,
};
use halo2_proofs::{
arithmetic::Field,
circuit::{AssignedCell, Region, Value},
plonk::{Advice, Assigned, Column, ConstraintSystem, Expression, Fixed, VirtualCells},
poly::Rotation,
};
use itertools::Itertools;
use std::error::Error;
use std::fmt::Debug;
use std::iter::Iterator;
use std::ops::{Add, Deref, DerefMut, Div, Mul, Neg, Range, Sub};
use std::{cmp::max, ops::Rem};
use thiserror::Error;
/// Errors that can arise when constructing or operating on tensors.
pub enum TensorError {
    /// Operand dimensions do not match; payload names the operation.
    DimMismatch(String),
    /// A dimension-related error; payload carries the detail message.
    DimError(String),
    /// The wrong method was called for this tensor's contents.
    WrongMethod,
    /// Significant bits were truncated during a conversion.
    SigBitTruncationError,
    /// A field-element conversion failed.
    FeltError,
    /// A table lookup failed.
    TableLookupError,
    /// The requested operation is not supported.
    Unsupported,
    /// Arithmetic overflow; payload names the operation.
    Overflow(String),
}
/// Types usable as tensor elements. Defaults return `None`, meaning the
/// type has no zero/one/max notion; concrete impls override as needed.
pub trait TensorType: Clone + Debug + 'static {
    /// The additive identity, if the type has one.
    fn zero() -> Option<Self> {
        None
    }
    /// The multiplicative identity, if the type has one.
    fn one() -> Option<Self> {
        None
    }
    /// The maximum of `self` and the argument, if comparable.
    fn tmax(&self, _: &Self) -> Option<Self> {
        None
    }
}
/// Generate a `TensorType` impl for an `Ord` scalar type, given its zero
/// and one values; `tmax` delegates to `std::cmp::max`.
macro_rules! tensor_type {
    ($rust_type:ty, $tensor_type:ident, $zero:expr, $one:expr) => {
        impl TensorType for $rust_type {
            fn zero() -> Option<Self> {
                Some($zero)
            }
            fn one() -> Option<Self> {
                Some($one)
            }
            fn tmax(&self, other: &Self) -> Option<Self> {
                Some(max(*self, *other))
            }
        }
    };
}
impl TensorType for f32 {
    /// Additive identity. (`one` keeps the trait's `None` default, as in
    /// the original.)
    fn zero() -> Option<Self> {
        Some(0.0)
    }

    /// NaN-aware maximum: a lone NaN loses to the non-NaN operand, two
    /// NaNs yield NaN, and ties favour `self`.
    fn tmax(&self, other: &Self) -> Option<Self> {
        if self.is_nan() && other.is_nan() {
            Some(f32::NAN)
        } else if self.is_nan() {
            Some(*other)
        } else if other.is_nan() {
            Some(*self)
        } else if self >= other {
            Some(*self)
        } else {
            Some(*other)
        }
    }
}
impl TensorType for f64 {
    /// Additive identity. (`one` keeps the trait's `None` default, as in
    /// the original.)
    fn zero() -> Option<Self> {
        Some(0.0)
    }

    /// NaN-aware maximum: a lone NaN loses to the non-NaN operand, two
    /// NaNs yield NaN, and ties favour `self`.
    fn tmax(&self, other: &Self) -> Option<Self> {
        if self.is_nan() && other.is_nan() {
            Some(f64::NAN)
        } else if self.is_nan() {
            Some(*other)
        } else if other.is_nan() {
            Some(*self)
        } else if self >= other {
            Some(*self)
        } else {
            Some(*other)
        }
    }
}
// Scalar `TensorType` impls generated by the macro above:
// (type, variant name, zero value, one value).
tensor_type!(bool, Bool, false, true);
tensor_type!(i128, Int128, 0, 1);
tensor_type!(i32, Int32, 0, 1);
tensor_type!(usize, USize, 0, 1);
tensor_type!((), Empty, (), ());
tensor_type!(utils::F32, F32, utils::F32(0.0), utils::F32(1.0));
impl<T: TensorType> TensorType for Tensor<T> {
    /// A 1-element tensor holding `T::zero()`.
    /// Panics if `T::zero()` is `None`.
    fn zero() -> Option<Self> {
        Some(Tensor::new(Some(&[T::zero().unwrap()]), &[1]).unwrap())
    }
    /// A 1-element tensor holding `T::one()`.
    /// Panics if `T::one()` is `None`.
    fn one() -> Option<Self> {
        Some(Tensor::new(Some(&[T::one().unwrap()]), &[1]).unwrap())
    }
}
impl<T: TensorType> TensorType for Value<T> {
    /// A known value wrapping `T::zero()`; panics if that is `None`.
    fn zero() -> Option<Self> {
        Some(Value::known(T::zero().unwrap()))
    }
    /// A known value wrapping `T::one()`; panics if that is `None`.
    fn one() -> Option<Self> {
        Some(Value::known(T::one().unwrap()))
    }
    /// Element-wise max inside the `Value` wrapper; panics if the inner
    /// `tmax` returns `None`.
    fn tmax(&self, other: &Self) -> Option<Self> {
        Some(
            (self.clone())
                .zip(other.clone())
                .map(|(a, b)| a.tmax(&b).unwrap()),
        )
    }
}
impl<F: PrimeField + PartialOrd> TensorType for Assigned<F>
where
    F: Field,
{
    /// The field's additive identity, wrapped as `Assigned`.
    fn zero() -> Option<Self> {
        Some(F::ZERO.into())
    }
    /// The field's multiplicative identity, wrapped as `Assigned`.
    fn one() -> Option<Self> {
        Some(F::ONE.into())
    }
    /// Max by comparing the evaluated (rational-form-resolved) values;
    /// ties favour `self`.
    fn tmax(&self, other: &Self) -> Option<Self> {
        if self.evaluate() >= other.evaluate() {
            Some(*self)
        } else {
            Some(*other)
        }
    }
}
impl<F: PrimeField> TensorType for Expression<F>
where
    F: Field,
{
    /// The constant-zero expression.
    fn zero() -> Option<Self> {
        Some(Expression::Constant(F::ZERO))
    }
    /// The constant-one expression.
    fn one() -> Option<Self> {
        Some(Expression::Constant(F::ONE))
    }
    /// Unimplemented: comparing symbolic expressions is not defined;
    /// calling this panics via `todo!()`.
    fn tmax(&self, _: &Self) -> Option<Self> {
        todo!()
    }
}
// Marker impls: columns have no zero/one/max semantics, so they rely on
// the trait's default `None`-returning methods.
impl TensorType for Column<Advice> {}
impl TensorType for Column<Fixed> {}
impl<F: PrimeField + PartialOrd> TensorType for AssignedCell<Assigned<F>, F> {
    /// Max by evaluated cell value. Returns `None` when either cell's
    /// value is unknown (the `zip(...).map` closure never runs, so
    /// `output` stays `None`).
    fn tmax(&self, other: &Self) -> Option<Self> {
        let mut output: Option<Self> = None;
        self.value_field().zip(other.value_field()).map(|(a, b)| {
            if a.evaluate() >= b.evaluate() {
                output = Some(self.clone());
            } else {
                output = Some(other.clone());
            }
        });
        output
    }
}
impl<F: PrimeField + PartialOrd> TensorType for AssignedCell<F, F> {
    /// Max by cell value. Returns `None` when either cell's value is
    /// unknown (the `zip(...).map` closure never runs, so `output`
    /// stays `None`).
    fn tmax(&self, other: &Self) -> Option<Self> {
        let mut output: Option<Self> = None;
        self.value().zip(other.value()).map(|(a, b)| {
            if a >= b {
                output = Some(self.clone());
            } else {
                output = Some(other.clone());
            }
        });
        output
    }
}
impl TensorType for halo2curves::pasta::Fp {
    /// The field's additive identity.
    fn zero() -> Option<Self> {
        Some(halo2curves::pasta::Fp::zero())
    }
    /// The field's multiplicative identity.
    fn one() -> Option<Self> {
        Some(halo2curves::pasta::Fp::one())
    }
    /// Max of the two field elements via the type's own `max`.
    fn tmax(&self, other: &Self) -> Option<Self> {
        Some((*self).max(*other))
    }
}
impl TensorType for halo2curves::bn256::Fr {
    /// The field's additive identity.
    fn zero() -> Option<Self> {
        Some(halo2curves::bn256::Fr::zero())
    }
    /// The field's multiplicative identity.
    fn one() -> Option<Self> {
        Some(halo2curves::bn256::Fr::one())
    }
    /// Max of the two field elements via the type's own `max`.
    fn tmax(&self, other: &Self) -> Option<Self> {
        Some((*self).max(*other))
    }
}
/// An n-dimensional array stored as a flat `Vec` plus a dimension list.
pub struct Tensor<T: TensorType> {
    /// Flattened element storage (row-major per the indexing impls below).
    inner: Vec<T>,
    /// Size of each dimension; product equals `inner.len()`.
    dims: Vec<usize>,
    /// Optional fixed-point scale associated with the elements.
    scale: Option<crate::Scale>,
    /// Optional visibility tag carried through circuit construction.
    visibility: Option<Visibility>,
}
/// Consuming iteration yields the flattened elements in storage order.
impl<T: TensorType> IntoIterator for Tensor<T> {
    type Item = T;
    type IntoIter = ::std::vec::IntoIter<T>;
    fn into_iter(self) -> Self::IntoIter {
        self.inner.into_iter()
    }
}
/// Tensors deref to their flat backing slice, so slice methods and
/// indexing work directly on the row-major buffer.
impl<T: TensorType> Deref for Tensor<T> {
    type Target = [T];
    fn deref(&self) -> &[T] {
        &self.inner
    }
}
/// Mutable deref to the flat backing slice.
impl<T: TensorType> DerefMut for Tensor<T> {
    fn deref_mut(&mut self) -> &mut [T] {
        &mut self.inner
    }
}
/// Equality requires identical shape AND identical flat contents.
impl<T: PartialEq + TensorType> PartialEq for Tensor<T> {
    fn eq(&self, other: &Tensor<T>) -> bool {
        if self.dims != other.dims {
            return false;
        }
        self.inner == other.inner
    }
}
/// Builds a 1-D tensor by draining any iterator.
impl<I: Iterator> From<I> for Tensor<I::Item>
where
    I::Item: TensorType + Clone,
    Vec<I::Item>: FromIterator<I::Item>,
{
    fn from(value: I) -> Tensor<I::Item> {
        let data: Vec<I::Item> = value.collect();
        let shape = [data.len()];
        Tensor::new(Some(&data), &shape).unwrap()
    }
}
/// `collect()` support: produces a 1-D tensor of the collected items.
impl<T> FromIterator<T> for Tensor<T>
where
    T: TensorType + Clone,
    Vec<T>: FromIterator<T>,
{
    fn from_iter<I: IntoIterator<Item = T>>(value: I) -> Tensor<T> {
        let data: Vec<T> = value.into_iter().collect();
        let shape = [data.len()];
        Tensor::new(Some(&data), &shape).unwrap()
    }
}
impl<F: PrimeField + Clone + TensorType + PartialOrd> From<Tensor<AssignedCell<Assigned<F>, F>>>
    for Tensor<i32>
{
    /// Converts assigned cells to `i32` by evaluating each cell's value.
    ///
    /// Cells whose value is unknown (e.g. during key generation) are mapped
    /// to 0, mirroring the `AssignedCell<F, F>` conversion below. Previously
    /// unknown values pushed nothing, so the output length no longer matched
    /// `value.dims()` and the final `unwrap` panicked.
    fn from(value: Tensor<AssignedCell<Assigned<F>, F>>) -> Tensor<i32> {
        let mut output = Vec::new();
        value.map(|x| {
            let mut known = false;
            x.evaluate().value().map(|y| {
                output.push(felt_to_i32(*y));
                known = true;
            });
            // pad with zero when the value is unknown so dims stay consistent
            if !known {
                output.push(0);
            }
        });
        Tensor::new(Some(&output), value.dims()).unwrap()
    }
}
impl<F: PrimeField + Clone + TensorType + PartialOrd> From<Tensor<AssignedCell<F, F>>>
for Tensor<i32>
{
fn from(value: Tensor<AssignedCell<F, F>>) -> Tensor<i32> {
let mut output = Vec::new();
value.map(|x| {
let mut i = 0; |
x.value().map(|y| {
let e = felt_to_i32(*y);
output.push(e);
i += 1;
});
if i == 0 {
output.push(0);
}
});
Tensor::new(Some(&output), value.dims()).unwrap()
}
}
impl<F: PrimeField + Clone + TensorType + PartialOrd> From<Tensor<AssignedCell<Assigned<F>, F>>>
    for Tensor<Value<F>>
{
    /// Extracts each cell's (evaluated) value, preserving the shape.
    fn from(value: Tensor<AssignedCell<Assigned<F>, F>>) -> Tensor<Value<F>> {
        let inner: Vec<Value<F>> = value
            .iter()
            .map(|cell| cell.value_field().evaluate())
            .collect();
        Tensor::new(Some(&inner), value.dims()).unwrap()
    }
}
impl<F: PrimeField + TensorType + Clone + PartialOrd> From<Tensor<Value<F>>> for Tensor<i32> {
    /// Converts known values to `i32`; unknown values become 0 so the
    /// output length always matches `t.dims()`.
    fn from(t: Tensor<Value<F>>) -> Tensor<i32> {
        let mut output = Vec::new();
        for v in t.iter() {
            let mut known = false;
            (*v).map(|y| {
                output.push(felt_to_i32(y));
                known = true;
            });
            if !known {
                output.push(0);
            }
        }
        Tensor::new(Some(&output), t.dims()).unwrap()
    }
}
impl<F: PrimeField + TensorType + Clone + PartialOrd> From<Tensor<Value<F>>>
    for Tensor<Value<Assigned<F>>>
{
    /// Lifts every value into the deferred `Assigned` representation,
    /// keeping the original shape.
    fn from(t: Tensor<Value<F>>) -> Tensor<Value<Assigned<F>>> {
        let inner: Vec<Value<Assigned<F>>> = t.iter().map(|v| (*v).into()).collect();
        let mut ta = Tensor::new(Some(&inner), &[inner.len()]).unwrap();
        ta.reshape(t.dims()).unwrap();
        ta
    }
}
impl<F: PrimeField + TensorType + Clone> From<Tensor<i32>> for Tensor<Value<F>> {
    /// Quantized `i32`s become known field values, preserving the shape.
    fn from(t: Tensor<i32>) -> Tensor<Value<F>> {
        let mut ta: Tensor<Value<F>> = t
            .iter()
            .map(|x| Value::known(i32_to_felt::<F>(*x)))
            .collect();
        ta.reshape(t.dims()).unwrap();
        ta
    }
}
impl<F: PrimeField + TensorType + Clone> From<Tensor<i128>> for Tensor<Value<F>> {
fn from(t: Tensor<i128>) -> Tensor<Value<F>> {
let mut ta: Tensor<Value<F>> = |
Tensor::from((0..t.len()).map(|i| Value::known(i128_to_felt::<F>(t[i]))));
ta.reshape(t.dims()).unwrap();
ta
}
}
/// Parallel `collect()` support: produces a 1-D tensor.
impl<T: Clone + TensorType + std::marker::Send + std::marker::Sync>
    maybe_rayon::iter::FromParallelIterator<T> for Tensor<T>
{
    fn from_par_iter<I>(par_iter: I) -> Self
    where
        I: maybe_rayon::iter::IntoParallelIterator<Item = T>,
    {
        let collected: Vec<T> = par_iter.into_par_iter().collect();
        let shape = [collected.len()];
        Tensor::new(Some(&collected), &shape).unwrap()
    }
}
/// Consuming parallel iteration over the flat buffer.
impl<T: Clone + TensorType + std::marker::Send + std::marker::Sync>
    maybe_rayon::iter::IntoParallelIterator for Tensor<T>
{
    type Iter = maybe_rayon::vec::IntoIter<T>;
    type Item = T;
    fn into_par_iter(self) -> Self::Iter {
        self.inner.into_par_iter()
    }
}
/// Mutable parallel iteration (`par_iter_mut`) over the flat buffer.
impl<'data, T: Clone + TensorType + std::marker::Send + std::marker::Sync>
    maybe_rayon::iter::IntoParallelRefMutIterator<'data> for Tensor<T>
{
    type Iter = maybe_rayon::slice::IterMut<'data, T>;
    type Item = &'data mut T;
    fn par_iter_mut(&'data mut self) -> Self::Iter {
        self.inner.par_iter_mut()
    }
}
impl<T: Clone + TensorType> Tensor<T> {
    /// Creates a tensor from optional `values` with shape `dims`.
    ///
    /// With `values == None` the tensor is zero-filled. An empty `dims`
    /// denotes a scalar: length 1 when values are given, 0 otherwise.
    ///
    /// # Errors
    /// `TensorError::DimError` when the value count does not match the
    /// product of `dims`.
    pub fn new(values: Option<&[T]>, dims: &[usize]) -> Result<Self, TensorError> {
        let total_dims: usize = if !dims.is_empty() {
            dims.iter().product()
        } else if values.is_some() {
            1
        } else {
            0
        };
        match values {
            Some(v) => {
                if total_dims != v.len() {
                    return Err(TensorError::DimError(format!(
                        "Cannot create tensor of length {} with dims {:?}",
                        v.len(),
                        dims
                    )));
                }
                Ok(Tensor {
                    inner: Vec::from(v),
                    dims: Vec::from(dims),
                    scale: None,
                    visibility: None,
                })
            }
            // zero-filled backing store; panics if T has no zero element
            None => Ok(Tensor {
                inner: vec![T::zero().unwrap(); total_dims],
                dims: Vec::from(dims),
                scale: None,
                visibility: None,
            }),
        }
    }
    /// Sets the fixed-point scale attached to this tensor's values.
    pub fn set_scale(&mut self, scale: crate::Scale) {
        self.scale = Some(scale)
    }
    /// Sets the visibility metadata attached to this tensor.
    pub fn set_visibility(&mut self, visibility: &Visibility) {
        self.visibility = Some(visibility.clone())
    }
    /// The fixed-point scale, if one was set.
    pub fn scale(&self) -> Option<crate::Scale> {
        self.scale
    }
    /// The visibility metadata, if set.
    pub fn visibility(&self) -> Option<Visibility> {
        self.visibility.clone()
    }
    /// Number of elements implied by the dims (product), NOT `inner.len()`.
    pub fn len(&self) -> usize {
        self.dims().iter().product::<usize>()
    }
    /// True when the backing buffer holds no elements.
    pub fn is_empty(&self) -> bool {
        self.inner.len() == 0
    }
    /// True for a rank-0 (scalar) tensor holding exactly one element.
    pub fn is_singleton(&self) -> bool {
        self.dims().is_empty() && self.len() == 1
    }
    /// Writes `value` at the multi-dimensional coordinate `indices`.
    /// Panics on an out-of-bounds coordinate (see `get_index`).
    pub fn set(&mut self, indices: &[usize], value: T) {
        let index = self.get_index(indices);
        self[index] = value;
    }
    /// Clones the element at the multi-dimensional coordinate `indices`.
    pub fn get(&self, indices: &[usize]) -> T {
        let index = self.get_index(indices);
        self[index].clone()
    }
    /// Mutable access to the element at coordinate `indices`.
    // NOTE(review): duplicates the row-major arithmetic of `get_index`.
    pub fn get_mut(&mut self, indices: &[usize]) -> &mut T {
        assert_eq!(self.dims.len(), indices.len());
        let mut index = 0;
        let mut d = 1;
        // row-major: the last axis varies fastest
        for i in (0..indices.len()).rev() {
            assert!(self.dims[i] > indices[i]);
            index += indices[i] * d;
            d *= self.dims[i];
        }
        &mut self[index]
    }
    /// Returns a flattened copy padded with `pad` so its length is a
    /// multiple of `n`.
    // NOTE(review): panics if n == 0 (modulo by zero) — confirm callers
    // never pass 0.
    pub fn pad_to_zero_rem(&self, n: usize, pad: T) -> Result<Tensor<T>, TensorError> {
        let mut inner = self.inner.clone();
        let remainder = self.len() % n;
        if remainder != 0 {
            inner.resize(self.len() + n - remainder, pad);
        }
        Tensor::new(Some(&inner), &[inner.len()])
    }
    /// Clones the element at an already-linearized flat position.
    pub fn get_flat_index(&self, index: usize) -> T {
        self[index].clone()
    }
    /// Debug-friendly rendering, truncated after the first 12 elements.
    pub fn show(&self) -> String {
        if self.len() > 12 {
            let start = self[..12].to_vec();
            format!(
                "[{} ...]",
                start.iter().map(|x| format!("{:?}", x)).join(", "),
            )
        } else {
            format!("[{:?}]", self.iter().map(|x| format!("{:?}", x)).join(", "))
        }
    }
    /// Extracts a sub-tensor spanning `indices` (one `Range` per leading
    /// axis; omitted trailing axes are taken in full).
    ///
    /// # Errors
    /// `TensorError::DimError` when more ranges are given than axes.
    pub fn get_slice(&self, indices: &[Range<usize>]) -> Result<Tensor<T>, TensorError>
    where
        T: Send + Sync,
    {
        if indices.is_empty() {
            return Ok(self.clone());
        }
        if self.dims.len() < indices.len() {
            return Err(TensorError::DimError(format!(
                "The dimensionality of the slice {:?} is greater than the tensor's {:?}",
                indices, self.dims
            )));
        } else if indices.iter().map(|x| x.end - x.start).collect::<Vec<_>>() == self.dims {
            // the slice covers everything — no copy needed beyond the clone
            return Ok(self.clone());
        }
        // complete the slice with full ranges for unspecified trailing axes
        let mut full_indices = indices.to_vec();
        for i in 0..(self.dims.len() - indices.len()) {
            full_indices.push(0..self.dims()[indices.len() + i])
        }
        let cartesian_coord: Vec<Vec<usize>> = full_indices
            .iter()
            .cloned()
            .multi_cartesian_product()
            .collect();
        let res: Vec<T> = cartesian_coord
            .par_iter()
            .map(|e| {
                let index = self.get_index(e);
                self[index].clone()
            })
            .collect();
        let dims: Vec<usize> = full_indices.iter().map(|e| e.end - e.start).collect();
        Tensor::new(Some(&res), &dims)
    }
    /// Writes `value` (broadcast if needed) into the region selected by
    /// `indices`; omitted trailing axes are taken in full.
    pub fn set_slice(
        &mut self,
        indices: &[Range<usize>],
        value: &Tensor<T>,
    ) -> Result<(), TensorError>
    where
        T: Send + Sync,
    {
        if indices.is_empty() {
            return Ok(());
        }
        if self.dims.len() < indices.len() {
            return Err(TensorError::DimError(format!(
                "The dimensionality of the slice {:?} is greater than the tensor's {:?}",
                indices, self.dims
            )));
        }
        let mut full_indices = indices.to_vec();
        let omitted_dims = (indices.len()..self.dims.len())
            .map(|i| self.dims[i])
            .collect::<Vec<_>>();
        for dim in &omitted_dims {
            full_indices.push(0..*dim);
        }
        let full_dims = full_indices
            .iter()
            .map(|x| x.end - x.start)
            .collect::<Vec<_>>();
        // broadcast the source so it covers the whole target region
        let value = value.expand(&full_dims)?;
        let cartesian_coord: Vec<Vec<usize>> = full_indices
            .iter()
            .cloned()
            .multi_cartesian_product()
            .collect();
        let _ = cartesian_coord
            .iter()
            .enumerate()
            .map(|(i, e)| {
                self.set(e, value[i].clone());
            })
            .collect::<Vec<_>>();
        Ok(())
    }
    /// Linearizes a multi-dimensional coordinate into a flat row-major
    /// index. Panics when the rank or any index is out of bounds.
    pub fn get_index(&self, indices: &[usize]) -> usize {
        assert_eq!(self.dims.len(), indices.len());
        let mut index = 0;
        let mut d = 1;
        for i in (0..indices.len()).rev() {
            assert!(self.dims[i] > indices[i]);
            index += indices[i] * d;
            d *= self.dims[i];
        }
        index
    }
    /// Returns a flattened copy where every n-th element (counting from
    /// `initial_offset`) is repeated `num_repeats` extra times.
    // NOTE(review): panics if n == 0 (modulo by zero).
    pub fn duplicate_every_n(
        &self,
        n: usize,
        num_repeats: usize,
        initial_offset: usize,
    ) -> Result<Tensor<T>, TensorError> {
        let mut inner: Vec<T> = vec![];
        let mut offset = initial_offset;
        for (i, elem) in self.inner.clone().into_iter().enumerate() {
            if (i + offset + 1) % n == 0 {
                inner.extend(vec![elem; 1 + num_repeats]);
                // inserted copies shift the positions of later duplicates
                offset += num_repeats;
            } else {
                inner.push(elem.clone());
            }
        }
        Tensor::new(Some(&inner), &[inner.len()])
    }
    /// Inverse of `duplicate_every_n`: drops the `num_repeats` elements
    /// following every n-th element (counting from `initial_offset`).
    pub fn remove_every_n(
        &self,
        n: usize,
        num_repeats: usize,
        initial_offset: usize,
    ) -> Result<Tensor<T>, TensorError> {
        let mut inner: Vec<T> = vec![];
        let mut indices_to_remove = std::collections::HashSet::new();
        for i in 0..self.inner.len() {
            if (i + initial_offset + 1) % n == 0 {
                for j in 1..(1 + num_repeats) {
                    indices_to_remove.insert(i + j);
                }
            }
        }
        let old_inner = self.inner.clone();
        for (i, elem) in old_inner.into_iter().enumerate() {
            if !indices_to_remove.contains(&i) {
                inner.push(elem.clone());
            }
        }
        Tensor::new(Some(&inner), &[inner.len()])
    }
    /// Returns a flattened copy with the given flat positions removed.
    /// Sorts `indices` in place first (unless `is_sorted`) so removal can
    /// proceed back-to-front without invalidating later positions.
    pub fn remove_indices(
        &self,
        indices: &mut [usize],
        is_sorted: bool,
    ) -> Result<Tensor<T>, TensorError> {
        let mut inner: Vec<T> = self.inner.clone();
        if !is_sorted {
            indices.par_sort_unstable();
        }
        // reverse order keeps earlier indices valid as elements shift left
        for elem in indices.iter().rev() {
            inner.remove(*elem);
        }
        Tensor::new(Some(&inner), &[inner.len()])
    }
    /// The tensor's shape.
    pub fn dims(&self) -> &[usize] {
        &self.dims
    }
    /// Reassigns the shape without touching the data.
    ///
    /// # Errors
    /// `TensorError::DimError` when the new shape's element count does not
    /// match the current length.
    pub fn reshape(&mut self, new_dims: &[usize]) -> Result<(), TensorError> {
        if new_dims.is_empty() {
            // scalar target: only valid for single-element or empty tensors
            if !(self.len() == 1 || self.is_empty()) {
                return Err(TensorError::DimError(
                    "Cannot reshape to empty tensor".to_string(),
                ));
            }
            self.dims = vec![];
        } else {
            // [0] is treated as the canonical empty shape
            let product = if new_dims != [0] {
                new_dims.iter().product::<usize>()
            } else {
                0
            };
            if self.len() != product {
                return Err(TensorError::DimError(format!(
                    "Cannot reshape tensor of length {} to {:?}",
                    self.len(),
                    new_dims
                )));
            }
            self.dims = Vec::from(new_dims);
        }
        Ok(())
    }
    /// Returns a copy with axis `source` moved to position `destination`
    /// (numpy `moveaxis` semantics); other axes keep their relative order.
    pub fn move_axis(&mut self, source: usize, destination: usize) -> Result<Self, TensorError> {
        assert!(source < self.dims.len());
        assert!(destination < self.dims.len());
        let mut new_dims = self.dims.clone();
        new_dims.remove(source);
        new_dims.insert(destination, self.dims[source]);
        let cartesian_coords = new_dims
            .iter()
            .map(|d| 0..*d)
            .multi_cartesian_product()
            .collect::<Vec<Vec<usize>>>();
        let mut output = Tensor::new(None, &new_dims)?;
        for coord in cartesian_coords {
            let mut old_coord = vec![0; self.dims.len()];
            // map each output axis back to the corresponding input axis;
            // the branch order below is significant — do not reorder.
            for (i, c) in coord.iter().enumerate() {
                if i == destination {
                    old_coord[source] = *c;
                } else if i == source && source < destination {
                    old_coord[source + 1] = *c;
                } else if i == source && source > destination {
                    old_coord[source - 1] = *c;
                } else if (i < source && source < destination)
                    || (i < destination && source > destination)
                    || (i > source && source > destination)
                    || (i > destination && source < destination)
                {
                    // axes outside the [source, destination] span are unmoved
                    old_coord[i] = *c;
                } else if i > source && source < destination {
                    old_coord[i + 1] = *c;
                } else if i > destination && source > destination {
                    old_coord[i - 1] = *c;
                } else {
                    return Err(TensorError::DimError(
                        "Unknown condition for moving the axis".to_string(),
                    ));
                }
            }
            let value = self.get(&old_coord);
            output.set(&coord, value);
        }
        Ok(output)
    }
    /// Returns a copy with axes `source` and `destination` exchanged
    /// (numpy `swapaxes` semantics).
    pub fn swap_axes(&mut self, source: usize, destination: usize) -> Result<Self, TensorError> {
        assert!(source < self.dims.len());
        assert!(destination < self.dims.len());
        let mut new_dims = self.dims.clone();
        new_dims[source] = self.dims[destination];
        new_dims[destination] = self.dims[source];
        let cartesian_coords = new_dims
            .iter()
            .map(|d| 0..*d)
            .multi_cartesian_product()
            .collect::<Vec<Vec<usize>>>();
        let mut output = Tensor::new(None, &new_dims)?;
        for coord in cartesian_coords {
            let mut old_coord = vec![0; self.dims.len()];
            for (i, c) in coord.iter().enumerate() {
                if i == destination {
                    old_coord[source] = *c;
                } else if i == source {
                    old_coord[destination] = *c;
                } else {
                    old_coord[i] = *c;
                }
            }
            output.set(&coord, self.get(&old_coord));
        }
        Ok(output)
    }
    /// Broadcasts the tensor to `shape`, repeating size-1 axes as needed.
    ///
    /// # Errors
    /// `TensorError::DimError` when `shape` has lower rank, or when a
    /// current dimension is neither 1 nor present in the target shape.
    pub fn expand(&self, shape: &[usize]) -> Result<Self, TensorError> {
        if self.dims().len() > shape.len() {
            return Err(TensorError::DimError(format!(
                "Cannot expand {:?} to the smaller shape {:?}",
                self.dims(),
                shape
            )));
        }
        if shape == self.dims() {
            return Ok(self.clone());
        }
        for d in self.dims() {
            if !(shape.contains(d) || *d == 1) {
                return Err(TensorError::DimError(format!(
                    "The current dimension {} must be contained in the new shape {:?} or be 1",
                    d, shape
                )));
            }
        }
        let cartesian_coords = shape
            .iter()
            .map(|d| 0..*d)
            .multi_cartesian_product()
            .collect::<Vec<Vec<usize>>>();
        let mut output = Tensor::new(None, shape)?;
        for coord in cartesian_coords {
            let mut new_coord = Vec::with_capacity(self.dims().len());
            for (i, c) in coord.iter().enumerate() {
                if i < self.dims().len() && self.dims()[i] == 1 {
                    // broadcast axis: always read position 0
                    new_coord.push(0);
                } else if i >= self.dims().len() {
                    // target axes beyond our rank contribute no coordinate
                } else {
                    new_coord.push(*c);
                }
            }
            output.set(&coord, self.get(&new_coord));
        }
        Ok(output)
    }
    /// Collapses the shape to a single dimension (no-op for scalars and
    /// the canonical empty shape `[0]`).
    pub fn flatten(&mut self) {
        if !self.dims().is_empty() && (self.dims() != [0]) {
            self.dims = Vec::from([self.dims.iter().product::<usize>()]);
        }
    }
    /// Elementwise map preserving the shape (scale/visibility are NOT
    /// carried over to the result).
    pub fn map<F: FnMut(T) -> G, G: TensorType>(&self, mut f: F) -> Tensor<G> {
        let mut t = Tensor::from(self.inner.iter().map(|e| f(e.clone())));
        t.reshape(self.dims()).unwrap();
        t
    }
    /// Fallible elementwise map with the flat index; short-circuits on the
    /// first error.
    pub fn enum_map<F: FnMut(usize, T) -> Result<G, E>, G: TensorType, E: Error>(
        &self,
        mut f: F,
    ) -> Result<Tensor<G>, E> {
        let vec: Result<Vec<G>, E> = self
            .inner
            .iter()
            .enumerate()
            .map(|(i, e)| f(i, e.clone()))
            .collect();
        let mut t: Tensor<G> = Tensor::from(vec?.iter().cloned());
        t.reshape(self.dims()).unwrap();
        Ok(t)
    }
    /// Parallel version of `enum_map`; `f` must be `Sync` as it may run
    /// concurrently across elements.
    pub fn par_enum_map<
        F: Fn(usize, T) -> Result<G, E> + std::marker::Send + std::marker::Sync,
        G: TensorType + std::marker::Send + std::marker::Sync,
        E: Error + std::marker::Send + std::marker::Sync,
    >(
        &self,
        f: F,
    ) -> Result<Tensor<G>, E>
    where
        T: std::marker::Send + std::marker::Sync,
    {
        let vec: Result<Vec<G>, E> = self
            .inner
            .par_iter()
            .enumerate()
            .map(move |(i, e)| f(i, e.clone()))
            .collect();
        let mut t: Tensor<G> = Tensor::from(vec?.iter().cloned());
        t.reshape(self.dims()).unwrap();
        Ok(t)
    }
    /// Overwrites, in parallel, only the elements whose flat index is in
    /// `filter_indices`, with the value produced by `f(index)`.
    // NOTE(review): despite the Result return, an Err from `f` panics via
    // the inner `unwrap` — confirm whether propagation was intended.
    pub fn par_enum_map_mut_filtered<
        F: Fn(usize) -> Result<T, E> + std::marker::Send + std::marker::Sync,
        E: Error + std::marker::Send + std::marker::Sync,
    >(
        &mut self,
        filter_indices: &std::collections::HashSet<&usize>,
        f: F,
    ) -> Result<(), E>
    where
        T: std::marker::Send + std::marker::Sync,
    {
        self.inner
            .par_iter_mut()
            .enumerate()
            .filter(|(i, _)| filter_indices.contains(i))
            .for_each(move |(i, e)| *e = f(i).unwrap());
        Ok(())
    }
}
impl<T: Clone + TensorType> Tensor<Tensor<T>> {
    /// Flattens a tensor of tensors into a single 1-D tensor by
    /// concatenating every inner buffer in order.
    pub fn combine(&self) -> Result<Tensor<T>, TensorError> {
        let mut total_len = 0;
        let mut inner = Vec::new();
        for t in self.inner.iter() {
            total_len += t.len();
            inner.extend(t.inner.iter().cloned());
        }
        Tensor::new(Some(&inner), &[total_len])
    }
}
impl<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sync> Add for Tensor<T> {
type Output = Result<Tensor<T>, TensorError>;
fn add(self, rhs: Self) -> Self::Output {
let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
let mut lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
*o = o.clone() + r;
});
Ok(lhs)
}
}
impl<T: TensorType + Neg<Output = T> + std::marker::Send + std::marker::Sync> Neg for Tensor<T> {
    type Output = Tensor<T>;
    /// Elementwise negation, performed in place on the moved-in tensor.
    fn neg(mut self) -> Self {
        self.par_iter_mut().for_each(|x| {
            *x = x.clone().neg();
        });
        self
    }
}
impl<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sync> Sub for Tensor<T> {
type Output = Result<Tensor<T>, TensorError>; |
fn sub(self, rhs: Self) -> Self::Output {
let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
let mut lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
*o = o.clone() - r;
});
Ok(lhs)
}
}
impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync> Mul for Tensor<T> {
type Output = Result<Tensor<T>, TensorError>;
fn mul(self, rhs: Self) -> Self::Output {
let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
let mut lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
*o = o.clone() * r;
});
Ok(lhs)
}
}
impl<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync> Tensor<T> {
    /// Elementwise integer power via binary exponentiation.
    ///
    /// Fixes the `exp == 0` case: previously the trailing `acc.mul(base)`
    /// returned the base itself, whereas x^0 must be the all-ones tensor.
    pub fn pow(&self, mut exp: u32) -> Result<Self, TensorError> {
        // x^0 == 1 elementwise
        if exp == 0 {
            return Ok(self.map(|_| T::one().unwrap()));
        }
        let mut base = self.clone();
        let mut acc = base.map(|_| T::one().unwrap());
        while exp > 1 {
            // fold the current base in whenever the low bit is set
            if (exp & 1) == 1 {
                acc = acc.mul(base.clone())?;
            }
            exp /= 2;
            base = base.clone().mul(base)?;
        }
        // exp == 1 here: one base factor remains
        acc.mul(base)
    }
}
impl<T: TensorType + Div<Output = T> + std::marker::Send + std::marker::Sync> Div for Tensor<T> {
type Output = Result<Tensor<T>, TensorError>; |
fn div(self, rhs: Self) -> Self::Output {
let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
let mut lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
*o = o.clone() / r;
});
Ok(lhs)
}
}
impl<T: TensorType + Rem<Output = T> + std::marker::Send + std::marker::Sync> Rem for Tensor<T> {
type Output = Result<Tensor<T>, TensorError>;
fn rem(self, rhs: Self) -> Self::Output {
let broadcasted_shape = get_broadcasted_shape(self.dims(), rhs.dims()).unwrap();
let mut lhs = self.expand(&broadcasted_shape).unwrap();
let rhs = rhs.expand(&broadcasted_shape).unwrap();
lhs.par_iter_mut().zip(rhs).for_each(|(o, r)| {
*o = o.clone() % r;
});
Ok(lhs)
}
}
/// Computes the shape two operands broadcast to: the elementwise maximum
/// when ranks match, otherwise the higher-rank shape wins.
///
/// The original `match` on `(a, b)` guards needed an unreachable `_` fallback
/// arm; matching on `Ordering` covers all cases exhaustively and removes the
/// dead error path.
///
/// # Errors
/// Currently infallible; the `Result` is kept for interface stability.
pub fn get_broadcasted_shape(
    shape_a: &[usize],
    shape_b: &[usize],
) -> Result<Vec<usize>, Box<dyn Error>> {
    match shape_a.len().cmp(&shape_b.len()) {
        std::cmp::Ordering::Equal => Ok(shape_a
            .iter()
            .zip(shape_b.iter())
            .map(|(dim_a, dim_b)| *dim_a.max(dim_b))
            .collect()),
        std::cmp::Ordering::Less => Ok(shape_b.to_vec()),
        std::cmp::Ordering::Greater => Ok(shape_a.to_vec()),
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Without #[test] these functions were never executed by the harness,
    // and without #[cfg(test)] the module was compiled into release builds.
    #[test]
    fn test_tensor() {
        let data: Vec<f32> = vec![-1.0f32, 0.0, 1.0, 2.5];
        let tensor = Tensor::<f32>::new(Some(&data), &[2, 2]).unwrap();
        assert_eq!(&tensor[..], &data[..]);
    }
    #[test]
    fn tensor_clone() {
        let x = Tensor::<i32>::new(Some(&[1, 2, 3]), &[3]).unwrap();
        assert_eq!(x, x.clone());
    }
    #[test]
    fn tensor_eq() {
        let a = Tensor::<i32>::new(Some(&[1, 2, 3]), &[3]).unwrap();
        let mut b = Tensor::<i32>::new(Some(&[1, 2, 3]), &[3, 1]).unwrap();
        b.reshape(&[3]).unwrap();
        let c = Tensor::<i32>::new(Some(&[1, 2, 4]), &[3]).unwrap();
        let d = Tensor::<i32>::new(Some(&[1, 2, 4]), &[3, 1]).unwrap();
        assert_eq!(a, b);
        assert_ne!(a, c);
        assert_ne!(a, d);
    }
    #[test]
    fn tensor_slice() {
        let a = Tensor::<i32>::new(Some(&[1, 2, 3, 4, 5, 6]), &[2, 3]).unwrap();
        let b = Tensor::<i32>::new(Some(&[1, 4]), &[2, 1]).unwrap();
        assert_eq!(a.get_slice(&[0..2, 0..1]).unwrap(), b);
    }
}
use super::TensorError;
use crate::tensor::{Tensor, TensorType};
use itertools::Itertools;
use maybe_rayon::{iter::ParallelIterator, prelude::IntoParallelRefIterator};
pub use std::ops::{Add, Mul, Neg, Sub};
/// Zeroes out one triangle of the matrix held in the LAST two axes of `a`
/// (ONNX `Trilu` semantics): keeps the upper triangle from the `k`-th
/// diagonal when `upper`, otherwise the lower triangle.
///
/// The original indexed rows/cols as `dims()[1]`/`dims()[2]`, which is only
/// correct for rank-3 inputs, and produced no iterations at all for rank-2
/// inputs (an empty batch cartesian product yields nothing). Rows/cols are
/// now taken from the last two axes and a single empty batch coordinate is
/// used when there are no batch axes.
///
/// # Errors
/// `TensorError::Unsupported` if `T` has no zero element.
pub fn trilu<T: TensorType + std::marker::Send + std::marker::Sync>(
    a: &Tensor<T>,
    k: i32,
    upper: bool,
) -> Result<Tensor<T>, TensorError> {
    let mut output = a.clone();
    let rank = a.dims().len();
    // the matrix lives in the trailing two axes; anything before is batch
    let rows = a.dims()[rank - 2];
    let cols = a.dims()[rank - 1];
    let batch_dims = &a.dims()[0..rank - 2];
    let mut batch_coords: Vec<Vec<usize>> = batch_dims
        .iter()
        .map(|d| 0..*d)
        .multi_cartesian_product()
        .collect();
    // a rank-2 input still has one (empty) batch coordinate
    if batch_coords.is_empty() {
        batch_coords.push(vec![]);
    }
    for b in batch_coords {
        for i in 0..rows {
            for j in 0..cols {
                let mut coord = b.clone();
                coord.push(i);
                coord.push(j);
                // mask below the k-th diagonal (upper) or above it (lower)
                let masked = if upper {
                    (j as i32) < (i as i32) + k
                } else {
                    (j as i32) > (i as i32) + k
                };
                if masked {
                    output.set(&coord, T::zero().ok_or(TensorError::Unsupported)?);
                }
            }
        }
    }
    Ok(output)
}
/// Nearest-neighbour upsampling: each axis `d` is stretched by the
/// corresponding integer factor in `scales`, repeating source elements.
pub fn resize<T: TensorType + Send + Sync>(
    a: &Tensor<T>,
    scales: &[usize],
) -> Result<Tensor<T>, TensorError> {
    let new_shape: Vec<usize> = scales.iter().zip(a.dims()).map(|(s, d)| s * d).collect();
    let cartesian_coord: Vec<Vec<usize>> = new_shape
        .iter()
        .map(|d| (0..*d))
        .multi_cartesian_product()
        .collect();
    let output = Tensor::new(None, &new_shape)?;
    // every output coordinate maps back to its source by integer division
    let output = output.par_enum_map(|i, _| {
        let coord: Vec<usize> = cartesian_coord[i]
            .iter()
            .zip(scales)
            .map(|(c, scale)| c / scale)
            .collect();
        Ok::<_, TensorError>(a.get(&coord))
    })?;
    Ok(output)
}
/// Sums a non-empty slice of tensors with broadcasting, left to right.
/// Panics if `t` is empty (matches the original indexing behavior).
pub fn add<T: TensorType + Add<Output = T> + std::marker::Send + std::marker::Sync>(
    t: &[Tensor<T>],
) -> Result<Tensor<T>, TensorError> {
    t[1..]
        .iter()
        .try_fold(t[0].clone(), |acc, e| acc + e.clone())
}
/// Subtracts the tail tensors from the head with broadcasting, left to
/// right. Panics if `t` is empty (matches the original indexing behavior).
pub fn sub<T: TensorType + Sub<Output = T> + std::marker::Send + std::marker::Sync>(
    t: &[Tensor<T>],
) -> Result<Tensor<T>, TensorError> {
    t[1..]
        .iter()
        .try_fold(t[0].clone(), |acc, e| acc - e.clone())
}
/// Multiplies a non-empty slice of tensors with broadcasting, left to
/// right. Panics if `t` is empty (matches the original indexing behavior).
pub fn mult<T: TensorType + Mul<Output = T> + std::marker::Send + std::marker::Sync>(
    t: &[Tensor<T>],
) -> Result<Tensor<T>, TensorError> {
    t[1..]
        .iter()
        .try_fold(t[0].clone(), |acc, e| acc * e.clone())
}
/// Keeps every `stride`-th element along axis `dim`, starting at offset
/// `modulo`.
///
/// The `modulo > dims[dim]` guard now runs BEFORE the size arithmetic:
/// previously `input.dims()[dim] - modulo` was computed first, so an
/// oversized `modulo` caused a usize-underflow panic instead of the
/// intended `Err`.
///
/// # Errors
/// `TensorError::DimMismatch` when `modulo` exceeds the axis length.
pub fn downsample<T: TensorType + Send + Sync>(
    input: &Tensor<T>,
    dim: usize,
    stride: usize,
    modulo: usize,
) -> Result<Tensor<T>, TensorError> {
    // validate before any subtraction to avoid usize underflow
    if modulo > input.dims()[dim] {
        return Err(TensorError::DimMismatch("downsample".to_string()));
    }
    let mut output_shape = input.dims().to_vec();
    // NOTE(review): stride == 0 would divide by zero — confirm callers
    // always pass a positive stride.
    let remainder = (input.dims()[dim] - modulo) % stride;
    let div = (input.dims()[dim] - modulo) / stride;
    // a non-zero remainder leaves one extra (partial) stride's element
    output_shape[dim] = div + (remainder > 0) as usize;
    let mut output = Tensor::<T>::new(None, &output_shape)?;
    let indices = (0..output_shape.len())
        .map(|i| {
            if i == dim {
                // positions sampled along the downsampled axis
                let mut index = vec![0; output_shape[i]];
                for (i, idx) in index.iter_mut().enumerate() {
                    *idx = i * stride + modulo;
                }
                index
            } else {
                (0..output_shape[i]).collect_vec()
            }
        })
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    output = output.par_enum_map(|i, _: T| {
        let coord = indices[i].clone();
        Ok(input.get(&coord))
    })?;
    Ok(output)
}
/// Gathers slices of `input` along axis `dim` at the positions listed in
/// `index` (ONNX `Gather`-style). A singleton index removes the axis from
/// the output.
pub fn gather<T: TensorType + Send + Sync>(
    input: &Tensor<T>,
    index: &Tensor<usize>,
    dim: usize,
) -> Result<Tensor<T>, TensorError> {
    // normalize the index to a 1-D tensor so dims()[0] is its length
    let mut index_clone = index.clone();
    index_clone.flatten();
    if index_clone.is_singleton() {
        index_clone.reshape(&[1])?;
    }
    let mut output_size = input.dims().to_vec();
    output_size[dim] = index_clone.dims()[0];
    let mut output = Tensor::new(None, &output_size)?;
    let cartesian_coord = output_size
        .iter()
        .map(|x| 0..*x)
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    output = output.par_enum_map(|i, _: T| {
        let coord = cartesian_coord[i].clone();
        // translate the output position along `dim` into the source position
        let index_val = index_clone.get(&[coord[dim]]);
        let new_coord = coord
            .iter()
            .enumerate()
            .map(|(i, x)| if i == dim { index_val } else { *x })
            .collect::<Vec<_>>();
        Ok(input.get(&new_coord))
    })?;
    // a scalar index collapses the gathered axis entirely
    if index.is_singleton() {
        output_size.remove(dim);
    }
    output.reshape(&output_size)?;
    Ok(output)
}
/// Scatters `src` into a copy of `input`: for every coordinate of `src`,
/// the target position along `dim` is taken from `index` (same shape as
/// `src`); all other axes are copied through unchanged.
pub fn scatter<T: TensorType + Send + Sync>(
    input: &Tensor<T>,
    index: &Tensor<usize>,
    src: &Tensor<T>,
    dim: usize,
) -> Result<Tensor<T>, TensorError> {
    assert_eq!(index.dims(), src.dims());
    let mut output = input.clone();
    let coords = src
        .dims()
        .iter()
        .map(|x| 0..*x)
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    for coord in &coords {
        let mut target = coord.clone();
        // redirect the scatter axis through the index tensor
        target[dim] = index.get(coord);
        output.set(&target, src.get(coord));
    }
    Ok(output)
}
/// ONNX `GatherElements`: the output has the shape of `index`; each output
/// element is read from `input` at the same coordinate, except along `dim`
/// where the coordinate is replaced by the value stored in `index`.
pub fn gather_elements<T: TensorType + Send + Sync>(
    input: &Tensor<T>,
    index: &Tensor<usize>,
    dim: usize,
) -> Result<Tensor<T>, TensorError> {
    let output_size = index.dims().to_vec();
    // ranks must match; only the coordinate along `dim` is remapped
    assert_eq!(input.dims().len(), index.dims().len());
    let mut output = Tensor::new(None, &output_size)?;
    let cartesian_coord = output_size
        .iter()
        .map(|x| 0..*x)
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    output = output.par_enum_map(|i, _: T| {
        let coord = cartesian_coord[i].clone();
        let index_val = index.get(&coord);
        let mut new_coord = coord.clone();
        new_coord[dim] = index_val;
        let val = input.get(&new_coord);
        Ok(val)
    })?;
    output.reshape(&output_size)?;
    Ok(output)
}
/// ONNX `GatherND`: the last axis of `index` holds partial coordinates into
/// `input` (after `batch_dims` shared leading axes); gathered slices are
/// concatenated into the documented output shape.
pub fn gather_nd<T: TensorType + Send + Sync>(
    input: &Tensor<T>,
    index: &Tensor<usize>,
    batch_dims: usize,
) -> Result<Tensor<T>, TensorError> {
    let index_dims = index.dims().to_vec();
    let input_dims = input.dims().to_vec();
    // length of each partial coordinate stored in the last index axis
    let last_value = index_dims
        .last()
        .ok_or(TensorError::DimMismatch("gather_nd".to_string()))?;
    if last_value > &(input_dims.len() - batch_dims) {
        return Err(TensorError::DimMismatch("gather_nd".to_string()));
    }
    // output rank per the ONNX spec:
    // q + r - indices_shape[-1] - 1 - b  (q = index rank, r = input rank)
    let output_size =
    {
        let output_rank = input_dims.len() + index_dims.len() - 1 - batch_dims - last_value;
        let mut dims = index_dims[..index_dims.len() - 1].to_vec();
        let input_offset = batch_dims + last_value;
        dims.extend(input_dims[input_offset..input_dims.len()].to_vec());
        assert_eq!(output_rank, dims.len());
        dims
    };
    let mut batch_cartesian_coord = input_dims[0..batch_dims]
        .iter()
        .map(|x| 0..*x)
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    // with no batch axes there is still one (empty) batch coordinate
    if batch_cartesian_coord.is_empty() {
        batch_cartesian_coord.push(vec![]);
    }
    let outputs = batch_cartesian_coord
        .par_iter()
        .map(|batch_coord| {
            // narrow both tensors to this batch, then drop the batch axes
            let batch_slice = batch_coord.iter().map(|x| *x..*x + 1).collect::<Vec<_>>();
            let mut index_slice = index.get_slice(&batch_slice)?;
            index_slice.reshape(&index.dims()[batch_dims..])?;
            let mut input_slice = input.get_slice(&batch_slice)?;
            input_slice.reshape(&input.dims()[batch_dims..])?;
            // iterate over every partial coordinate stored in the index
            let mut inner_cartesian_coord = index_slice.dims()[0..index_slice.dims().len() - 1]
                .iter()
                .map(|x| 0..*x)
                .multi_cartesian_product()
                .collect::<Vec<_>>();
            if inner_cartesian_coord.is_empty() {
                inner_cartesian_coord.push(vec![]);
            }
            let output = inner_cartesian_coord
                .iter()
                .map(|coord| {
                    let slice = coord
                        .iter()
                        .map(|x| *x..*x + 1)
                        .chain(batch_coord.iter().map(|x| *x..*x + 1))
                        .collect::<Vec<_>>();
                    // the stored values become leading coordinates of the
                    // slice gathered from the input
                    let index_slice = index_slice
                        .get_slice(&slice)
                        .unwrap()
                        .iter()
                        .map(|x| *x..*x + 1)
                        .collect::<Vec<_>>();
                    input_slice.get_slice(&index_slice).unwrap()
                })
                .collect::<Tensor<_>>();
            // flatten this batch's gathered slices into one 1-D tensor
            output.combine()
        })
        .collect::<Result<Vec<_>, _>>()?;
    let mut outputs = outputs.into_iter().flatten().collect::<Tensor<_>>();
    outputs.reshape(&output_size)?;
    Ok(outputs)
}
/// ONNX `ScatterND`: for every position in `index` (minus its last axis),
/// the stored partial coordinate selects a region of a copy of `input`,
/// which is overwritten with the matching slice of `src`.
pub fn scatter_nd<T: TensorType + Send + Sync>(
    input: &Tensor<T>,
    index: &Tensor<usize>,
    src: &Tensor<T>,
) -> Result<Tensor<T>, TensorError> {
    let index_dims = index.dims().to_vec();
    let input_dims = input.dims().to_vec();
    // length of each partial coordinate stored in the last index axis
    let last_value = index_dims
        .last()
        .ok_or(TensorError::DimMismatch("scatter_nd".to_string()))?;
    if last_value > &input_dims.len() {
        return Err(TensorError::DimMismatch("scatter_nd".to_string()));
    }
    let mut output = input.clone();
    let cartesian_coord = index_dims[0..index_dims.len() - 1]
        .iter()
        .map(|x| 0..*x)
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    cartesian_coord
        .iter()
        .map(|coord| {
            let slice = coord.iter().map(|x| *x..*x + 1).collect::<Vec<_>>();
            // the values stored at this index position become the target
            // coordinates in the output
            let index_val = index.get_slice(&slice)?;
            let index_slice = index_val.iter().map(|x| *x..*x + 1).collect::<Vec<_>>();
            let src_val = src.get_slice(&slice)?;
            output.set_slice(&index_slice, &src_val)?;
            Ok(())
        })
        .collect::<Result<Vec<_>, _>>()?;
    Ok(output)
}
/// Elementwise absolute value: negative entries are negated in a copy of
/// `a` (shape and metadata preserved via the clone).
pub fn abs<T: TensorType + Add<Output = T> + std::cmp::Ord + Neg<Output = T>>(
    a: &Tensor<T>,
) -> Result<Tensor<T>, TensorError> {
    let mut result = a.clone();
    for v in result.iter_mut() {
        if *v < T::zero().unwrap() {
            *v = -v.clone();
        }
    }
    Ok(result)
}
/// Inserts `stride - 1` copies of `value` between consecutive elements
/// along `axis` (used e.g. to dilate inputs for transposed convolution).
/// A stride of 1 is a no-op.
pub fn intercalate_values<T: TensorType>(
    tensor: &Tensor<T>,
    value: T,
    stride: usize,
    axis: usize,
) -> Result<Tensor<T>, TensorError> {
    if stride == 1 {
        return Ok(tensor.clone());
    }
    // n elements with stride s occupy n*s - 1 slots (no padding after last)
    let mut output_dims = tensor.dims().to_vec();
    output_dims[axis] = output_dims[axis] * stride - 1;
    let mut output: Tensor<T> = Tensor::new(None, &output_dims)?;
    let cartesian_coord = output
        .dims()
        .iter()
        .map(|d| (0..*d))
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    // relies on cartesian_coord enumerating positions in the same row-major
    // order as the source iterator
    let mut tensor_slice_iter = tensor.iter();
    output.iter_mut().enumerate().for_each(|(i, o)| {
        let coord = &cartesian_coord[i];
        if coord[axis] % stride == 0 {
            // original element positions
            *o = tensor_slice_iter.next().unwrap().clone();
        } else {
            // gap positions get the fill value
            *o = value.clone();
        }
    });
    Ok(output)
}
/// One-hot encodes `tensor` along a new axis of length `num_classes`
/// inserted at `axis`.
///
/// # Errors
/// `TensorError::DimMismatch` when an element is not a valid class index,
/// i.e. not in `0..num_classes`. The check is now `>=` — previously only
/// values strictly greater than `num_classes` were rejected, so
/// `elem == num_classes` silently produced an all-zero row.
pub fn one_hot(
    tensor: &Tensor<i128>,
    num_classes: usize,
    axis: usize,
) -> Result<Tensor<i128>, TensorError> {
    let mut output_dims = tensor.dims().to_vec();
    output_dims.insert(axis, num_classes);
    let mut output: Tensor<i128> = Tensor::new(None, &output_dims)?;
    let cartesian_coord = output
        .dims()
        .iter()
        .map(|d| (0..*d))
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    output
        .iter_mut()
        .enumerate()
        .map(|(i, o)| {
            let coord = &cartesian_coord[i];
            let coord_axis = coord[axis];
            // drop the one-hot axis to recover the source coordinate
            let mut coord_without_axis = coord.clone();
            coord_without_axis.remove(axis);
            let elem = tensor.get(&coord_without_axis) as usize;
            // valid class indices are 0..num_classes (exclusive)
            if elem >= num_classes {
                return Err(TensorError::DimMismatch(format!(
                    "Expected element to be less than num_classes, but got {}",
                    elem
                )));
            };
            *o = if coord_axis == elem { 1 } else { 0 };
            Ok(())
        })
        .collect::<Result<Vec<()>, TensorError>>()?;
    Ok(output)
}
/// Zero-pads `image` along the axes from `offset` onward; `padding[i]`
/// holds the (before, after) amounts for axis `offset + i`. Axes before
/// `offset` (e.g. batch/channel) are left untouched.
pub fn pad<T: TensorType>(
    image: &Tensor<T>,
    padding: Vec<(usize, usize)>,
    offset: usize,
) -> Result<Tensor<T>, TensorError> {
    let padded_dims = image.dims()[offset..]
        .iter()
        .enumerate()
        .map(|(i, d)| d + padding[i].0 + padding[i].1)
        .collect::<Vec<_>>();
    let mut output_dims = image.dims()[..offset].to_vec();
    output_dims.extend(padded_dims);
    // output starts zero-filled; only interior positions are overwritten
    let mut output = Tensor::<T>::new(None, &output_dims).unwrap();
    let cartesian_coord = image
        .dims()
        .iter()
        .map(|d| (0..*d))
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    for coord in cartesian_coord {
        let rest = &coord[offset..];
        // shift each padded axis by its leading pad amount
        let mut padded_res = coord[..offset].to_vec();
        padded_res.extend(rest.iter().zip(padding.iter()).map(|(c, p)| c + p.0));
        let image_val = image.get(&coord);
        output.set(&padded_res, image_val);
    }
    output.reshape(&output_dims)?;
    Ok(output)
}
/// Concatenates `inputs` along `axis`; all other axes must already agree
/// (shapes are taken from the first input).
///
/// The original per-coordinate loop accumulated an `index` variable that
/// was never read (dead code) and only ever acted when it reached `axis`;
/// it is replaced by a direct remapping of the axis coordinate.
pub fn concat<T: TensorType + Send + Sync>(
    inputs: &[&Tensor<T>],
    axis: usize,
) -> Result<Tensor<T>, TensorError> {
    if inputs.len() == 1 {
        return Ok(inputs[0].clone());
    }
    let mut output_size = inputs[0].dims().to_vec();
    output_size[axis] = inputs.iter().map(|x| x.dims()[axis]).sum();
    let mut output = Tensor::new(None, &output_size)?;
    let cartesian_coord = output_size
        .iter()
        .map(|x| 0..*x)
        .multi_cartesian_product()
        .collect::<Vec<_>>();
    // maps an output position along `axis` to (which input, position within
    // that input's axis)
    let get_input_index = |index_along_axis: usize| -> (usize, usize) {
        let mut current_idx = 0;
        let mut input_idx = 0;
        let mut input_coord_at_idx = 0;
        for (i, elem) in inputs.iter().enumerate() {
            current_idx += elem.dims()[axis];
            if index_along_axis < current_idx {
                input_idx = i;
                input_coord_at_idx = index_along_axis - (current_idx - elem.dims()[axis]);
                break;
            }
        }
        (input_idx, input_coord_at_idx)
    };
    output = output.par_enum_map(|i, _: T| {
        let coord = cartesian_coord[i].clone();
        let mut input_coord = coord.clone();
        // only the concat axis needs remapping; all other axes pass through
        let (input_index, coord_along_axis) = get_input_index(coord[axis]);
        input_coord[axis] = coord_along_axis;
        Ok(inputs[input_index].get(&input_coord))
    })?;
    output.reshape(&output_size)?;
    Ok(output)
}
/// Slices `t` to `start..end` along `axis`, taking every other axis in
/// full.
pub fn slice<T: TensorType + Send + Sync>(
    t: &Tensor<T>,
    axis: &usize,
    start: &usize,
    end: &usize,
) -> Result<Tensor<T>, TensorError> {
    let slice = t
        .dims()
        .iter()
        .enumerate()
        .map(|(i, d)| if i == *axis { *start..*end } else { 0..*d })
        .collect::<Vec<_>>();
    t.get_slice(&slice)
}
pub mod nonlinearities {
use super::*;
pub fn ceil(a: &Tensor<i128>, scale: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale;
let rounded = kix.ceil() * scale;
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn floor(a: &Tensor<i128>, scale: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale;
let rounded = kix.floor() * scale;
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn round(a: &Tensor<i128>, scale: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale;
let rounded = kix.round() * scale;
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn round_half_to_even(a: &Tensor<i128>, scale: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale;
let rounded = kix.round_ties_even() * scale;
Ok::<_, TensorError>(rounded as i128)
})
.unwra |
p()
}
/// Raises each rescaled element to `power`, then rounds back to fixed point.
pub fn pow(a: &Tensor<i128>, scale_input: f64, power: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| {
        let real = (v as f64) / scale_input;
        Ok::<_, TensorError>((scale_input * real.powf(power)).round() as i128)
    })
    .unwrap()
}
/// Elementwise indicator of zero: 1 where the element equals zero, 0 elsewhere.
pub fn kronecker_delta<T: TensorType + std::cmp::PartialEq + Send + Sync>(
    a: &Tensor<T>,
) -> Tensor<T> {
    a.par_enum_map(|_, v| {
        let out = if v == T::zero().unwrap() {
            T::one().unwrap()
        } else {
            T::zero().unwrap()
        };
        Ok::<_, TensorError>(out)
    })
    .unwrap()
}
pub fn sigmoid(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input / (1.0 + (-kix).exp());
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn hardswish(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let res = if kix <= -3.0 {
0.0
} else if kix >= 3.0 {
kix
} else {
kix * (kix + 3.0) / 6.0
};
let rounded = (res * scale_input).round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
} |
pub fn exp(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.exp();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn ln(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.ln();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn sign(a: &Tensor<i128>) -> Tensor<i128> {
a.par_enum_map(|_, a_i| Ok::<_, TensorError>(a_i.signum()))
.unwrap()
}
pub fn sqrt(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.sqrt();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn rsqrt(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input / (kix.sqrt() + f64::EPSILON);
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
} |
pub fn cos(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.cos();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn acos(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.acos();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn cosh(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.cosh();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn acosh(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.acosh();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn sin(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.sin();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
} |
pub fn asin(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.asin();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn sinh(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.sinh();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn asinh(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.asinh();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn tan(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.tan();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn atan(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.atan();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
} |
pub fn tanh(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.tanh();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
pub fn atanh(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
a.par_enum_map(|_, a_i| {
let kix = (a_i as f64) / scale_input;
let fout = scale_input * kix.atanh();
let rounded = fout.round();
Ok::<_, TensorError>(rounded as i128)
})
.unwrap()
}
/// Elementwise error function `erf` of the rescaled input, computed via a
/// Chebyshev approximation of the complementary error function `erfc`.
pub fn erffunc(a: &Tensor<i128>, scale_input: f64) -> Tensor<i128> {
    // Number of Chebyshev coefficients in the erfc expansion.
    const NCOEF: usize = 28;
    // Chebyshev coefficients for erfc on the transformed domain.
    const COF: [f64; 28] = [
        -1.3026537197817094,
        6.419_697_923_564_902e-1,
        1.9476473204185836e-2,
        -9.561_514_786_808_63e-3,
        -9.46595344482036e-4,
        3.66839497852761e-4,
        4.2523324806907e-5,
        -2.0278578112534e-5,
        -1.624290004647e-6,
        1.303655835580e-6,
        1.5626441722e-8,
        -8.5238095915e-8,
        6.529054439e-9,
        5.059343495e-9,
        -9.91364156e-10,
        -2.27365122e-10,
        9.6467911e-11,
        2.394038e-12,
        -6.886027e-12,
        8.94487e-13,
        3.13092e-13,
        -1.12708e-13,
        3.81e-16,
        7.106e-15,
        -1.523e-15,
        -9.4e-17,
        1.21e-16,
        -2.8e-17,
    ];
    /// Chebyshev (Clenshaw) evaluation of erfc(z), valid for z >= 0 only.
    // NOTE(review): `(1..NCOEF - 1)` never touches COF[NCOEF - 1]; the
    // Numerical Recipes erfccheb iterates j = ncof-1 down to 1, which would
    // use the last coefficient — confirm it is intentionally unused here.
    fn erfccheb(z: f64) -> f64 {
        let mut d = 0f64;
        let mut dd = 0f64;
        assert!(z >= 0f64, "erfccheb requires nonnegative argument");
        // Map z in [0, inf) to the Chebyshev variable ty in [-2, 2).
        let t = 2f64 / (2f64 + z);
        let ty = 4f64 * t - 2f64;
        for j in (1..NCOEF - 1).rev() {
            let tmp = d;
            d = ty * d - dd + COF[j];
            dd = tmp;
        }
        t * (-z.powi(2) + 0.5 * (COF[0] + ty * d) - dd).exp()
    }
    /// erf built from erfc using the odd symmetry erf(-x) = -erf(x).
    pub fn erf(x: f64) -> f64 {
        if x >= 0f64 {
            1.0 - erfccheb(x)
        } else {
            erfccheb(-x) - 1f64
        }
    }
    a.par_enum_map(|_, a_i| {
        let kix = (a_i as f64) / scale_input;
        let fout = scale_input * erf(kix);
        let rounded = fout.round();
        Ok::<_, TensorError>(rounded as i128)
    })
    .unwrap()
}
/// Leaky ReLU: negative inputs are scaled by `slope`, non-negative inputs pass
/// through unchanged (modulo the f64 round trip).
pub fn leakyrelu(a: &Tensor<i128>, slope: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| {
        let scaled = if v < 0 { slope * (v as f64) } else { v as f64 };
        Ok::<_, TensorError>(scaled.round() as i128)
    })
    .unwrap()
}
/// Clamps each rescaled element from below at `threshold`, emitting the
/// result back at `scale_input`.
pub fn max(a: &Tensor<i128>, scale_input: f64, threshold: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| {
        let real = (v as f64) / scale_input;
        let clamped = if real <= threshold { threshold } else { real };
        Ok::<_, TensorError>((clamped * scale_input).round() as i128)
    })
    .unwrap()
}
/// Clamps each rescaled element from above at `threshold`, emitting the
/// result back at `scale_input`.
pub fn min(a: &Tensor<i128>, scale_input: f64, threshold: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| {
        let real = (v as f64) / scale_input;
        let clamped = if real >= threshold { threshold } else { real };
        Ok::<_, TensorError>((clamped * scale_input).round() as i128)
    })
    .unwrap()
}
/// Divides every element by the constant `denom`, rounding to nearest.
pub fn const_div(a: &Tensor<i128>, denom: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| Ok::<_, TensorError>(((v as f64) / denom).round() as i128))
        .unwrap()
}
/// Elementwise reciprocal of the rescaled input, emitted at `out_scale`;
/// EPSILON in the denominator guards against division by zero.
pub fn recip(a: &Tensor<i128>, input_scale: f64, out_scale: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| {
        let real = (v as f64) / input_scale;
        // Kept as 1/(x+eps) then multiply, so results match bit-for-bit.
        let inv = 1_f64 / (real + f64::EPSILON);
        Ok::<_, TensorError>((out_scale * inv).round() as i128)
    })
    .unwrap()
}
/// The "reciprocal of zero" sentinel: a single-element tensor holding
/// round(out_scale / EPSILON).
pub fn zero_recip(out_scale: f64) -> Tensor<i128> {
    let zero = Tensor::<i128>::new(Some(&[0]), &[1]).unwrap();
    zero.par_enum_map(|_, v| {
        let inv = 1_f64 / ((v as f64) + f64::EPSILON);
        Ok::<_, TensorError>((out_scale * inv).round() as i128)
    })
    .unwrap()
}
/// 1 where the element exceeds `b`, 0 elsewhere.
pub fn greater_than(a: &Tensor<i128>, b: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| Ok::<_, TensorError>(i128::from((v as f64 - b) > 0_f64)))
        .unwrap()
}
/// 1 where the element is at least `b`, 0 elsewhere.
pub fn greater_than_equal(a: &Tensor<i128>, b: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| Ok::<_, TensorError>(i128::from((v as f64 - b) >= 0_f64)))
        .unwrap()
}
/// 1 where the element is below `b`, 0 elsewhere.
pub fn less_than(a: &Tensor<i128>, b: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| Ok::<_, TensorError>(i128::from((v as f64 - b) < 0_f64)))
        .unwrap()
}
/// 1 where the element is at most `b`, 0 elsewhere.
pub fn less_than_equal(a: &Tensor<i128>, b: f64) -> Tensor<i128> {
    a.par_enum_map(|_, v| Ok::<_, TensorError>(i128::from((v as f64 - b) <= 0_f64)))
        .unwrap()
}
}
pub mod accumulated {
use super::*;
/// Accumulated dot product: one running partial sum of ⟨a, b⟩ per
/// `chunk_size`-sized chunk of the elementwise products.
///
/// # Errors
/// Returns [`TensorError::DimMismatch`] when the two inputs differ in length.
pub fn dot<T: TensorType + Mul<Output = T> + Add<Output = T>>(
    inputs: &[Tensor<T>; 2],
    chunk_size: usize,
) -> Result<Tensor<T>, TensorError> {
    // Compare lengths directly — the previous code cloned both tensors just
    // to measure them.
    if inputs[0].len() != inputs[1].len() {
        return Err(TensorError::DimMismatch("dot".to_string()));
    }
    let (a, b): (Tensor<T>, Tensor<T>) = (inputs[0].clone(), inputs[1].clone());
    let transcript: Tensor<T> = a
        .iter()
        .zip(b)
        .chunks(chunk_size)
        .into_iter()
        // `scan` carries the running total across chunks, emitting it after each one.
        .scan(T::zero().unwrap(), |acc, chunk| {
            let k = chunk.fold(T::zero().unwrap(), |acc, (a_i, b_i)| {
                acc.clone() + a_i.clone() * b_i.clone()
            });
            *acc = acc.clone() + k.clone();
            Some(acc.clone())
        })
        .collect();
    Ok(transcript)
}
/// Accumulated sum: one running partial total per `chunk_size` chunk of `a`.
pub fn sum<T: TensorType + Mul<Output = T> + Add<Output = T>>(
    a: &Tensor<T>,
    chunk_size: usize,
) -> Result<Tensor<T>, TensorError> {
    let transcript: Tensor<T> = a
        .iter()
        .chunks(chunk_size)
        .into_iter()
        // `scan` carries the running total across chunks, emitting it after each one.
        .scan(T::zero().unwrap(), |acc, chunk| {
            let k = chunk.fold(T::zero().unwrap(), |acc, a_i| acc.clone() + a_i.clone());
            *acc = acc.clone() + k.clone();
            Some(acc.clone())
        })
        .collect();
    Ok(transcript)
}
/// Accumulated product: one running partial product per `chunk_size` chunk of `a`.
pub fn prod<T: TensorType + Mul<Output = T> + Add<Output = T>>(
    a: &Tensor<T>,
    chunk_size: usize,
) -> Result<Tensor<T>, TensorError> {
    let transcript: Tensor<T> = a
        .iter()
        .chunks(chunk_size)
        .into_iter()
        // Same scan pattern as `sum`, seeded with one and combined by multiplication.
        .scan(T::one().unwrap(), |acc, chunk| {
            let k = chunk.fold(T::one().unwrap(), |acc, a_i| acc.clone() * a_i.clone());
            *acc = acc.clone() * k.clone();
            Some(acc.clone())
        })
        .collect();
    Ok(transcript)
}
} |
use core::{iter::FilterMap, slice::Iter};
use crate::circuit::region::ConstantsMap;
use super::{
ops::{intercalate_values, pad, resize},
*,
};
use halo2_proofs::{arithmetic::Field, circuit::Cell, plonk::Instance};
/// Builds a fixed-visibility [`ValTensor`] containing `len` copies of the
/// constant `val`.
pub(crate) fn create_constant_tensor<
    F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
>(
    val: F,
    len: usize,
) -> ValTensor<F> {
    let mut inner = Tensor::from(vec![ValType::Constant(val); len].into_iter());
    // Constants live in fixed columns.
    inner.set_visibility(&crate::graph::Visibility::Fixed);
    ValTensor::from(inner)
}
pub(crate) fn create_unit_tensor<
F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
>(
len: usize,
) -> ValTensor<F> {
let mut unit = Tensor::from(vec![ValType::Constant(F::ONE); len].into_iter());
unit.set_visibility(&crate::graph::Visibility::Fixed);
ValTensor::from(unit)
}
pub(crate) fn create_zero_tensor<
F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd,
>(
len: usize,
) -> ValTensor<F> {
let mut zero = Tensor::from(vec![ValType::Constant(F::ZERO); len].into_iter());
zero.set_visibility(&crate::graph::Visibility::Fixed);
ValTensor::from(zero)
}
/// A single circuit value in its various lifecycle stages.
pub enum ValType<F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd> {
    /// A (possibly unknown) witness value, not yet assigned to a cell.
    Value(Value<F>),
    /// A witness value in rational (`Assigned`) form, not yet in a cell.
    AssignedValue(Value<Assigned<F>>),
    /// A value already assigned to a cell in a previous region.
    PrevAssigned(AssignedCell<F, F>),
    /// A constant known at keygen time.
    Constant(F),
    /// A constant that has also been assigned to a cell; carries both.
    AssignedConstant(AssignedCell<F, F>, F),
}
impl<F: PrimeField + TensorType + std::marker::Send + std::marker::Sync + PartialOrd> ValType<F> {
    /// Returns the halo2 [`Cell`] backing this value, if it has been assigned.
    pub fn cell(&self) -> Option<Cell> {
        match self {
            ValType::PrevAssigned(cell) => Some(cell.cell()),
            ValType::AssignedConstant(cell, _) => Some(cell.cell()),
            _ => None,
        }
    }
    /// Returns a clone of the backing [`AssignedCell`], if any.
    pub fn assigned_cell(&self) -> Option<AssignedCell<F, F>> {
        match self {
            ValType::PrevAssigned(cell) => Some(cell.clone()),
            ValType::AssignedConstant(cell, _) => Some(cell.clone()),
            _ => None,
        }
    }
    /// True for values that already live in an assigned cell.
    pub fn is_prev_assigned(&self) -> bool {
        matches!(
            self,
            ValType::PrevAssigned(_) | ValType::AssignedConstant(..)
        )
    }
    /// True for constants, whether or not they have been assigned.
    pub fn is_constant(&self) -> bool {
        matches!(self, ValType::Constant(_) | ValType::AssignedConstant(..))
    }
    /// Evaluates the underlying field element, when it is known in this context.
    /// Unknown witness values yield `None`.
    pub fn get_felt_eval(&self) -> Option<F> {
        let mut res = None;
        match self {
            ValType::Value(v) => {
                // `Value::map` only runs the closure when the value is known.
                v.map(|f| {
                    res = Some(f);
                });
            }
            ValType::AssignedValue(v) => {
                v.map(|f| {
                    res = Some(f.evaluate());
                });
            }
            ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
                v.value_field().map(|f| {
                    res = Some(f.evaluate());
                });
            }
            ValType::Constant(v) => {
                res = Some(*v);
            }
        }
        res
    }
    /// Alias of [`Self::assigned_cell`], kept for backwards compatibility.
    pub fn get_prev_assigned(&self) -> Option<AssignedCell<F, F>> {
        // Identical semantics — delegate instead of duplicating the match.
        self.assigned_cell()
    }
}
impl<F: PrimeField + TensorType + PartialOrd> From<ValType<F>> for i32 {
    /// Converts the (evaluated) field element to an `i32` via `felt_to_i32`;
    /// values unknown in this context map to 0.
    fn from(val: ValType<F>) -> Self {
        // The previous version also kept `i` counters that were incremented
        // but never read; they have been removed.
        match val {
            ValType::Value(v) => {
                let mut output = 0_i32;
                v.map(|y| {
                    output = felt_to_i32(y);
                });
                output
            }
            ValType::AssignedValue(v) => {
                let mut output = 0_i32;
                v.evaluate().map(|y| {
                    output = felt_to_i32(y);
                });
                output
            }
            ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
                let mut output = 0_i32;
                v.value().map(|y| {
                    output = felt_to_i32(*y);
                });
                output
            }
            ValType::Constant(v) => felt_to_i32(v),
        }
    }
}
/// A bare field element becomes an (unassigned) circuit constant.
impl<F: PrimeField + TensorType + PartialOrd> From<F> for ValType<F> {
    fn from(t: F) -> ValType<F> {
        ValType::Constant(t)
    }
}
/// A (possibly unknown) value becomes a plain witness value.
impl<F: PrimeField + TensorType + PartialOrd> From<Value<F>> for ValType<F> {
    fn from(t: Value<F>) -> ValType<F> {
        ValType::Value(t)
    }
}
/// A rational-form value is stored without evaluating it.
impl<F: PrimeField + TensorType + PartialOrd> From<Value<Assigned<F>>> for ValType<F> {
    fn from(t: Value<Assigned<F>>) -> ValType<F> {
        ValType::AssignedValue(t)
    }
}
/// A cell assigned in a prior region is wrapped as `PrevAssigned`.
impl<F: PrimeField + TensorType + PartialOrd> From<AssignedCell<F, F>> for ValType<F> {
    fn from(t: AssignedCell<F, F>) -> ValType<F> {
        ValType::PrevAssigned(t)
    }
}
/// Additive/multiplicative identities for `ValType` are circuit constants.
impl<F: PrimeField + TensorType + PartialOrd> TensorType for ValType<F>
where
    F: Field,
{
    fn zero() -> Option<Self> {
        Some(ValType::Constant(<F as Field>::ZERO))
    }
    fn one() -> Option<Self> {
        Some(ValType::Constant(<F as Field>::ONE))
    }
}
/// A tensor of circuit values: either a materialized tensor of [`ValType`]s
/// or a view over a public-instance column.
pub enum ValTensor<F: PrimeField + TensorType + PartialOrd> {
    /// A tensor of witness/constant values with explicit dims and fixed-point scale.
    Value {
        inner: Tensor<ValType<F>>,
        dims: Vec<usize>,
        scale: crate::Scale,
    },
    /// A view over an instance column; `dims[idx]` is the currently-active shape.
    Instance {
        inner: Column<Instance>,
        dims: Vec<Vec<usize>>,
        idx: usize,
        initial_offset: usize,
        scale: crate::Scale,
    },
}
/// The zero tensor is an empty-shape `Value` tensor at scale 0.
impl<F: PrimeField + TensorType + PartialOrd> TensorType for ValTensor<F> {
    fn zero() -> Option<Self> {
        Some(ValTensor::Value {
            inner: Tensor::zero()?,
            dims: vec![],
            scale: 0,
        })
    }
}
/// Wraps a tensor of [`ValType`]s as a `Value` tensor at scale 1.
impl<F: PrimeField + TensorType + PartialOrd> From<Tensor<ValType<F>>> for ValTensor<F> {
    fn from(t: Tensor<ValType<F>>) -> ValTensor<F> {
        ValTensor::Value {
            // NOTE(review): `map(|x| x)` builds a fresh tensor instead of moving
            // `t`; presumably this normalizes tensor metadata (e.g. visibility)
            // — confirm before simplifying to a plain move.
            inner: t.map(|x| x),
            dims: t.dims().to_vec(),
            scale: 1,
        }
    }
}
/// Wraps a flat vector of [`ValType`]s as a 1-D `Value` tensor at scale 1.
impl<F: PrimeField + TensorType + PartialOrd> From<Vec<ValType<F>>> for ValTensor<F> {
    fn from(t: Vec<ValType<F>>) -> ValTensor<F> {
        // Capture the length before consuming `t` — the previous version
        // cloned the whole vector just to read `t.len()` afterwards.
        let dims = vec![t.len()];
        ValTensor::Value {
            inner: t.into_iter().into(),
            dims,
            scale: 1,
        }
    }
}
/// Converts a tensor of raw field elements into a [`ValTensor`].
/// The source tensor's visibility decides the element kind: `Fixed` becomes
/// circuit constants, anything else becomes known witness values.
impl<F: PrimeField + TensorType + PartialOrd> TryFrom<Tensor<F>> for ValTensor<F> {
    type Error = Box<dyn Error>;
    /// Fails when the source tensor has no visibility set.
    fn try_from(t: Tensor<F>) -> Result<ValTensor<F>, Box<dyn Error>> {
        let visibility = t.visibility.clone();
        let dims = t.dims().to_vec();
        let inner = t.into_iter().map(|x| {
            if let Some(vis) = &visibility {
                match vis {
                    Visibility::Fixed => Ok(ValType::Constant(x)),
                    _ => {
                        Ok(Value::known(x).into())
                    }
                }
            }
            else {
                Err("visibility should be set to convert a tensor of field elements to a ValTensor.".into())
            }
        }).collect::<Result<Vec<_>, Box<dyn Error>>>()?;
        // Re-tensorize and restore the original shape (iteration flattened it).
        let mut inner: Tensor<ValType<F>> = inner.into_iter().into();
        inner.reshape(&dims)?;
        Ok(ValTensor::Value {
            inner,
            dims,
            scale: 1,
        })
    }
}
/// Wraps a tensor of (possibly unknown) witness values at scale 1.
impl<F: PrimeField + TensorType + PartialOrd> From<Tensor<Value<F>>> for ValTensor<F> {
    fn from(t: Tensor<Value<F>>) -> ValTensor<F> {
        ValTensor::Value {
            inner: t.map(|x| x.into()),
            dims: t.dims().to_vec(),
            scale: 1,
        }
    }
}
/// Wraps a tensor of rational-form values at scale 1.
impl<F: PrimeField + TensorType + PartialOrd> From<Tensor<Value<Assigned<F>>>> for ValTensor<F> {
    fn from(t: Tensor<Value<Assigned<F>>>) -> ValTensor<F> {
        ValTensor::Value {
            inner: t.map(|x| x.into()),
            dims: t.dims().to_vec(),
            scale: 1,
        }
    }
}
/// Wraps a tensor of previously assigned cells at scale 1.
impl<F: PrimeField + TensorType + PartialOrd> From<Tensor<AssignedCell<F, F>>> for ValTensor<F> {
    fn from(t: Tensor<AssignedCell<F, F>>) -> ValTensor<F> {
        ValTensor::Value {
            inner: t.map(|x| x.into()),
            dims: t.dims().to_vec(),
            scale: 1,
        }
    }
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ValTensor<F> {
/// Lifts an integer tensor into known witness values via `i128_to_felt`.
pub fn from_i128_tensor(t: Tensor<i128>) -> ValTensor<F> {
    let inner = t.map(|x| ValType::Value(Value::known(i128_to_felt(x))));
    inner.into()
}
/// Allocates a fresh instance column (with equality enabled) and wraps it.
pub fn new_instance(
    cs: &mut ConstraintSystem<F>,
    dims: Vec<Vec<usize>>,
    scale: crate::Scale,
) -> Self {
    let col = cs.instance_column();
    cs.enable_equality(col);
    ValTensor::Instance {
        inner: col,
        dims,
        initial_offset: 0,
        idx: 0,
        scale,
    }
}
/// Wraps an existing instance column without touching the constraint system.
pub fn new_instance_from_col(
    dims: Vec<Vec<usize>>,
    scale: crate::Scale,
    col: Column<Instance>,
) -> Self {
    ValTensor::Instance {
        inner: col,
        dims,
        idx: 0,
        initial_offset: 0,
        scale,
    }
}
/// Total number of instance values across all recorded shapes.
/// Returns 0 for non-instance tensors.
pub fn get_total_instance_len(&self) -> usize {
    match self {
        ValTensor::Instance { dims, .. } => dims
            .iter()
            .map(|x| {
                // An empty shape contributes nothing (a bare `product` would give 1).
                if !x.is_empty() {
                    x.iter().product::<usize>()
                } else {
                    0
                }
            })
            .sum(),
        _ => 0,
    }
}
/// True iff this is the `Instance` variant.
pub fn is_instance(&self) -> bool {
    matches!(self, ValTensor::Instance { .. })
}
/// Reverses the element order of a `Value` tensor in place.
/// Errors on `Instance` tensors, which cannot be mutated this way.
pub fn reverse(&mut self) -> Result<(), Box<dyn Error>> {
    match self {
        ValTensor::Value { inner: v, .. } => {
            v.reverse();
        }
        ValTensor::Instance { .. } => {
            return Err(Box::new(TensorError::WrongMethod));
        }
    };
    Ok(())
}
/// Sets the starting row offset within the instance column (no-op for `Value`).
pub fn set_initial_instance_offset(&mut self, offset: usize) {
    if let ValTensor::Instance { initial_offset, .. } = self {
        *initial_offset = offset;
    }
}
/// Advances to the next recorded instance shape (no-op for `Value`).
pub fn increment_idx(&mut self) {
    if let ValTensor::Instance { idx, .. } = self {
        *idx += 1;
    }
}
/// Selects the active instance shape (no-op for `Value`).
pub fn set_idx(&mut self, val: usize) {
    if let ValTensor::Instance { idx, .. } = self {
        *idx = val;
    }
}
/// Active instance-shape index; always 0 for `Value` tensors.
pub fn get_idx(&self) -> usize {
    match self {
        ValTensor::Instance { idx, .. } => *idx,
        _ => 0,
    }
}
/// True if any element's value is unknown in this context.
/// Instance tensors are conservatively reported as all-unknown.
pub fn any_unknowns(&self) -> Result<bool, Box<dyn Error>> {
    match self {
        ValTensor::Instance { .. } => Ok(true),
        _ => Ok(self.get_inner()?.iter().any(|&x| {
            // `Value::map` only runs the closure when the value is known,
            // so `is_empty` stays true exactly for unknown values.
            let mut is_empty = true;
            x.map(|_| is_empty = false);
            is_empty
        })),
    }
}
/// True if every element already lives in an assigned cell.
pub fn all_prev_assigned(&self) -> bool {
    match self {
        ValTensor::Value { inner, .. } => inner.iter().all(|x| x.is_prev_assigned()),
        ValTensor::Instance { .. } => false,
    }
}
/// Overwrites the fixed-point scale of either variant.
pub fn set_scale(&mut self, scale: crate::Scale) {
    match self {
        ValTensor::Value { scale: s, .. } => *s = scale,
        ValTensor::Instance { scale: s, .. } => *s = scale,
    }
}
/// Current fixed-point scale.
pub fn scale(&self) -> crate::Scale {
    match self {
        ValTensor::Value { scale, .. } => *scale,
        ValTensor::Instance { scale, .. } => *scale,
    }
}
/// Lazy iterator over `(value, ValType)` pairs for each `ValType::Constant`
/// element. Panics (unreachable) on instance tensors, which hold no constants.
pub fn create_constants_map_iterator(
    &self,
) -> FilterMap<Iter<'_, ValType<F>>, fn(&ValType<F>) -> Option<(F, ValType<F>)>> {
    match self {
        ValTensor::Value { inner, .. } => inner.iter().filter_map(|x| {
            if let ValType::Constant(v) = x {
                Some((*v, x.clone()))
            } else {
                None
            }
        }),
        ValTensor::Instance { .. } => {
            unreachable!("Instance tensors do not have constants")
        }
    }
}
/// Collects all `ValType::Constant` elements into a map, in parallel.
/// Empty for instance tensors.
pub fn create_constants_map(&self) -> ConstantsMap<F> {
    match self {
        ValTensor::Value { inner, .. } => inner
            .par_iter()
            .filter_map(|x| {
                if let ValType::Constant(v) = x {
                    Some((*v, x.clone()))
                } else {
                    None
                }
            })
            .collect(),
        ValTensor::Instance { .. } => ConstantsMap::new(),
    }
}
/// Evaluates every element to a field element. Elements without a known
/// evaluation are silently skipped before reshaping to `self.dims()`.
pub fn get_felt_evals(&self) -> Result<Tensor<F>, Box<dyn Error>> {
    let mut felt_evals: Vec<F> = vec![];
    match self {
        ValTensor::Value {
            inner: v, dims: _, ..
        } => {
            let _ = v.map(|vaf| {
                if let Some(f) = vaf.get_felt_eval() {
                    felt_evals.push(f);
                }
            });
        }
        _ => return Err(Box::new(TensorError::WrongMethod)),
    };
    let mut res: Tensor<F> = felt_evals.into_iter().into();
    res.reshape(self.dims())?;
    Ok(res)
}
/// True if the inner tensor holds exactly one element.
pub fn is_singleton(&self) -> bool {
    match self {
        ValTensor::Value { inner, .. } => inner.is_singleton(),
        ValTensor::Instance { .. } => false,
    }
}
/// Evaluates every element to an `i128` via `felt_to_i128`.
/// Elements whose value is unknown in this context contribute nothing,
/// so the result may be shorter than `self.dims()` implies.
pub fn get_int_evals(&self) -> Result<Tensor<i128>, Box<dyn Error>> {
    let mut integer_evals: Vec<i128> = vec![];
    match self {
        ValTensor::Value {
            inner: v, dims: _, ..
        } => {
            let _ = v.map(|vaf| match vaf {
                ValType::Value(v) => v.map(|f| {
                    integer_evals.push(crate::fieldutils::felt_to_i128(f));
                }),
                ValType::AssignedValue(v) => v.map(|f| {
                    integer_evals.push(crate::fieldutils::felt_to_i128(f.evaluate()));
                }),
                ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
                    v.value_field().map(|f| {
                        integer_evals.push(crate::fieldutils::felt_to_i128(f.evaluate()));
                    })
                }
                ValType::Constant(v) => {
                    integer_evals.push(crate::fieldutils::felt_to_i128(v));
                    Value::unknown()
                }
            });
        }
        _ => return Err(Box::new(TensorError::WrongMethod)),
    };
    let mut tensor: Tensor<i128> = integer_evals.into_iter().into();
    // Unknown values were skipped above, so the element count may not match
    // `self.dims()`; a failed reshape is deliberately ignored in that case.
    // (Replaces the previous `match ... { _ => {} }` construction.)
    let _ = tensor.reshape(self.dims());
    Ok(tensor)
}
/// Pads the flat length up to the next multiple of `n` using `pad` as filler.
/// Errors on `Instance` tensors.
pub fn pad_to_zero_rem(&mut self, n: usize, pad: ValType<F>) -> Result<(), Box<dyn Error>> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = v.pad_to_zero_rem(n, pad)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(Box::new(TensorError::WrongMethod));
        }
    };
    Ok(())
}
/// Returns the sub-tensor selected by `indices` (one range per dimension).
/// Errors on `Instance` tensors.
pub fn get_slice(&self, indices: &[Range<usize>]) -> Result<ValTensor<F>, Box<dyn Error>> {
    // Fast path: ranges covering every dimension in full mean a plain clone.
    if indices.iter().map(|x| x.end - x.start).collect::<Vec<_>>() == self.dims() {
        return Ok(self.clone());
    }
    let slice = match self {
        ValTensor::Value {
            inner: v,
            dims: _,
            scale,
        } => {
            let inner = v.get_slice(indices)?;
            let dims = inner.dims().to_vec();
            ValTensor::Value {
                inner,
                dims,
                scale: *scale,
            }
        }
        _ => return Err(Box::new(TensorError::WrongMethod)),
    };
    Ok(slice)
}
/// Returns a one-element tensor holding the value at flat position `index`.
/// Errors on `Instance` tensors.
pub fn get_single_elem(&self, index: usize) -> Result<ValTensor<F>, Box<dyn Error>> {
    let slice = match self {
        ValTensor::Value {
            inner: v,
            dims: _,
            scale,
        } => {
            let inner = Tensor::from(vec![v.get_flat_index(index)].into_iter());
            ValTensor::Value {
                inner,
                dims: vec![1],
                scale: *scale,
            }
        }
        _ => return Err(Box::new(TensorError::WrongMethod)),
    };
    Ok(slice)
}
/// Borrows the inner tensor of a `Value` variant; errors on `Instance`.
pub fn get_inner_tensor(&self) -> Result<&Tensor<ValType<F>>, TensorError> {
    Ok(match self {
        ValTensor::Value { inner: v, .. } => v,
        ValTensor::Instance { .. } => return Err(TensorError::WrongMethod),
    })
}
/// Mutably borrows the inner tensor of a `Value` variant; errors on `Instance`.
pub fn get_inner_tensor_mut(&mut self) -> Result<&mut Tensor<ValType<F>>, TensorError> {
    Ok(match self {
        ValTensor::Value { inner: v, .. } => v,
        ValTensor::Instance { .. } => return Err(TensorError::WrongMethod),
    })
}
/// Projects every element down to a plain `Value<F>`, evaluating assigned
/// forms; errors on `Instance`.
pub fn get_inner(&self) -> Result<Tensor<Value<F>>, TensorError> {
    Ok(match self {
        ValTensor::Value { inner: v, .. } => v.map(|x| match x {
            ValType::Value(v) => v,
            ValType::AssignedValue(v) => v.evaluate(),
            ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
                v.value_field().evaluate()
            }
            ValType::Constant(v) => Value::known(v),
        }),
        ValTensor::Instance { .. } => return Err(TensorError::WrongMethod),
    })
}
/// Broadcast-expands the tensor to `dims` in place; errors on `Instance`.
pub fn expand(&mut self, dims: &[usize]) -> Result<(), Box<dyn Error>> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = v.expand(dims)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(Box::new(TensorError::WrongMethod));
        }
    };
    Ok(())
}
/// Moves axis `source` to position `destination` in place; errors on `Instance`.
pub fn move_axis(&mut self, source: usize, destination: usize) -> Result<(), Box<dyn Error>> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = v.move_axis(source, destination)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(Box::new(TensorError::WrongMethod));
        }
    };
    Ok(())
}
/// Reshapes the tensor in place. `Value` tensors delegate to the inner
/// tensor; `Instance` tensors only update the recorded dims, and the
/// element count must be preserved.
///
/// # Errors
/// Returns a [`TensorError::DimError`] when an `Instance` reshape would
/// change the number of elements.
pub fn reshape(&mut self, new_dims: &[usize]) -> Result<(), Box<dyn Error>> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            v.reshape(new_dims)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { dims: d, idx, .. } => {
            if d[*idx].iter().product::<usize>() != new_dims.iter().product::<usize>() {
                // Message fix: previously read "as they have number of elements".
                return Err(Box::new(TensorError::DimError(format!(
                    "Cannot reshape {:?} to {:?} as they have different numbers of elements",
                    d[*idx], new_dims
                ))));
            }
            d[*idx] = new_dims.to_vec();
        }
    };
    Ok(())
}
/// Restricts the tensor to `start..end` along `axis`, in place.
/// Errors on `Instance` tensors.
pub fn slice(
    &mut self,
    axis: &usize,
    start: &usize,
    end: &usize,
) -> Result<(), Box<dyn Error>> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = crate::tensor::ops::slice(v, axis, start, end)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(Box::new(TensorError::WrongMethod));
        }
    };
    Ok(())
}
/// Collapses the shape to a single dimension. For `Instance` tensors only the
/// active recorded shape is flattened.
pub fn flatten(&mut self) {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            v.flatten();
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { dims: d, idx, .. } => {
            d[*idx] = vec![d[*idx].iter().product()];
        }
    }
}
/// Duplicates every `n`-th element `num_repeats` times starting at
/// `initial_offset`, in place; errors on `Instance`.
pub fn duplicate_every_n(
    &mut self,
    n: usize,
    num_repeats: usize,
    initial_offset: usize,
) -> Result<(), TensorError> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = v.duplicate_every_n(n, num_repeats, initial_offset)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(TensorError::WrongMethod);
        }
    }
    Ok(())
}
/// Returns the flat indices of elements that are the constant zero,
/// whether or not they have been assigned to a cell.
pub fn get_const_zero_indices(&self) -> Result<Vec<usize>, TensorError> {
    match self {
        ValTensor::Value { inner, .. } => Ok(inner
            .iter()
            .enumerate()
            .filter_map(|(i, e)| match e {
                ValType::Constant(r) | ValType::AssignedConstant(_, r) if *r == F::ZERO => {
                    Some(i)
                }
                _ => None,
            })
            .collect()),
        // Instance tensors hold no constants.
        ValTensor::Instance { .. } => Ok(vec![]),
    }
}
pub fn get_const_indices(&self) -> Result<Vec<usize>, TensorError> {
match self {
ValTensor::Value { inner: v, .. } => {
let mut indices = vec![];
for (i, e) in v.iter().enumerate() {
if let ValType::Constant(_) = e {
indices.push(i);
} else if let ValTyp |
e::AssignedConstant(_, _) = e {
indices.push(i);
}
}
Ok(indices)
}
ValTensor::Instance { .. } => Ok(vec![]),
}
}
/// Removes the elements at `indices`, in place. `is_sorted` tells the inner
/// routine whether `indices` is already ascending. An `Instance` tensor only
/// accepts an empty index list.
pub fn remove_indices(
    &mut self,
    indices: &mut [usize],
    is_sorted: bool,
) -> Result<(), TensorError> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = v.remove_indices(indices, is_sorted)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            // Removing nothing is a no-op even for instances.
            if indices.is_empty() {
                return Ok(());
            } else {
                return Err(TensorError::WrongMethod);
            }
        }
    }
    Ok(())
}
/// Removes every `n`-th element `num_repeats` times starting at
/// `initial_offset` (inverse of `duplicate_every_n`); errors on `Instance`.
pub fn remove_every_n(
    &mut self,
    n: usize,
    num_repeats: usize,
    initial_offset: usize,
) -> Result<(), TensorError> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = v.remove_every_n(n, num_repeats, initial_offset)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(TensorError::WrongMethod);
        }
    }
    Ok(())
}
/// Inserts `value` every `stride` positions along `axis`, in place;
/// errors on `Instance`.
pub fn intercalate_values(
    &mut self,
    value: ValType<F>,
    stride: usize,
    axis: usize,
) -> Result<(), TensorError> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = intercalate_values(v, value, stride, axis)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(TensorError::WrongMethod);
        }
    }
    Ok(())
}
/// Upsamples each axis by the corresponding factor in `scales`, in place;
/// errors on `Instance`.
pub fn resize(&mut self, scales: &[usize]) -> Result<(), TensorError> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = resize(v, scales)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(TensorError::WrongMethod);
        }
    };
    Ok(())
}
/// Pads the tensor with per-dimension (before, after) amounts starting at
/// dimension `offset`, in place; errors on `Instance`.
pub fn pad(&mut self, padding: Vec<(usize, usize)>, offset: usize) -> Result<(), TensorError> {
    match self {
        ValTensor::Value {
            inner: v, dims: d, ..
        } => {
            *v = pad(v, padding, offset)?;
            *d = v.dims().to_vec();
        }
        ValTensor::Instance { .. } => {
            return Err(TensorError::WrongMethod);
        }
    }
    Ok(())
}
/// Number of elements implied by the current dims.
/// Empty shapes and the `[0]` shape both count as 0 elements.
pub fn len(&self) -> usize {
    match self {
        ValTensor::Value { dims, .. } => {
            if !dims.is_empty() && (dims != &[0]) {
                dims.iter().product::<usize>()
            } else {
                0
            }
        }
        ValTensor::Instance { dims, idx, .. } => {
            // Only the active recorded shape is counted.
            let dims = dims[*idx].clone();
            if !dims.is_empty() && (dims != [0]) {
                dims.iter().product::<usize>()
            } else {
                0
            }
        }
    }
}
/// True when the tensor holds no elements.
pub fn is_empty(&self) -> bool {
    self.len() == 0
}
/// Concatenates two `Value` tensors end-to-end (flat); errors otherwise.
pub fn concat(&self, other: Self) -> Result<Self, TensorError> {
    let res = match (self, other) {
        (ValTensor::Value { inner: v1, .. }, ValTensor::Value { inner: v2, .. }) => {
            ValTensor::from(Tensor::new(Some(&[v1.clone(), v2]), &[2])?.combine()?)
        }
        _ => {
            return Err(TensorError::WrongMethod);
        }
    };
    Ok(res)
}
/// Concatenates two `Value` tensors along `axis`; errors otherwise.
pub fn concat_axis(&self, other: Self, axis: &usize) -> Result<Self, TensorError> {
    let res = match (self, other) {
        (ValTensor::Value { inner: v1, .. }, ValTensor::Value { inner: v2, .. }) => {
            let v = crate::tensor::ops::concat(&[v1, &v2], *axis)?;
            ValTensor::from(v)
        }
        _ => {
            return Err(TensorError::WrongMethod);
        }
    };
    Ok(res)
}
/// Current shape: the inner dims for `Value`, the active recorded shape for
/// `Instance`.
pub fn dims(&self) -> &[usize] {
    match self {
        ValTensor::Value { dims: d, .. } => d,
        ValTensor::Instance { dims: d, idx, .. } => &d[*idx],
    }
}
/// Human-readable rendering of the values (as i32); long tensors are
/// elided to their first and last five entries.
pub fn show(&self) -> String {
    match self.clone() {
        ValTensor::Value {
            inner: v, dims: _, ..
        } => {
            let r: Tensor<i32> = v.map(|x| x.into());
            if r.len() > 10 {
                let start = r[..5].to_vec();
                let end = r[r.len() - 5..].to_vec();
                format!(
                    "[{} ... {}]",
                    start.iter().map(|x| format!("{}", x)).join(", "),
                    end.iter().map(|x| format!("{}", x)).join(", ")
                )
            } else {
                format!("{:?}", r)
            }
        }
        _ => "ValTensor not PrevAssigned".into(),
    }
}
}
impl<F: PrimeField + TensorType + PartialOrd> ValTensor<F> {
    /// Returns a copy with every element replaced by its multiplicative
    /// inverse; non-invertible (zero) entries map to zero. Errors on
    /// `Instance` tensors.
    pub fn inverse(&self) -> Result<ValTensor<F>, Box<dyn Error>> {
        let mut cloned_self = self.clone();
        match &mut cloned_self {
            ValTensor::Value {
                inner: v, dims: d, ..
            } => {
                *v = v.map(|x| match x {
                    ValType::AssignedValue(v) => ValType::AssignedValue(v.invert()),
                    // Assigned cells become (deferred) assigned values of the inverse.
                    ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
                        ValType::AssignedValue(v.value_field().invert())
                    }
                    ValType::Value(v) => ValType::Value(v.map(|x| x.invert().unwrap_or(F::ZERO))),
                    ValType::Constant(v) => ValType::Constant(v.invert().unwrap_or(F::ZERO)),
                });
                *d = v.dims().to_vec();
            }
            ValTensor::Instance { .. } => {
                return Err(Box::new(TensorError::WrongMethod));
            }
        };
        Ok(cloned_self)
    }
}
use std::collections::HashSet;
use log::{debug, error, warn};
use crate::circuit::{region::ConstantsMap, CheckMode};
use super::*;
/// A block of advice columns (or a placeholder) over which tensors are laid out.
pub enum VarTensor {
    /// Real advice allocation: `inner[block][col]`, each column `col_size` rows.
    Advice {
        inner: Vec<Vec<Column<Advice>>>,
        num_inner_cols: usize,
        col_size: usize,
    },
    /// Shape-only stand-in used when no constraint system is available.
    Dummy {
        num_inner_cols: usize,
        col_size: usize,
    },
    /// No allocation at all.
    Empty,
}
impl VarTensor {
/// True iff this is a real advice allocation (not `Dummy`/`Empty`).
pub fn is_advice(&self) -> bool {
    matches!(self, VarTensor::Advice { .. })
}
/// Usable rows per column: 2^logrows minus the blinding factors and one row.
pub fn max_rows<F: PrimeField>(cs: &ConstraintSystem<F>, logrows: usize) -> usize {
    let base = 2u32;
    base.pow(logrows as u32) as usize - cs.blinding_factors() - 1
}
/// Allocates enough unblinded advice blocks (each `num_inner_cols` wide,
/// equality-enabled) to hold `capacity` assignments.
pub fn new_unblinded_advice<F: PrimeField>(
    cs: &mut ConstraintSystem<F>,
    logrows: usize,
    num_inner_cols: usize,
    capacity: usize,
) -> Self {
    // NOTE(review): unlike `new_advice`, `col_size` here keeps the
    // `num_inner_cols` factor — confirm this asymmetry is intentional.
    let max_rows = Self::max_rows(cs, logrows) * num_inner_cols;
    // Two-pass estimate: the second pass folds the first block count back
    // into capacity — presumably to reserve room for duplicated rows; TODO confirm.
    let mut modulo = (capacity / max_rows) + 1;
    modulo = ((capacity + modulo) / max_rows) + 1;
    let mut advices = vec![];
    if modulo > 1 {
        warn!(
            "using column duplication for {} unblinded advice blocks",
            modulo - 1
        );
    }
    for _ in 0..modulo {
        let mut inner = vec![];
        for _ in 0..num_inner_cols {
            let col = cs.unblinded_advice_column();
            cs.enable_equality(col);
            inner.push(col);
        }
        advices.push(inner);
    }
    VarTensor::Advice {
        inner: advices,
        num_inner_cols,
        col_size: max_rows,
    }
}
pub fn new_advice<F: PrimeField>(
cs: &mut ConstraintSystem<F>,
logrows: usize,
num_inner_cols: usize,
capacity: usize,
) -> Self {
let max_rows = Self::max_rows(cs, logrows);
let max_assignments = Sel |
f::max_rows(cs, logrows) * num_inner_cols;
let mut modulo = (capacity / max_assignments) + 1;
modulo = ((capacity + modulo) / max_assignments) + 1;
let mut advices = vec![];
if modulo > 1 {
debug!("using column duplication for {} advice blocks", modulo - 1);
}
for _ in 0..modulo {
let mut inner = vec![];
for _ in 0..num_inner_cols {
let col = cs.advice_column();
cs.enable_equality(col);
inner.push(col);
}
advices.push(inner);
}
VarTensor::Advice {
inner: advices,
num_inner_cols,
col_size: max_rows,
}
}
pub fn constant_cols<F: PrimeField>(
cs: &mut ConstraintSystem<F>,
logrows: usize,
num_constants: usize,
module_requires_fixed: bool,
) -> usize {
if num_constants == 0 && !module_requires_fixed {
return 0;
} else if num_constants == 0 && module_requires_fixed {
let col = cs.fixed_column();
cs.enable_constant(col);
return 1;
}
let max_rows = Self::max_rows(cs, logrows);
let mut modulo = num_constants / max_rows + 1;
modulo = (num_constants + modulo) / max_rows + 1;
if modulo > 1 {
debug!("using column duplication for {} fixed columns", modulo - 1);
}
for _ in 0..modulo {
let col = cs.fixed_column();
cs.enable_constant(col);
}
modulo
}
pub fn dummy(logrows: usize, num_inner_cols: usize) -> Self {
let base = 2u32;
let max_rows = base.pow(logrows as u32) as usize - 6;
VarTensor::Dummy {
col_size: max_rows,
num_inner_cols,
}
}
pub fn num_blocks(&self) -> usize {
match self {
VarTensor::Advice { inner, .. } => inner.len(),
_ => 0, |
}
}
pub fn num_inner_cols(&self) -> usize {
match self {
VarTensor::Advice { num_inner_cols, .. } | VarTensor::Dummy { num_inner_cols, .. } => {
*num_inner_cols
}
_ => 0,
}
}
pub fn num_cols(&self) -> usize {
match self {
VarTensor::Advice { inner, .. } => inner[0].len() * inner.len(),
_ => 0,
}
}
pub fn col_size(&self) -> usize {
match self {
VarTensor::Advice { col_size, .. } | VarTensor::Dummy { col_size, .. } => *col_size,
_ => 0,
}
}
pub fn block_size(&self) -> usize {
match self {
VarTensor::Advice {
num_inner_cols,
col_size,
..
}
| VarTensor::Dummy {
col_size,
num_inner_cols,
..
} => *col_size * num_inner_cols,
_ => 0,
}
}
pub fn cartesian_coord(&self, linear_coord: usize) -> (usize, usize, usize) {
let x = linear_coord / self.block_size();
let y = linear_coord % self.num_inner_cols();
let z = (linear_coord - x * self.block_size()) / self.num_inner_cols();
(x, y, z)
}
}
impl VarTensor {
    /// Queries `rng` consecutive rotations of the advice column at block `x`,
    /// inner column `y`, starting from rotation `z`.
    ///
    /// # Errors
    /// `Error::Synthesis` when the tensor is not backed by advice columns.
    pub fn query_rng<F: PrimeField>(
        &self,
        meta: &mut VirtualCells<'_, F>,
        x: usize,
        y: usize,
        z: i32,
        rng: usize,
    ) -> Result<Tensor<Expression<F>>, halo2_proofs::plonk::Error> {
        match &self {
            VarTensor::Advice { inner: advices, .. } => {
                let c = Tensor::from(
                    (0..rng).map(|i| meta.query_advice(advices[x][y], Rotation(z + i as i32))),
                );
                Ok(c)
            }
            _ => {
                error!("VarTensor was not initialized");
                Err(halo2_proofs::plonk::Error::Synthesis)
            }
        }
    }
    /// Queries every inner column of block `x` over `rng` rotations starting
    /// at `z`; rotations iterate in the outer loop, columns in the inner.
    pub fn query_whole_block<F: PrimeField>(
        &self,
        meta: &mut VirtualCells<'_, F>,
        x: usize,
        z: i32,
        rng: usize,
    ) -> Result<Tensor<Expression<F>>, halo2_proofs::plonk::Error> {
        match &self {
            VarTensor::Advice { inner: advices, .. } => {
                let c = Tensor::from({
                    let cartesian = (0..rng).cartesian_product(0..self.num_inner_cols());
                    cartesian.map(|(i, y)| meta.query_advice(advices[x][y], Rotation(z + i as i32)))
                });
                Ok(c)
            }
            _ => {
                error!("VarTensor was not initialized");
                Err(halo2_proofs::plonk::Error::Synthesis)
            }
        }
    }
    /// Assigns `constant` to the advice cell at flat position `offset + coord`
    /// via a constant-equality assignment.
    pub fn assign_constant<F: PrimeField + TensorType + PartialOrd>(
        &self,
        region: &mut Region<F>,
        offset: usize,
        coord: usize,
        constant: F,
    ) -> Result<AssignedCell<F, F>, halo2_proofs::plonk::Error> {
        let (x, y, z) = self.cartesian_coord(offset + coord);
        match &self {
            VarTensor::Advice { inner: advices, .. } => {
                region.assign_advice_from_constant(|| "constant", advices[x][y], z, constant)
            }
            _ => {
                error!("VarTensor was not initialized");
                Err(halo2_proofs::plonk::Error::Synthesis)
            }
        }
    }
    /// Assigns `values` into the region, skipping the flat indices in
    /// `omissions` (those entries are returned unmodified). Assigned entries
    /// are packed consecutively, so omissions do not consume cells.
    pub fn assign_with_omissions<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        &self,
        region: &mut Region<F>,
        offset: usize,
        values: &ValTensor<F>,
        omissions: &HashSet<&usize>,
        constants: &mut ConstantsMap<F>,
    ) -> Result<ValTensor<F>, halo2_proofs::plonk::Error> {
        // Next packed position; advanced only for non-omitted entries.
        let mut assigned_coord = 0;
        let mut res: ValTensor<F> = match values {
            ValTensor::Instance { .. } => {
                unimplemented!("cannot assign instance to advice columns with omissions")
            }
            ValTensor::Value { inner: v, .. } => Ok::<ValTensor<F>, halo2_proofs::plonk::Error>(
                v.enum_map(|coord, k| {
                    if omissions.contains(&coord) {
                        return Ok::<_, halo2_proofs::plonk::Error>(k);
                    }
                    let cell =
                        self.assign_value(region, offset, k.clone(), assigned_coord, constants)?;
                    assigned_coord += 1;
                    Ok::<_, halo2_proofs::plonk::Error>(cell)
                })?
                .into(),
            ),
        }?;
        res.set_scale(values.scale());
        Ok(res)
    }
    /// Assigns a value or instance tensor into the region starting at flat
    /// position `offset`, preserving the input's scale on the result.
    pub fn assign<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        &self,
        region: &mut Region<F>,
        offset: usize,
        values: &ValTensor<F>,
        constants: &mut ConstantsMap<F>,
    ) -> Result<ValTensor<F>, halo2_proofs::plonk::Error> {
        let mut res: ValTensor<F> = match values {
            ValTensor::Instance {
                inner: instance,
                dims,
                idx,
                initial_offset,
                ..
            } => match &self {
                VarTensor::Advice { inner: v, .. } => {
                    // Skip the instance rows consumed by sub-tensors before
                    // `idx` (product of each earlier shape).
                    let total_offset: usize = initial_offset
                        + dims[..*idx]
                            .iter()
                            .map(|x| x.iter().product::<usize>())
                            .sum::<usize>();
                    let dims = &dims[*idx];
                    // Dummy tensor used only to enumerate coordinates.
                    let t: Tensor<i32> = Tensor::new(None, dims).unwrap();
                    Ok(t.enum_map(|coord, _| {
                        let (x, y, z) = self.cartesian_coord(offset + coord);
                        region.assign_advice_from_instance(
                            || "pub input anchor",
                            *instance,
                            coord + total_offset,
                            v[x][y],
                            z,
                        )
                    })?
                    .into())
                }
                _ => {
                    error!("Instance is only supported for advice columns");
                    Err(halo2_proofs::plonk::Error::Synthesis)
                }
            },
            ValTensor::Value { inner: v, .. } => Ok(v
                .enum_map(|coord, k| {
                    self.assign_value(region, offset, k.clone(), coord, constants)
                })?
                .into()),
        }?;
        res.set_scale(values.scale());
        Ok(res)
    }
    /// Computes the outcome of `assign_with_duplication` without a real
    /// region: duplicates entries at the same frequency, records the
    /// constants the real assignment would create, and returns the
    /// de-duplicated tensor plus the total number of cells consumed.
    pub fn dummy_assign_with_duplication<
        F: PrimeField + TensorType + PartialOrd + std::hash::Hash,
    >(
        &self,
        row: usize,
        offset: usize,
        values: &ValTensor<F>,
        single_inner_col: bool,
        constants: &mut ConstantsMap<F>,
    ) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
        match values {
            ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
            ValTensor::Value { inner: v, dims , ..} => {
                // Duplicate at column boundaries when laying out a single
                // inner column, otherwise at whole-block boundaries.
                let duplication_freq = if single_inner_col {
                    self.col_size()
                } else {
                    self.block_size()
                };
                let num_repeats = if single_inner_col {
                    1
                } else {
                    self.num_inner_cols()
                };
                let duplication_offset = if single_inner_col {
                    row
                } else {
                    offset
                };
                let mut res: ValTensor<F> = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap().into();
                // Record constants so later passes see the same cache state
                // the real assignment would produce.
                let constants_map = res.create_constants_map();
                constants.extend(constants_map);
                // Length including duplicates = cells actually consumed.
                let total_used_len = res.len();
                res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
                res.reshape(dims).unwrap();
                res.set_scale(values.scale());
                Ok((res, total_used_len))
            }
        }
    }
    /// Assigns `values` with boundary duplication: entries are repeated at
    /// column/block boundaries, assigned, then the duplicates are stripped
    /// from the returned tensor. Returns the tensor plus total cells used.
    /// In `CheckMode::SAFE`, asserts that each duplicated entry equals its
    /// predecessor and that the round-trip preserved the tensor.
    pub fn assign_with_duplication<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        &self,
        region: &mut Region<F>,
        row: usize,
        offset: usize,
        values: &ValTensor<F>,
        check_mode: &CheckMode,
        single_inner_col: bool,
        constants: &mut ConstantsMap<F>,
    ) -> Result<(ValTensor<F>, usize), halo2_proofs::plonk::Error> {
        let mut prev_cell = None;
        match values {
            ValTensor::Instance { .. } => unimplemented!("duplication is not supported on instance columns. increase K if you require more rows."),
            ValTensor::Value { inner: v, dims , ..} => {
                // Same duplication geometry as `dummy_assign_with_duplication`.
                let duplication_freq = if single_inner_col {
                    self.col_size()
                } else {
                    self.block_size()
                };
                let num_repeats = if single_inner_col {
                    1
                } else {
                    self.num_inner_cols()
                };
                let duplication_offset = if single_inner_col {
                    row
                } else {
                    offset
                };
                let v = v.duplicate_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
                let mut res: ValTensor<F> = {
                    v.enum_map(|coord, k| {
                        // Single-inner-col layout strides by the number of
                        // inner columns so consecutive entries share a column.
                        let step = if !single_inner_col {
                            1
                        } else {
                            self.num_inner_cols()
                        };
                        let (x, y, z) = self.cartesian_coord(offset + coord * step);
                        if matches!(check_mode, CheckMode::SAFE) && coord > 0 && z == 0 && y == 0 {
                            // At a boundary the duplicated entry must equal
                            // its predecessor.
                            assert_eq!(Into::<i32>::into(k.clone()), Into::<i32>::into(v[coord - 1].clone()));
                        };
                        let cell = self.assign_value(region, offset, k.clone(), coord * step, constants)?;
                        if single_inner_col {
                        if z == 0 {
                            // Save the cell at the top of a column for the
                            // boundary copy constraint.
                            prev_cell = Some(cell.clone());
                        } else if coord > 0 && z == 0 && single_inner_col {
                            // NOTE(review): this branch requires z == 0 but is
                            // only reached when z != 0, so it looks unreachable
                            // as written — confirm whether the copy constraint
                            // below is intended to fire at column boundaries.
                            if let Some(prev_cell) = prev_cell.as_ref() {
                                let cell = cell.cell().ok_or({
                                    error!("Error getting cell: {:?}", (x,y));
                                    halo2_proofs::plonk::Error::Synthesis})?;
                                let prev_cell = prev_cell.cell().ok_or({
                                    error!("Error getting cell: {:?}", (x,y));
                                    halo2_proofs::plonk::Error::Synthesis})?;
                                region.constrain_equal(prev_cell,cell)?;
                            } else {
                                error!("Error copy-constraining previous value: {:?}", (x,y));
                                return Err(halo2_proofs::plonk::Error::Synthesis);
                            }
                        }}
                        Ok(cell)
                    })?.into()};
                let total_used_len = res.len();
                res.remove_every_n(duplication_freq, num_repeats, duplication_offset).unwrap();
                res.reshape(dims).unwrap();
                res.set_scale(values.scale());
                if matches!(check_mode, CheckMode::SAFE) {
                    // Only check tensors that contain at least one non-zero
                    // entry (all-zero means nothing was assigned).
                    let is_assigned = !Into::<Tensor<i32>>::into(res.clone().get_inner().unwrap())
                        .iter()
                        .all(|&x| x == 0);
                    if is_assigned {
                        assert_eq!(
                            Into::<Tensor<i32>>::into(values.get_inner().unwrap()),
                            Into::<Tensor<i32>>::into(res.get_inner().unwrap())
                        )};
                }
                Ok((res, total_used_len))
            }
        }
    }
    /// Assigns a single `ValType` at flat position `offset + coord`,
    /// dispatching on the value's provenance: fresh witness, previously
    /// assigned cell (copied), assigned value (evaluated), or constant
    /// (assigned once and cached in `constants`, then copied on reuse).
    fn assign_value<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>(
        &self,
        region: &mut Region<F>,
        offset: usize,
        k: ValType<F>,
        coord: usize,
        constants: &mut ConstantsMap<F>,
    ) -> Result<ValType<F>, halo2_proofs::plonk::Error> {
        let (x, y, z) = self.cartesian_coord(offset + coord);
        let res = match k {
            ValType::Value(v) => match &self {
                VarTensor::Advice { inner: advices, .. } => {
                    ValType::PrevAssigned(region.assign_advice(|| "k", advices[x][y], z, || v)?)
                }
                _ => unimplemented!(),
            },
            // Already-assigned cells are copy-constrained into place.
            ValType::PrevAssigned(v) => match &self {
                VarTensor::Advice { inner: advices, .. } => {
                    ValType::PrevAssigned(v.copy_advice(|| "k", region, advices[x][y], z)?)
                }
                _ => unimplemented!(),
            },
            ValType::AssignedConstant(v, val) => match &self {
                VarTensor::Advice { inner: advices, .. } => {
                    ValType::AssignedConstant(v.copy_advice(|| "k", region, advices[x][y], z)?, val)
                }
                _ => unimplemented!(),
            },
            ValType::AssignedValue(v) => match &self {
                VarTensor::Advice { inner: advices, .. } => ValType::PrevAssigned(
                    region
                        .assign_advice(|| "k", advices[x][y], z, || v)?
                        .evaluate(),
                ),
                _ => unimplemented!(),
            },
            ValType::Constant(v) => {
                // First use assigns the constant and caches the cell; later
                // uses recurse with the cached cell, hitting the copy path.
                if let std::collections::hash_map::Entry::Vacant(e) = constants.entry(v) {
                    let value = ValType::AssignedConstant(
                        self.assign_constant(region, offset, coord, v)?,
                        v,
                    );
                    e.insert(value.clone());
                    value
                } else {
                    let cell = constants.get(&v).unwrap();
                    self.assign_value(region, offset, cell.clone(), coord, constants)?
                }
            }
        };
        Ok(res)
    }
}
use crate::circuit::modules::polycommit::PolyCommitChip;
use crate::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use crate::circuit::modules::poseidon::PoseidonChip;
use crate::circuit::modules::Module;
use crate::fieldutils::felt_to_i128;
use crate::fieldutils::i128_to_felt;
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::quantize_float;
use crate::graph::scale_to_multiplier;
use crate::graph::{GraphCircuit, GraphSettings};
use crate::pfsys::create_proof_circuit;
use crate::pfsys::evm::aggregation_kzg::AggregationCircuit;
use crate::pfsys::evm::aggregation_kzg::PoseidonTranscript;
use crate::pfsys::verify_proof_circuit;
use crate::pfsys::TranscriptType;
use crate::tensor::TensorType;
use crate::CheckMode;
use crate::Commitments;
use console_error_panic_hook;
use halo2_proofs::plonk::*;
use halo2_proofs::poly::commitment::{CommitmentScheme, ParamsProver};
use halo2_proofs::poly::ipa::multiopen::{ProverIPA, VerifierIPA};
use halo2_proofs::poly::ipa::{
commitment::{IPACommitmentScheme, ParamsIPA},
strategy::SingleStrategy as IPASingleStrategy,
};
use halo2_proofs::poly::kzg::multiopen::ProverSHPLONK;
use halo2_proofs::poly::kzg::multiopen::VerifierSHPLONK;
use halo2_proofs::poly::kzg::{
commitment::{KZGCommitmentScheme, ParamsKZG},
strategy::SingleStrategy as KZGSingleStrategy,
};
use halo2_proofs::poly::VerificationStrategy;
use halo2_solidity_verifier::encode_calldata;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
use halo2curves::ff::{FromUniformBytes, PrimeField};
use snark_verifier::loader::native::NativeLoader;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use std::str::FromStr;
use wasm_bindgen::prelude::*;
use wasm_bindgen_console_logger::DEFAULT_LOGGER;
pub use wasm_bindgen_rayon::init_thread_pool;
pub |
fn init_logger() {
log::set_logger(&DEFAULT_LOGGER).unwrap();
}
/// Installs the `console_error_panic_hook` so Rust panics are forwarded to
/// the browser console; `set_once` installs the hook at most once.
pub fn init_panic_hook() {
    console_error_panic_hook::set_once();
}
/// Encodes a JSON-serialized proof (and optional 20-byte verifying-key
/// address) into EVM verifier calldata.
pub fn encodeVerifierCalldata(
    proof: wasm_bindgen::Clamped<Vec<u8>>,
    vk_address: Option<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
    // Decode the JSON-serialized proof artifact.
    let snark: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
    // Decode the optional verifying-key address, when one was supplied.
    let vk_address: Option<[u8; 20]> = vk_address
        .map(|addr| {
            serde_json::from_slice::<[u8; 20]>(&addr[..])
                .map_err(|e| JsError::new(&format!("Failed to deserialize vk address: {}", e)))
        })
        .transpose()?;
    // Instances are flattened across all columns before encoding.
    let instances: Vec<_> = snark.instances.into_iter().flatten().collect();
    Ok(encode_calldata(vk_address, &snark.proof, &instances))
}
/// Renders a JSON-serialized field element using its `Debug` formatting
/// (big-endian hexadecimal representation).
pub fn feltToBigEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
    serde_json::from_slice::<Fr>(&array[..])
        .map(|felt| format!("{:?}", felt))
        .map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))
}
/// Renders a JSON-serialized field element in its serde string form
/// (little-endian representation).
pub fn feltToLittleEndian(array: wasm_bindgen::Clamped<Vec<u8>>) -> Result<String, JsError> {
    let felt: Fr = serde_json::from_slice(&array[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
    // Round-trip through serde: serializing yields the JSON string form,
    // which is then decoded back into a plain `String`.
    let encoded = serde_json::to_string(&felt).unwrap();
    let decoded: String = serde_json::from_str(&encoded).unwrap();
    Ok(decoded)
}
/// Converts a JSON-serialized field element into a JSON-serialized signed
/// 128-bit integer.
pub fn feltToInt(
    array: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    let felt: Fr = serde_json::from_slice(&array[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
    let serialized = serde_json::to_vec(&felt_to_i128(felt))
        .map_err(|e| JsError::new(&format!("Failed to serialize integer: {}", e)))?;
    Ok(wasm_bindgen::Clamped(serialized))
}
/// Converts a JSON-serialized field element back to a float by interpreting
/// it as a signed integer and dividing by the scale's multiplier.
pub fn feltToFloat(
    array: wasm_bindgen::Clamped<Vec<u8>>,
    scale: crate::Scale,
) -> Result<f64, JsError> {
    let felt: Fr = serde_json::from_slice(&array[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize field element: {}", e)))?;
    // Undo the fixed-point scaling applied at quantization time.
    Ok(felt_to_i128(felt) as f64 / scale_to_multiplier(scale))
}
/// Quantizes a float at the given scale and returns the resulting field
/// element serialized (as its string form) to JSON bytes.
pub fn floatToFelt(
    input: f64,
    scale: crate::Scale,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    // Quantize the float into the fixed-point integer domain.
    let quantized =
        quantize_float(&input, 0.0, scale).map_err(|e| JsError::new(&format!("{}", e)))?;
    let encoded =
        crate::pfsys::field_to_string::<halo2curves::bn256::Fr>(&i128_to_felt(quantized));
    serde_json::to_vec(&encoded)
        .map(wasm_bindgen::Clamped)
        .map_err(|e| JsError::new(&format!("Failed to serialize a float to felt{}", e)))
}
/// Produces a KZG polynomial commitment to a JSON-serialized vector of field
/// elements, using the SRS and the blinding-factor count taken from the
/// deserialized verifying key.
pub fn kzgCommit(
    message: wasm_bindgen::Clamped<Vec<u8>>,
    vk: wasm_bindgen::Clamped<Vec<u8>>,
    settings: wasm_bindgen::Clamped<Vec<u8>>,
    params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    let message: Vec<Fr> = serde_json::from_slice(&message[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
    // Read the KZG SRS.
    let mut reader = std::io::BufReader::new(&params_ser[..]);
    let params: ParamsKZG<Bn256> =
        halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
            .map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
    // `reader` is rebound to the vk bytes; the settings are parsed first
    // because they are required to decode the verifying key.
    let mut reader = std::io::BufReader::new(&vk[..]);
    let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
    let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        circuit_settings,
    )
    .map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
    let output = PolyCommitChip::commit::<KZGCommitmentScheme<Bn256>>(
        message,
        (vk.cs().blinding_factors() + 1) as u32,
        &params,
    );
    Ok(wasm_bindgen::Clamped(
        serde_json::to_vec(&output).map_err(|e| JsError::new(&format!("{}", e)))?,
    ))
}
/// Packs a raw byte buffer into field elements: each 16-byte little-endian
/// chunk becomes one `Fr` via `from_u128`; a trailing partial chunk is
/// zero-padded. Returns the JSON-serialized vector of field elements.
pub fn bufferToVecOfFelt(
    buffer: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    let bytes: &[u8] = &buffer;
    let exact_chunks = bytes.chunks_exact(16);
    // Capture any trailing partial chunk before consuming the iterator.
    let mut tail = exact_chunks.remainder().to_vec();
    let mut words = exact_chunks
        .map(|slice| {
            slice
                .try_into()
                .map_err(|_| JsError::new("failed to slice input chunks"))
        })
        .collect::<Result<Vec<[u8; 16]>, JsError>>()?;
    if !tail.is_empty() {
        // Zero-pad the partial chunk up to a full 16-byte word.
        tail.resize(16, 0);
        let padded: [u8; 16] = tail
            .try_into()
            .map_err(|_| JsError::new("failed to slice remainder"))?;
        words.push(padded);
    }
    let field_elements: Vec<Fr> = words
        .iter()
        .map(|word| PrimeField::from_u128(u8_array_to_u128_le(*word)))
        .collect();
    serde_json::to_vec(&field_elements)
        .map(wasm_bindgen::Clamped)
        .map_err(|e| JsError::new(&format!("Failed to serialize field elements: {}", e)))
}
/// Hashes a JSON-serialized vector of field elements with the Poseidon
/// sponge configuration used by the graph module, returning the
/// JSON-serialized digest.
pub fn poseidonHash(
    message: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<wasm_bindgen::Clamped<Vec<u8>>, JsError> {
    let message: Vec<Fr> = serde_json::from_slice(&message[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize message: {}", e)))?;
    // `message` is not used again, so move it instead of cloning (the
    // original `message.clone()` was a redundant allocation).
    let output =
        PoseidonChip::<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>::run(
            message,
        )
        .map_err(|e| JsError::new(&format!("{}", e)))?;
    Ok(wasm_bindgen::Clamped(serde_json::to_vec(&output).map_err(
        |e| JsError::new(&format!("Failed to serialize poseidon hash output: {}", e)),
    )?))
}
/// Generates a witness from a bincode-serialized compiled circuit and a
/// JSON-serialized graph input, returning the JSON-serialized witness.
pub fn genWitness(
    compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
    input: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
    // Restore the compiled circuit and the user-provided input data.
    let mut circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize compiled model: {}", e)))?;
    let graph_data: crate::graph::input::GraphData = serde_json::from_slice(&input[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize input: {}", e)))?;
    // Load the input into the circuit's expected tensor form.
    let mut loaded_input = circuit
        .load_graph_input(&graph_data)
        .map_err(|e| JsError::new(&format!("{}", e)))?;
    // Run the forward pass to produce the witness.
    let witness = circuit
        .forward::<KZGCommitmentScheme<Bn256>>(&mut loaded_input, None, None, false)
        .map_err(|e| JsError::new(&format!("{}", e)))?;
    serde_json::to_vec(&witness)
        .map_err(|e| JsError::new(&format!("Failed to serialize witness: {}", e)))
}
/// Generates a verifying key for the compiled circuit using the provided
/// KZG SRS, returned in halo2's `RawBytes` serialization.
pub fn genVk(
    compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
    params_ser: wasm_bindgen::Clamped<Vec<u8>>,
    compress_selectors: bool,
) -> Result<Vec<u8>, JsError> {
    // Load the KZG SRS parameters.
    let mut srs_reader = std::io::BufReader::new(&params_ser[..]);
    let params: ParamsKZG<Bn256> =
        halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut srs_reader)
            .map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
    // Load the compiled circuit.
    let circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize compiled model: {}", e)))?;
    // Generate, then serialize, the verifying key.
    let vk = create_vk_wasm::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(
        &circuit,
        &params,
        compress_selectors,
    )
    .map_err(Box::<dyn std::error::Error>::from)
    .map_err(|e| JsError::new(&format!("Failed to create verifying key: {}", e)))?;
    let mut serialized_vk = Vec::new();
    vk.write(&mut serialized_vk, halo2_proofs::SerdeFormat::RawBytes)
        .map_err(|e| JsError::new(&format!("Failed to serialize vk: {}", e)))?;
    Ok(serialized_vk)
}
pub fn genPk(
vk: wasm_bindgen::Clamped<Vec<u8>>,
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
params_ser: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
let mut reader = std::io::BufReader::new(¶ms_ser[..]);
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
let circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize compiled model: {}", e)))?;
let mut reader = std::io::BufReader::new(&vk[..]);
let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit.settings().clone(),
)
.map_err(|e| JsError::new(&format!("Failed to deserialize verifying key: {}", e)))?;
let pk = create_pk_wasm::<KZGCommitmentScheme<Bn256>, Fr, GraphCircuit>(vk, &circuit, ¶ms)
.map_err(Box::<dyn std::error::Error>::from)
.map_err(|e| JsError::new(&format!("Failed to create proving key: {}", e)))?;
let mut serialized_pk = Vec::new();
pk.write(&mut serialized_pk, halo2_proofs::SerdeFormat::RawBytes)
.map_err(|e| JsError::new(&format!("Failed to serialize pk: {}", e)))?;
Ok(serialized_pk)
}
/// Verifies a proof against a verifying key, circuit settings, and SRS,
/// dispatching on the settings' commitment scheme (KZG or IPA) and the
/// proof's transcript type (EVM or Poseidon). Returns `Ok(true)` on success
/// and a `JsError` describing the failure otherwise.
pub fn verify(
    proof_js: wasm_bindgen::Clamped<Vec<u8>>,
    vk: wasm_bindgen::Clamped<Vec<u8>>,
    settings: wasm_bindgen::Clamped<Vec<u8>>,
    srs: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<bool, JsError> {
    let circuit_settings: GraphSettings = serde_json::from_slice(&settings[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize settings: {}", e)))?;
    let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof_js[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
    let mut reader = std::io::BufReader::new(&vk[..]);
    let vk = VerifyingKey::<G1Affine>::read::<_, GraphCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        circuit_settings.clone(),
    )
    .map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
    // Original domain size, taken from the settings' logrows.
    let orig_n = 1 << circuit_settings.run_args.logrows;
    let commitment = circuit_settings.run_args.commitment.into();
    let mut reader = std::io::BufReader::new(&srs[..]);
    // Dispatch on commitment scheme, then on the proof's transcript type.
    let result = match commitment {
        Commitments::KZG => {
            let params: ParamsKZG<Bn256> =
                halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
                    .map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
            let strategy = KZGSingleStrategy::new(params.verifier_params());
            match proof.transcript_type {
                TranscriptType::EVM => verify_proof_circuit::<
                    VerifierSHPLONK<'_, Bn256>,
                    KZGCommitmentScheme<Bn256>,
                    KZGSingleStrategy<_>,
                    _,
                    EvmTranscript<G1Affine, _, _, _>,
                >(&proof, &params, &vk, strategy, orig_n),
                TranscriptType::Poseidon => {
                    verify_proof_circuit::<
                        VerifierSHPLONK<'_, Bn256>,
                        KZGCommitmentScheme<Bn256>,
                        KZGSingleStrategy<_>,
                        _,
                        PoseidonTranscript<NativeLoader, _>,
                    >(&proof, &params, &vk, strategy, orig_n)
                }
            }
        }
        Commitments::IPA => {
            let params: ParamsIPA<_> =
                halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
                    .map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
            let strategy = IPASingleStrategy::new(params.verifier_params());
            match proof.transcript_type {
                TranscriptType::EVM => verify_proof_circuit::<
                    VerifierIPA<_>,
                    IPACommitmentScheme<G1Affine>,
                    IPASingleStrategy<_>,
                    _,
                    EvmTranscript<G1Affine, _, _, _>,
                >(&proof, &params, &vk, strategy, orig_n),
                TranscriptType::Poseidon => {
                    verify_proof_circuit::<
                        VerifierIPA<_>,
                        IPACommitmentScheme<G1Affine>,
                        IPASingleStrategy<_>,
                        _,
                        PoseidonTranscript<NativeLoader, _>,
                    >(&proof, &params, &vk, strategy, orig_n)
                }
            }
        }
    };
    match result {
        Ok(_) => Ok(true),
        Err(e) => Err(JsError::new(&format!("{}", e))),
    }
}
/// Verifies an aggregated proof against an `AggregationCircuit` verifying
/// key and SRS. The commitment scheme is parsed from the `commitment`
/// string and the domain size from `logrows` (rather than from settings,
/// which aggregation circuits do not carry). Returns `Ok(true)` on success.
pub fn verifyAggr(
    proof_js: wasm_bindgen::Clamped<Vec<u8>>,
    vk: wasm_bindgen::Clamped<Vec<u8>>,
    logrows: u64,
    srs: wasm_bindgen::Clamped<Vec<u8>>,
    commitment: &str,
) -> Result<bool, JsError> {
    let proof: crate::pfsys::Snark<Fr, G1Affine> = serde_json::from_slice(&proof_js[..])
        .map_err(|e| JsError::new(&format!("Failed to deserialize proof: {}", e)))?;
    let mut reader = std::io::BufReader::new(&vk[..])
;
    // Aggregation circuits take unit `()` params when decoding the vk.
    let vk = VerifyingKey::<G1Affine>::read::<_, AggregationCircuit>(
        &mut reader,
        halo2_proofs::SerdeFormat::RawBytes,
        (),
    )
    .map_err(|e| JsError::new(&format!("Failed to deserialize vk: {}", e)))?;
    let commit = Commitments::from_str(commitment).map_err(|e| JsError::new(&format!("{}", e)))?;
    let orig_n = 1 << logrows;
    let mut reader = std::io::BufReader::new(&srs[..]);
    // Dispatch on commitment scheme, then on the proof's transcript type.
    let result = match commit {
        Commitments::KZG => {
            let params: ParamsKZG<Bn256> =
                halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
                    .map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
            let strategy = KZGSingleStrategy::new(params.verifier_params());
            match proof.transcript_type {
                TranscriptType::EVM => verify_proof_circuit::<
                    VerifierSHPLONK<'_, Bn256>,
                    KZGCommitmentScheme<Bn256>,
                    KZGSingleStrategy<_>,
                    _,
                    EvmTranscript<G1Affine, _, _, _>,
                >(&proof, &params, &vk, strategy, orig_n),
                TranscriptType::Poseidon => {
                    verify_proof_circuit::<
                        VerifierSHPLONK<'_, Bn256>,
                        KZGCommitmentScheme<Bn256>,
                        KZGSingleStrategy<_>,
                        _,
                        PoseidonTranscript<NativeLoader, _>,
                    >(&proof, &params, &vk, strategy, orig_n)
                }
            }
        }
        Commitments::IPA => {
            let params: ParamsIPA<_> =
                halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
                    .map_err(|e| JsError::new(&format!("Failed to deserialize params: {}", e)))?;
            let strategy = IPASingleStrategy::new(params.verifier_params());
            match proof.transcript_type {
                TranscriptType::EVM => verify_proof_circuit::<
                    VerifierIPA<_>,
                    IPACommitmentScheme<G1Affine>,
                    IPASingleStrategy<_>,
                    _,
                    EvmTranscript<G1Affine, _, _, _>,
                >(&proof, &params, &vk, strategy, orig_n),
                TranscriptType::Poseidon => {
                    verify_proof_circuit::<
                        VerifierIPA<_>,
                        IPACommitmentScheme<G1Affine>,
                        IPASingleStrategy<_>,
                        _,
                        PoseidonTranscript<NativeLoader, _>,
                    >(&proof, &params, &vk, strategy, orig_n)
                }
            }
        }
    };
    match result {
        Ok(_) => Ok(true),
        Err(e) => Err(JsError::new(&format!("{}", e))),
    }
}
pub fn prove(
witness: wasm_bindgen::Clamped<Vec<u8>>,
pk: wasm_bindgen::Clamped<Vec<u8>>,
compiled_circuit: wasm_bindgen::Clamped<Vec<u8>>,
srs: wasm_bindgen::Clamped<Vec<u8>>,
) -> Result<Vec<u8>, JsError> {
log::set_max_level(log::LevelFilter::Debug);
log::set_max_level(log::LevelFilter::Info);
let mut circuit: crate::graph::GraphCircuit = bincode::deserialize(&compiled_circuit[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize circuit: {}", e)))?;
let data: crate::graph::GraphWitness = serde_json::from_slice(&witness[..])
.map_err(|e| JsError::new(&format!("Failed to deserialize witness: {}", e)))?;
let mut reader = std::io::BufReader::new(&pk[..]);
let pk = ProvingKey::<G1Affine>::read::<_, GraphCircuit>(
&mut reader,
halo2_proofs::SerdeFormat::RawBytes,
circuit.settings().clone(),
)
.map_err(|e| JsError::new(&format!("Failed to deserialize proving key: {}", e)))?;
circuit
.load_graph_witness(&data)
.map_err(|e| JsError::new(&format!("{}", e)))?;
let public_inputs = circuit
.prepare_public_inputs(&data)
.map_err(|e| JsError::new(&format!("{}", e)))?;
let proof_split_commits: Option<crate::pfsys::ProofSplitCommit> = data.into();
let mut reader = std::io::BufReader::new(&srs[..]);
let commitment = circuit.settings().run_args.commitment.into();
let proof = match commitment {
Commitments::KZG => {
let params: ParamsKZG<Bn256> =
halo2_proofs::poly::commitment::Params::<'_, G1Affine>::read(&mut reader)
.map_err(|e| JsError::new(&format!("Failed to deserialize srs: {}", e)))?;
create_proof_circuit::<
KZGCommitmentScheme<Bn256>,
_,
ProverSHPLONK<_>,
VerifierSHPLONK<_>,
KZGSingleStrategy<_>,
_, |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.