})?
};
if let Some(mut vt) = res {
vt.reshape(&node.out_dims()[0])?;
results.insert(*idx, vec![vt.clone()]);
debug!("------------ output node {:?}: {:?}", idx, vt.show());
}
}
NodeType::SubGraph {
model,
inputs,
output_mappings,
input_mappings,
..
} => {
let original_values = values.clone();
let input_mappings = input_mappings.clone();
let input_dims = values.iter().map(|inp| inp.dims());
let num_iter = number_of_iterations(&input_mappings, input_dims.collect());
debug!(
"{} iteration(s) in a subgraph with inputs {:?}, sources {:?}, and outputs {:?}",
num_iter, inputs, model.graph.inputs, model.graph.outputs
);
let mut full_results: Vec<ValTensor<Fp>> = vec![];
for i in 0..num_iter {
debug!(" -------------- subgraph iteration: {}", i);
for ((mapping, inp), og_inp) in
input_mappings.iter().zip(&mut values).zip(&original_values)
{
if let InputMapping::Stacked { axis, chunk } = mapping {
let start = i * chunk;
let end = (i + 1) * chunk;
let mut sliced_input = og_inp.clone();
sliced_input.slice(axis, &start, &end)?;
*inp = sliced_input;
}
}
let mut subgraph_results = BTreeMap::from_iter(
model
.graph
.inputs
.clone()
.into_iter()
.zip(values.clone().into_iter().map(|v| vec![v])),
);
let res = model.layout_nodes(config, region, &mut subgraph_results)?;
let mut outlets = BTreeMap::new();
let mut stacked_outlets = BTreeMap::new();
for (mappings, outlet_res) in output_mappings.iter().zip(res) {
for mapping in mappings {
match mapping {
OutputMapping::Single { outlet, .. } => {
outlets.insert(outlet, outlet_res.clone());
}
OutputMapping::Stacked { outlet, axis, .. } => {
if !full_results.is_empty() {
let stacked_res = full_results[*outlet]
.clone()
.concat_axis(outlet_res.clone(), axis)?;
stacked_outlets.insert(outlet, stacked_res);
}
outlets.insert(outlet, outlet_res.clone());
}
}
}
}
let mut pre_stacked_outlets = outlets.clone();
pre_stacked_outlets.extend(stacked_outlets);
let outlets = outlets.into_values().collect_vec();
full_results = pre_stacked_outlets.into_values().collect_vec();
let output_states = output_state_idx(output_mappings);
let input_states = input_state_idx(&input_mappings);
assert_eq!(
input_states.len(),
output_states.len(),
"input and output states must be the same length, got {:?} and {:?}",
input_mappings,
output_mappings
);
for (input_idx, output_idx) in input_states.iter().zip(output_states) {
assert_eq!(
values[*input_idx].dims(),
outlets[output_idx].dims(),
"input and output dims must be the same, got {:?} and {:?}",
values[*input_idx].dims(),
outlets[output_idx].dims()
);
values[*input_idx] = outlets[output_idx].clone();
}
}
trace!(
"------------ output subgraph node {:?}: {:?}",
idx,
full_results.iter().map(|x| x.show()).collect_vec()
);
results.insert(*idx, full_results);
}
}
}
results.extend(orig_inputs);
let output_nodes = self.graph.outputs.iter();
debug!(
"model outputs are nodes: {:?}",
output_nodes.clone().collect_vec()
);
let outputs = output_nodes
.map(|(idx, outlet)| {
Ok(results.get(idx).ok_or(GraphError::MissingResults)?[*outlet].clone())
})
.collect::<Result<Vec<_>, GraphError>>()?;
Ok(outputs)
}
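/// Runs the full model layout against a dummy config and region to measure the
/// circuit without proving: row and constant counts, lookup and range-check
/// usage, and the resulting output tensors.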
pub fn dummy_layout(
&self,
run_args: &RunArgs,
inputs: &[ValTensor<Fp>],
witness_gen: bool,
) -> Result<DummyPassRes, Box<dyn Error>> {
debug!("calculating num of constraints using dummy model layout...");
let start_time = instant::Instant::now();
let mut results = BTreeMap::<usize, Vec<ValTensor<Fp>>>::new();
for (i, input_idx) in self.graph.inputs.iter().enumerate() {
results.insert(*input_idx, vec![inputs[i].clone()]);
}
let mut dummy_config =
PolyConfig::dummy(run_args.logrows as usize, run_args.num_inner_cols);
let mut model_config = ModelConfig {
base: dummy_config.clone(),
vars: ModelVars::new_dummy(),
};
let mut region = RegionCtx::new_dummy(0, run_args.num_inner_cols, witness_gen);
let outputs = self.layout_nodes(&mut model_config, &mut region, &mut results)?;
if self.visibility.output.is_public() || self.visibility.output.is_fixed() {
let output_scales = self.graph.get_output_scales()?;
let res = outputs
.iter()
.enumerate()
.map(|(i, output)| {
let mut comparator: ValTensor<Fp> = (0..output.len())
.map(|_| {
if !self.visibility.output.is_fixed() {
ValType::Value(Value::<Fp>::unknown())
} else {
ValType::Constant(Fp::random(&mut rand::thread_rng()))
}
})
.collect::<Vec<_>>()
.into();
comparator.reshape(output.dims())?;
let mut tolerance = run_args.tolerance;
tolerance.scale = scale_to_multiplier(output_scales[i]).into();
dummy_config.layout(
&mut region,
&[output.clone(), comparator],
Box::new(HybridOp::RangeCheck(tolerance)),
)
})
.collect::<Result<Vec<_>, _>>();
res?;
} else if !self.visibility.output.is_private() {
for output in &outputs {
region.update_constants(output.create_constants_map());
}
}
let duration = start_time.elapsed();
trace!("dummy model layout took: {:?}", duration);
region.debug_report();
let outputs = outputs
.iter()
.map(|x| {
x.get_felt_evals()
.unwrap_or(Tensor::new(Some(&[Fp::ZERO]), &[1]).unwrap())
})
.collect();
let res = DummyPassRes {
num_rows: region.row(),
linear_coord: region.linear_coord(),
total_const_size: region.total_constants(),
lookup_ops: region.used_lookups(),
range_checks: region.used_range_checks(),
max_lookup_inputs: region.max_lookup_inputs(),
min_lookup_inputs: region.min_lookup_inputs(),
max_range_size: region.max_range_size(),
num_dynamic_lookups: region.dynamic_lookup_index(),
dynamic_lookup_col_coord: region.dynamic_lookup_col_coord(),
num_shuffles: region.shuffle_index(),
shuffle_col_coord: region.shuffle_col_coord(),
outputs,
};
Ok(res)
}
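/// Collects the quantized values of every constant node, recursing into subgraphs.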
pub fn get_all_params(&self) -> Vec<Tensor<Fp>> {
let mut params = vec![];
for node in self.graph.nodes.values() {
match node {
NodeType::Node(_) => {
if let Some(constant) = extract_const_quantized_values(node.opkind()) {
params.push(constant);
}
}
NodeType::SubGraph { model, .. } => {
params.extend(model.get_all_params());
}
}
}
params
}
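/// Returns the shape of every constant in the graph, recursing into subgraphs.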
pub fn const_shapes(&self) -> Vec<Vec<usize>> {
let mut const_shapes = vec![];
for node in self.graph.nodes.values() {
match node {
NodeType::Node(_) => {
if let Some(constant) = extract_const_quantized_values(node.opkind()) {
const_shapes.push(constant.dims().to_vec());
};
}
NodeType::SubGraph { model, .. } => {
const_shapes.extend(model.const_shapes());
}
}
}
const_shapes
}
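/// Replaces the pre-assigned values of constant nodes with `consts`, in node order,
/// recursing into subgraphs. Returns how many constants were consumed.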
pub fn replace_consts(&mut self, consts: &[ValTensor<Fp>]) -> usize {
let mut const_idx = 0;
for node in self.graph.nodes.values_mut() {
match node {
NodeType::Node(n) => {
if let SupportedOp::Constant(c) = &n.opkind {
let mut op = crate::circuit::Constant::new(
c.quantized_values.clone(),
c.raw_values.clone(),
);
op.pre_assign(consts[const_idx].clone());
n.opkind = SupportedOp::Constant(op);
const_idx += 1;
}
}
NodeType::SubGraph { model, .. } => {
let total_consts = model.replace_consts(&consts[const_idx..]);
const_idx += total_consts;
}
}
}
const_idx
}
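/// Shapes of the instance columns implied by the input/output visibility settings.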
pub fn instance_shapes(&self) -> Result<Vec<Vec<usize>>, Box<dyn Error>> {
let mut instance_shapes = vec![];
if self.visibility.input.is_public() {
instance_shapes.extend(self.graph.input_shapes()?);
}
if self.visibility.output.is_public() {
instance_shapes.extend(self.graph.output_shapes()?);
}
Ok(instance_shapes)
}
}
use crate::circuit::modules::polycommit::{PolyCommitChip, PolyCommitConfig};
use crate::circuit::modules::poseidon::spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
use crate::circuit::modules::poseidon::{PoseidonChip, PoseidonConfig};
use crate::circuit::modules::Module;
use crate::circuit::region::ConstantsMap;
use crate::tensor::{Tensor, ValTensor};
use halo2_proofs::circuit::Layouter;
use halo2_proofs::plonk::{Column, ConstraintSystem, Error, Instance, VerifyingKey};
use halo2_proofs::poly::commitment::CommitmentScheme;
use halo2curves::bn256::{Fr as Fp, G1Affine};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use super::{VarVisibility, Visibility};
pub const POSEIDON_LEN_GRAPH: usize = 32;
pub const POSEIDON_INSTANCES: usize = 1;
pub type ModulePoseidon =
PoseidonChip<PoseidonSpec, POSEIDON_WIDTH, POSEIDON_RATE, POSEIDON_LEN_GRAPH>;
pub type ModulePoseidonConfig = PoseidonConfig<POSEIDON_WIDTH, POSEIDON_RATE>;
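/// Configurations for the auxiliary graph modules: polycommit chips, an optional
/// Poseidon hashing chip, and an optional shared instance column.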
#[derive(Default)] // Default is required by `Self::default()` in `from_visibility`
pub struct ModuleConfigs {
polycommit: Vec<PolyCommitConfig>,
poseidon: Option<ModulePoseidonConfig>,
pub instance: Option<Column<Instance>>,
}
impl ModuleConfigs {
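/// Configures one polycommit chip per committed column size; the Poseidon module
/// is configured separately via `configure_complex_modules`.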
pub fn from_visibility(
cs: &mut ConstraintSystem<Fp>,
module_size: ModuleSizes,
logrows: usize,
) -> Self {
let mut config = Self::default();
for size in module_size.polycommit {
config
.polycommit
.push(PolyCommitChip::configure(cs, (logrows, size)));
}
config
}
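/// Configures the Poseidon module if any variable is hashed: public hashes reuse
/// (or create) the shared instance column, private hashes get no instance column.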
pub fn configure_complex_modules(
&mut self,
cs: &mut ConstraintSystem<Fp>,
visibility: VarVisibility,
module_size: ModuleSizes,
) {
if (visibility.input.is_hashed()
|| visibility.output.is_hashed()
|| visibility.params.is_hashed())
&& module_size.poseidon.1[0] > 0
{
if visibility.input.is_hashed_public()
|| visibility.output.is_hashed_public()
|| visibility.params.is_hashed_public()
{
if let Some(inst) = self.instance {
self.poseidon = Some(ModulePoseidon::configure_with_optional_instance(
cs,
Some(inst),
));
} else {
let poseidon = ModulePoseidon::configure(cs, ());
self.instance = poseidon.instance;
self.poseidon = Some(poseidon);
}
} else if visibility.input.is_hashed_private()
|| visibility.output.is_hashed_private()
|| visibility.params.is_hashed_private()
{
self.poseidon = Some(ModulePoseidon::configure_with_optional_instance(cs, None));
}
};
}
}
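/// The out-of-circuit outputs of the modules: Poseidon hashes and/or polynomial
/// commitments.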
pub struct ModuleForwardResult {
pub poseidon_hash: Option<Vec<Fp>>,
pub polycommit: Option<Vec<Vec<G1Affine>>>,
}
impl ModuleForwardResult {
pub fn get_result(&self, vis: Visibility) -> Vec<Vec<Fp>> {
if vis.is_hashed() {
self.poseidon_hash
.clone()
.unwrap()
.into_iter()
.map(|x| vec![x])
.collect()
} else {
vec![]
}
}
pub fn get_instances(&self) -> Vec<Vec<Fp>> {
if let Some(poseidon) = &self.poseidon_hash {
poseidon.iter().map(|x| vec![*x]).collect()
} else {
vec![]
}
}
}
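/// Row and instance-column requirements of the configured modules.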
pub struct ModuleSizes {
polycommit: Vec<usize>,
poseidon: (usize, Vec<usize>),
}
impl ModuleSizes {
pub fn new() -> Self {
ModuleSizes {
polycommit: vec![],
poseidon: (
0,
vec![0; crate::circuit::modules::poseidon::NUM_INSTANCE_COLUMNS],
),
}
}
pub fn max_constraints(&self) -> usize {
self.poseidon.0
}
pub fn num_instances(&self) -> Vec<usize> {
self.poseidon.1.clone()
}
}
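/// Tracks module state during layout (the index of the next polycommit config).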
pub struct GraphModules {
polycommit_idx: usize,
}
impl GraphModules {
pub fn new() -> GraphModules {
GraphModules { polycommit_idx: 0 }
}
pub fn reset_index(&mut self) {
self.polycommit_idx = 0;
}
}
impl GraphModules {
fn num_constraint_given_shapes(
visibility: Visibility,
shapes: Vec<Vec<usize>>,
sizes: &mut ModuleSizes,
) {
for shape in shapes {
let total_len = shape.iter().product::<usize>();
if total_len > 0 {
if visibility.is_polycommit() {
sizes.polycommit.push(total_len);
} else if visibility.is_hashed() {
sizes.poseidon.0 += ModulePoseidon::num_rows(total_len);
sizes.poseidon.1[0] += 1;
}
}
}
}
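/// Tallies module rows and instances across the input, param, and output shapes.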
pub fn num_constraints_and_instances(
input_shapes: Vec<Vec<usize>>,
params_shapes: Vec<Vec<usize>>,
output_shapes: Vec<Vec<usize>>,
visibility: VarVisibility,
) -> ModuleSizes {
let mut module_sizes = ModuleSizes::new();
Self::num_constraint_given_shapes(visibility.input, input_shapes, &mut module_sizes);
Self::num_constraint_given_shapes(visibility.params, params_shapes, &mut module_sizes);
Self::num_constraint_given_shapes(visibility.output, output_shapes, &mut module_sizes);
module_sizes
}
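/// Lays out a single module over the first value in `x` and advances the instance
/// offset by the module's declared increments.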
fn layout_module(
module: &impl Module<Fp>,
layouter: &mut impl Layouter<Fp>,
x: &mut Vec<ValTensor<Fp>>,
instance_offset: &mut usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<(), Error> {
let cloned_x = (*x).clone();
x[0] = module
.layout(layouter, &cloned_x, instance_offset.to_owned(), constants)
.unwrap();
for inc in module.instance_increment_input().iter() {
*instance_offset += inc;
}
Ok(())
}
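/// Lays out the polycommit and/or Poseidon modules over `values` according to the
/// element visibility, replacing each value with the module's output.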
pub fn layout(
&mut self,
layouter: &mut impl Layouter<Fp>,
configs: &mut ModuleConfigs,
values: &mut [ValTensor<Fp>],
element_visibility: &Visibility,
instance_offset: &mut usize,
constants: &mut ConstantsMap<Fp>,
) -> Result<(), Error> {
if element_visibility.is_polycommit() && !values.is_empty() {
let mut inputs = values.iter_mut().map(|x| vec![x.clone()]).collect_vec();
inputs.iter_mut().for_each(|x| {
let chip = PolyCommitChip::new(configs.polycommit[self.polycommit_idx].clone());
let module_offset = 3 + self.polycommit_idx;
layouter
.assign_region(|| format!("_enter_module_{}", module_offset), |_| Ok(()))
.unwrap();
Self::layout_module(&chip, layouter, x, instance_offset, constants).unwrap();
self.polycommit_idx += 1;
});
values.iter_mut().enumerate().for_each(|(i, x)| {
x.clone_from(&inputs[i][0]);
});
}
if element_visibility.is_hashed() && !values.is_empty() {
if let Some(config) = &mut configs.poseidon {
layouter.assign_region(|| "_enter_module_0", |_| Ok(()))?;
let chip = ModulePoseidon::new(config.clone());
let mut inputs = values.iter_mut().map(|x| vec![x.clone()]).collect_vec();
inputs.iter_mut().for_each(|x| {
Self::layout_module(&chip, layouter, x, instance_offset, constants).unwrap();
});
values.iter_mut().enumerate().for_each(|(i, x)| {
x.clone_from(&inputs[i][0]);
});
} else {
log::error!("Poseidon config not initialized");
return Err(Error::Synthesis);
}
}
Ok(())
}
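/// Computes the module outputs outside the circuit: Poseidon hashes of the inputs,
/// and polynomial commitments when both a verifying key and an SRS are provided.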
pub fn forward<Scheme: CommitmentScheme<Scalar = Fp, Curve = G1Affine>>(
inputs: &[Tensor<Scheme::Scalar>],
element_visibility: &Visibility,
vk: Option<&VerifyingKey<G1Affine>>,
srs: Option<&Scheme::ParamsProver>,
) -> Result<ModuleForwardResult, Box<dyn std::error::Error>> {
let mut poseidon_hash = None;
let mut polycommit = None;
if element_visibility.is_hashed() {
let field_elements = inputs.iter().fold(vec![], |mut acc, x| {
let res = ModulePoseidon::run(x.to_vec()).unwrap()[0].clone();
acc.extend(res);
acc
});
poseidon_hash = Some(field_elements);
}
if element_visibility.is_polycommit() {
if let Some(vk) = vk {
if let Some(srs) = srs {
let commitments = inputs.iter().fold(vec![], |mut acc, x| {
let res = PolyCommitChip::commit::<Scheme>(
x.to_vec(),
(vk.cs().blinding_factors() + 1) as u32,
srs,
);
acc.push(res);
acc
});
polycommit = Some(commitments);
} else {
log::warn!("no srs provided for polycommit. processed value will be none");
}
} else {
log::debug!(
"no verifying key provided for polycommit. processed value will be none"
);
}
}
Ok(ModuleForwardResult {
poseidon_hash,
polycommit,
})
}
}
use super::scale_to_multiplier;
use super::utilities::node_output_shapes;
use super::VarScales;
use super::Visibility;
use crate::circuit::hybrid::HybridOp;
use crate::circuit::lookup::LookupOp;
use crate::circuit::poly::PolyOp;
use crate::circuit::Constant;
use crate::circuit::Input;
use crate::circuit::Op;
use crate::circuit::Unknown;
use crate::graph::new_op_from_onnx;
use crate::tensor::TensorError;
use halo2curves::bn256::Fr as Fp;
use log::trace;
use serde::Deserialize;
use serde::Serialize;
use std::collections::BTreeMap;
use std::error::Error;
use std::fmt;
use tabled::Tabled;
use tract_onnx::{
self,
prelude::{Node as OnnxNode, SymbolValues, TypedFact, TypedOp},
};
fn display_vector<T: fmt::Debug>(v: &Vec<T>) -> String {
if !v.is_empty() {
format!("{:?}", v)
} else {
String::new()
}
}
fn display_opkind(v: &SupportedOp) -> String {
v.as_string()
}
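/// An op whose inputs are first rescaled by per-input `(index, multiplier)` pairs.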
#[derive(Clone)] // Clone is required by `clone_dyn` below
pub struct Rescaled {
pub inner: Box<SupportedOp>,
pub scale: Vec<(usize, u128)>,
}
impl Op<Fp> for Rescaled {
fn as_any(&self) -> &dyn std::any::Any {
self
}
fn as_string(&self) -> String {
format!("RESCALED INPUT ({})", self.inner.as_string())
}
fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
let in_scales = in_scales
.into_iter()
.zip(self.scale.iter())
.map(|(a, b)| a + crate::graph::multiplier_to_scale(b.1 as f64))
.collect();
Op::<Fp>::out_scale(&*self.inner, in_scales)
}
fn layout(
&self,
config: &mut crate::circuit::BaseConfig<Fp>,
region: &mut crate::circuit::region::RegionCtx<Fp>,
values: &[crate::tensor::ValTensor<Fp>],
) -> Result<Option<crate::tensor::ValTensor<Fp>>, Box<dyn Error>> {
if self.scale.len() != values.len() {
return Err(Box::new(TensorError::DimMismatch(
"rescaled inputs".to_string(),
)));
}
let res =
&crate::circuit::layouts::rescale(config, region, values[..].try_into()?, &self.scale)?
[..];
self.inner.layout(config, region, res)
}
fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
Box::new(self.clone())
}
}
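/// An op whose output is divided back down to `target_scale` after layout.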
#[derive(Clone)] // Clone is required by `clone_dyn` below
pub struct RebaseScale {
pub inner: Box<SupportedOp>,
pub rebase_op: HybridOp,
pub target_scale: i32,
pub original_scale: i32,
pub multiplier: f64,
}
impl RebaseScale {
pub fn rebase(
inner: SupportedOp,
global_scale: crate::Scale,
op_out_scale: crate::Scale,
scale_rebase_multiplier: u32,
div_rebasing: bool,
) -> SupportedOp {
if (op_out_scale > (global_scale * scale_rebase_multiplier as i32))
&& !inner.is_constant()
&& !inner.is_input()
{
let multiplier =
scale_to_multiplier(op_out_scale - global_scale * scale_rebase_multiplier as i32);
if let Some(op) = inner.get_rebased() {
let multiplier = op.multiplier * multiplier;
SupportedOp::RebaseScale(RebaseScale {
inner: op.inner.clone(),
target_scale: op.target_scale,
multiplier,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32((multiplier) as f32),
use_range_check_for_int: !div_rebasing,
},
original_scale: op.original_scale,
})
} else {
SupportedOp::RebaseScale(RebaseScale {
inner: Box::new(inner),
target_scale: global_scale * scale_rebase_multiplier as i32,
multiplier,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32(multiplier as f32),
use_range_check_for_int: !div_rebasing,
},
original_scale: op_out_scale,
})
}
} else {
inner
}
}
pub fn rebase_up(
inner: SupportedOp,
target_scale: crate::Scale,
op_out_scale: crate::Scale,
div_rebasing: bool,
) -> SupportedOp {
if (op_out_scale < (target_scale)) && !inner.is_constant() && !inner.is_input() {
let multiplier = scale_to_multiplier(op_out_scale - target_scale);
if let Some(op) = inner.get_rebased() {
let multiplier = op.multiplier * multiplier;
SupportedOp::RebaseScale(RebaseScale {
inner: op.inner.clone(),
target_scale: op.target_scale,
multiplier,
original_scale: op.original_scale,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32((multiplier) as f32),
use_range_check_for_int: !div_rebasing,
},
})
} else {
SupportedOp::RebaseScale(RebaseScale {
inner: Box::new(inner),
target_scale,
multiplier,
original_scale: op_out_scale,
rebase_op: HybridOp::Div {
denom: crate::circuit::utils::F32(multiplier as f32),
use_range_check_for_int: !div_rebasing,
},
})
}
} else {
inner
}
}
}
impl Op<Fp> for RebaseScale {
fn as_any(&self) -> &dyn std::any::Any {
self
}
fn as_string(&self) -> String {
format!(
"REBASED (div={:?}, rebasing_op={}) ({})",
self.multiplier,
<HybridOp as Op<Fp>>::as_string(&self.rebase_op),
self.inner.as_string()
)
}
fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
Ok(self.target_scale)
}
fn layout(
&self,
config: &mut crate::circuit::BaseConfig<Fp>,
region: &mut crate::circuit::region::RegionCtx<Fp>,
values: &[crate::tensor::ValTensor<Fp>],
) -> Result<Option<crate::tensor::ValTensor<Fp>>, Box<dyn Error>> {
let original_res = self
.inner
.layout(config, region, values)?
.ok_or("no inner layout")?;
self.rebase_op.layout(config, region, &[original_res])
}
fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
Box::new(self.clone())
}
}
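/// Every op a graph node can carry: polynomial (linear), lookup (nonlinear), and
/// hybrid ops, plus inputs, constants, and the scale-adjusting wrappers above.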
#[derive(Clone)] // variants are cloned when ops are extracted and replaced
pub enum SupportedOp {
Linear(PolyOp),
Nonlinear(LookupOp),
Hybrid(HybridOp),
Input(Input),
Constant(Constant<Fp>),
Unknown(Unknown),
Rescaled(Rescaled),
RebaseScale(RebaseScale),
}
impl SupportedOp {
pub fn is_lookup(&self) -> bool {
match self {
SupportedOp::Nonlinear(_) => true,
SupportedOp::RebaseScale(op) => op.inner.is_lookup(),
_ => false,
}
}
pub fn get_input(&self) -> Option<Input> {
match self {
SupportedOp::Input(op) => Some(op.clone()),
_ => None,
}
}
pub fn get_rebased(&self) -> Option<&RebaseScale> {
match self {
SupportedOp::RebaseScale(op) => Some(op),
_ => None,
}
}
pub fn get_lookup(&self) -> Option<&LookupOp> {
match self {
SupportedOp::Nonlinear(op) => Some(op),
_ => None,
}
}
pub fn get_constant(&self) -> Option<&Constant<Fp>> {
match self {
SupportedOp::Constant(op) => Some(op),
_ => None,
}
}
pub fn get_mutable_constant(&mut self) -> Option<&mut Constant<Fp>> {
match self {
SupportedOp::Constant(op) => Some(op),
_ => None,
}
}
fn homogenous_rescale(
&self,
in_scales: Vec<crate::Scale>,
) -> Result<Box<dyn Op<Fp>>, Box<dyn Error>> {
let inputs_to_scale = self.requires_homogenous_input_scales();
let op = self.clone_dyn();
super::homogenize_input_scales(op, in_scales, inputs_to_scale)
}
fn as_op(&self) -> &dyn Op<Fp> {
match self {
SupportedOp::Linear(op) => op,
SupportedOp::Nonlinear(op) => op,
SupportedOp::Hybrid(op) => op,
SupportedOp::Input(op) => op,
SupportedOp::Constant(op) => op,
SupportedOp::Unknown(op) => op,
SupportedOp::Rescaled(op) => op,
SupportedOp::RebaseScale(op) => op,
}
}
}
impl From<Box<dyn Op<Fp>>> for SupportedOp {
fn from(value: Box<dyn Op<Fp>>) -> Self {
if let Some(op) = value.as_any().downcast_ref::<PolyOp>() {
return SupportedOp::Linear(op.clone());
};
if let Some(op) = value.as_any().downcast_ref::<LookupOp>() {
return SupportedOp::Nonlinear(op.clone());
};
if let Some(op) = value.as_any().downcast_ref::<HybridOp>() {
return SupportedOp::Hybrid(op.clone());
};
if let Some(op) = value.as_any().downcast_ref::<Input>() {
return SupportedOp::Input(op.clone());
};
if let Some(op) = value.as_any().downcast_ref::<Constant<Fp>>() {
return SupportedOp::Constant(op.clone());
};
if let Some(op) = value.as_any().downcast_ref::<Unknown>() {
return SupportedOp::Unknown(op.clone());
};
if let Some(op) = value.as_any().downcast_ref::<Rescaled>() {
return SupportedOp::Rescaled(op.clone());
};
if let Some(op) = value.as_any().downcast_ref::<RebaseScale>() {
return SupportedOp::RebaseScale(op.clone());
};
log::error!("Unsupported op type");
log::warn!("defaulting to Unknown");
SupportedOp::Unknown(Unknown {})
}
}
impl Op<Fp> for SupportedOp {
fn layout(
&self,
config: &mut crate::circuit::BaseConfig<Fp>,
region: &mut crate::circuit::region::RegionCtx<Fp>,
values: &[crate::tensor::ValTensor<Fp>],
) -> Result<Option<crate::tensor::ValTensor<Fp>>, Box<dyn Error>> {
self.as_op().layout(config, region, values)
}
fn is_input(&self) -> bool {
self.as_op().is_input()
}
fn is_constant(&self) -> bool {
self.as_op().is_constant()
}
fn requires_homogenous_input_scales(&self) -> Vec<usize> {
self.as_op().requires_homogenous_input_scales()
}
fn clone_dyn(&self) -> Box<dyn Op<Fp>> {
self.as_op().clone_dyn()
}
fn as_string(&self) -> String {
self.as_op().as_string()
}
fn as_any(&self) -> &dyn std::any::Any {
self
}
fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
self.as_op().out_scale(in_scales)
}
}
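/// An output slot of a node: `(node index, output index)`.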
pub type Outlet = (usize, usize);
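/// A node in the computational graph: its op, fixed-point output scale, input
/// outlets, output dimensions, index, and use count.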
#[derive(Clone)] // nodes are cloned when building and traversing the graph
pub struct Node {
pub opkind: SupportedOp,
pub out_scale: i32,
pub inputs: Vec<Outlet>,
pub out_dims: Vec<usize>,
pub idx: usize,
pub num_uses: usize,
}
impl Tabled for Node {
const LENGTH: usize = 6;
fn headers() -> Vec<std::borrow::Cow<'static, str>> {
let mut headers = Vec::with_capacity(Self::LENGTH);
for i in ["idx", "opkind", "out_scale", "inputs", "out_dims"] {
headers.push(std::borrow::Cow::Borrowed(i));
}
headers
}
fn fields(&self) -> Vec<std::borrow::Cow<'_, str>> {
let mut fields = Vec::with_capacity(Self::LENGTH);
fields.push(std::borrow::Cow::Owned(self.idx.to_string()));
fields.push(std::borrow::Cow::Owned(display_opkind(&self.opkind)));
fields.push(std::borrow::Cow::Owned(self.out_scale.to_string()));
fields.push(std::borrow::Cow::Owned(display_vector(&self.inputs)));
fields.push(std::borrow::Cow::Owned(display_vector(&self.out_dims)));
fields
}
}
impl PartialEq for Node {
fn eq(&self, other: &Node) -> bool {
(self.out_scale == other.out_scale)
&& (self.inputs == other.inputs)
&& (self.out_dims == other.out_dims)
&& (self.idx == other.idx)
&& (self.opkind.as_string() == other.opkind.as_string())
}
}
impl Node {
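/// Builds a circuit node from a tract ONNX node: resolves inputs, converts the op,
/// rescales constants and inputs to a homogenous scale, and rebases the output
/// scale toward the global scale.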
pub fn new(
node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
other_nodes: &mut BTreeMap<usize, super::NodeType>,
scales: &VarScales,
param_visibility: &Visibility,
idx: usize,
symbol_values: &SymbolValues,
div_rebasing: bool,
rebase_frac_zero_constants: bool,
) -> Result<Self, Box<dyn Error>> {
trace!("Create {:?}", node);
trace!("Create op {:?}", node.op);
let num_uses = std::cmp::max(
node.outputs
.iter()
.map(|outlet| outlet.successors.len())
.sum::<usize>(),
1,
);
let mut inputs = vec![];
let mut input_ids = node
.inputs
.iter()
.map(|i| (i.node, i.slot))
.collect::<Vec<_>>();
input_ids
.iter()
.map(|(i, _)| {
inputs.push(other_nodes.get(i).ok_or("input not found")?.clone());
Ok(())
})
.collect::<Result<Vec<_>, Box<dyn Error>>>()?;
let (mut opkind, deleted_indices) = new_op_from_onnx(
idx,
scales,
param_visibility,
node.clone(),
&mut inputs,
symbol_values,
rebase_frac_zero_constants,
)?;
other_nodes.extend(
inputs
.iter()
.map(|i| (i.idx(), i.clone()))
.collect::<BTreeMap<_, _>>(),
);
input_ids.iter_mut().enumerate().for_each(|(i, (idx, _))| {
if deleted_indices.contains(&i) {
*idx = usize::MAX;
}
});
input_ids.retain(|(idx, _)| *idx != usize::MAX);
let mut in_scales: Vec<crate::Scale> = input_ids
.iter()
.map(|(idx, outlet)| {
let idx = inputs
.iter()
.position(|x| *idx == x.idx())
.ok_or("input not found")?;
Ok(inputs[idx].out_scales()[*outlet])
})
.collect::<Result<Vec<_>, Box<dyn Error>>>()?;
let homogenous_inputs = opkind.requires_homogenous_input_scales();
for input in homogenous_inputs
.into_iter()
.filter(|i| !deleted_indices.contains(i))
{
if inputs.len() > input {
let input_node = other_nodes
.get_mut(&inputs[input].idx())
.ok_or("input not found")?;
let input_opkind = &mut input_node.opkind();
if let Some(constant) = input_opkind.get_mutable_constant() {
rescale_const_with_single_use(
constant,
in_scales.clone(),
param_visibility,
input_node.num_uses(),
)?;
input_node.replace_opkind(constant.clone_dyn().into());
let out_scale = input_opkind.out_scale(vec![])?;
input_node.bump_scale(out_scale);
in_scales[input] = out_scale;
}
}
}
opkind = opkind.homogenous_rescale(in_scales.clone())?.into();
let mut out_scale = opkind.out_scale(in_scales.clone())?;
let global_scale = scales.get_max();
opkind = RebaseScale::rebase(
opkind,
global_scale,
out_scale,
scales.rebase_multiplier,
div_rebasing,
);
out_scale = opkind.out_scale(in_scales)?;
let out_dims = node_output_shapes(&node, symbol_values)?;
let mut out_dims = out_dims[0].clone();
if out_dims.is_empty() {
out_dims = vec![1];
}
Ok(Node {
idx,
opkind,
inputs: input_ids,
out_dims,
out_scale,
num_uses,
})
}
}
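/// If a constant has exactly one consumer, requantizes it in place to the largest
/// scale among that consumer's input scales.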
fn rescale_const_with_single_use(
constant: &mut Constant<Fp>,
in_scales: Vec<crate::Scale>,
param_visibility: &Visibility,
num_uses: usize,
) -> Result<(), Box<dyn Error>> {
if num_uses == 1 {
let current_scale = constant.out_scale(vec![])?;
let scale_max = in_scales.iter().max().ok_or("no scales")?;
if scale_max > &current_scale {
let raw_values = constant.raw_values.clone();
constant.quantized_values =
super::quantize_tensor(raw_values, *scale_max, param_visibility)?;
}
}
Ok(())
}
use super::GraphError;
use super::VarScales;
use super::{Rescaled, SupportedOp, Visibility};
use crate::circuit::hybrid::HybridOp;
use crate::circuit::lookup::LookupOp;
use crate::circuit::poly::PolyOp;
use crate::circuit::Op;
use crate::tensor::{Tensor, TensorError, TensorType};
use halo2curves::bn256::Fr as Fp;
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use log::{debug, warn};
use std::error::Error;
use std::sync::Arc;
use tract_onnx::prelude::{DatumType, Node as OnnxNode, TypedFact, TypedOp};
use tract_onnx::tract_core::ops::{
array::{
Gather, GatherElements, GatherNd, MultiBroadcastTo, OneHot, ScatterElements, ScatterNd,
Slice, Topk,
},
change_axes::AxisOp,
cnn::{Conv, Deconv},
einsum::EinSum,
element_wise::ElementWiseOp,
nn::{LeakyRelu, Reduce, Softmax},
Downsample,
};
use tract_onnx::tract_hir::{
internal::DimLike,
ops::array::{Pad, PadMode, TypedConcat},
ops::cnn::PoolSpec,
ops::konst::Const,
ops::nn::DataFormat,
tract_core::ops::cast::Cast,
tract_core::ops::cnn::{conv::KernelFormat, MaxPool, PaddingSpec, SumPool},
};
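/// Quantizes a float to a fixed-point integer: `round(elem * 2^scale + shift)`.
/// For example, with `shift = 0.0` and `scale = 7`, `1.5` maps to `192` since
/// `1.5 * 2^7 = 192`. Errors if the scaled value would exceed `i128::MAX`.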
pub fn quantize_float(elem: &f64, shift: f64, scale: crate::Scale) -> Result<i128, TensorError> {
let mult = scale_to_multiplier(scale);
let max_value = ((i128::MAX as f64 - shift) / mult).round();
if *elem > max_value {
return Err(TensorError::SigBitTruncationError);
}
let scaled = (mult * *elem + shift).round() as i128;
Ok(scaled)
}
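/// Inverse of quantization: converts a field element to its signed integer
/// representation, divides by `2^scale`, and removes the shift.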
pub fn dequantize(felt: Fp, scale: crate::Scale, shift: f64) -> f64 {
let int_rep = crate::fieldutils::felt_to_i128(felt);
let multiplier = scale_to_multiplier(scale);
int_rep as f64 / multiplier - shift
}
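/// Converts a fixed-point scale (a power-of-two exponent) to its multiplier, `2^scale`.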
pub fn scale_to_multiplier(scale: crate::Scale) -> f64 {
f64::powf(2., scale as f64)
}
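/// Converts a multiplier back to the nearest power-of-two scale, `round(log2(mult))`.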
pub fn multiplier_to_scale(mult: f64) -> crate::Scale {
mult.log2().round() as crate::Scale
}
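/// Evaluates a tract node's output shapes to concrete sizes under `symbol_values`.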
pub fn node_output_shapes(
node: &OnnxNode<TypedFact, Box<dyn TypedOp>>,
symbol_values: &SymbolValues,
) -> Result<Vec<Vec<usize>>, Box<dyn std::error::Error>> {
let mut shapes = Vec::new();
let outputs = node.outputs.to_vec();
for output in outputs {
let shape = output.fact.shape;
let shape = shape.eval_to_usize(symbol_values)?;
let mv = shape.to_vec();
shapes.push(mv)
}
Ok(shapes)
}
use tract_onnx::prelude::SymbolValues;
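/// Extracts a tract tensor into a `Tensor<f32>`, casting from any supported datum type.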
pub fn extract_tensor_value(
input: Arc<tract_onnx::prelude::Tensor>,
) -> Result<Tensor<f32>, Box<dyn std::error::Error>> {
use maybe_rayon::prelude::{IntoParallelRefIterator, ParallelIterator};
let dt = input.datum_type();
let dims = input.shape().to_vec();
let mut const_value: Tensor<f32>;
if dims.is_empty() && input.len() == 0 {
const_value = Tensor::<f32>::new(None, &dims)?;
return Ok(const_value);
}
match dt {
DatumType::F16 => {
let vec = input.as_slice::<tract_onnx::prelude::f16>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| (*x).into()).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::F32 => {
let vec = input.as_slice::<f32>()?.to_vec();
const_value = Tensor::<f32>::new(Some(&vec), &dims)?;
}
DatumType::F64 => {
let vec = input.as_slice::<f64>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::I64 => {
let vec = input.as_slice::<i64>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::I32 => {
let vec = input.as_slice::<i32>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::I16 => {
let vec = input.as_slice::<i16>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::I8 => {
let vec = input.as_slice::<i8>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::U8 => {
let vec = input.as_slice::<u8>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::U16 => {
let vec = input.as_slice::<u16>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::U32 => {
let vec = input.as_slice::<u32>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::U64 => {
let vec = input.as_slice::<u64>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::Bool => {
let vec = input.as_slice::<bool>()?.to_vec();
let cast: Vec<f32> = vec.par_iter().map(|x| *x as usize as f32).collect();
const_value = Tensor::<f32>::new(Some(&cast), &dims)?;
}
DatumType::TDim => {
let vec = input.as_slice::<tract_onnx::prelude::TDim>()?.to_vec();
let cast: Result<Vec<f32>, &str> = vec
.par_iter()
.map(|x| match x.to_i64() {
Ok(v) => Ok(v as f32),
Err(_) => Err("could not evaluate tdim"),
})
.collect();
const_value = Tensor::<f32>::new(Some(&cast?), &dims)?;
}
_ => return Err("unsupported data type".into()),
}
const_value.reshape(&dims)?;
Ok(const_value)
}
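/// Downcasts a tract op to the concrete type `C`, or returns an `OpMismatch` error.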
fn load_op<C: tract_onnx::prelude::Op + Clone>(
op: &dyn tract_onnx::prelude::Op,
idx: usize,
name: String,
) -> Result<C, Box<dyn std::error::Error>> {
let op: &C = match op.downcast_ref::<C>() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(idx, name)));
}
};
Ok(op.clone())
}
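/// Maps a tract ONNX node onto a supported circuit op, folding constant inputs into
/// op parameters where possible. Returns the op together with the indices of inputs
/// that were consumed (deleted) in the process.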
pub fn new_op_from_onnx(
idx: usize,
scales: &VarScales,
param_visibility: &Visibility,
node: OnnxNode<TypedFact, Box<dyn TypedOp>>,
inputs: &mut [super::NodeType],
symbol_values: &SymbolValues,
rebase_frac_zero_constants: bool,
) -> Result<(SupportedOp, Vec<usize>), Box<dyn std::error::Error>> {
use tract_onnx::tract_core::ops::array::Trilu;
use crate::circuit::InputType;
let input_scales = inputs
.iter()
.flat_map(|x| x.out_scales())
.collect::<Vec<_>>();
let mut replace_const = |scale: crate::Scale,
index: usize,
default_op: SupportedOp|
-> Result<SupportedOp, Box<dyn std::error::Error>> {
let mut constant = inputs[index].opkind();
let constant = constant.get_mutable_constant();
if let Some(c) = constant {
inputs[index].bump_scale(scale);
c.rebase_scale(scale)?;
inputs[index].replace_opkind(SupportedOp::Constant(c.clone()));
Ok(SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(scale),
}))
} else {
Ok(default_op)
}
};
debug!("Loading node: {:?}", n |
ode);
let mut deleted_indices = vec![];
let node = match node.op().name().as_ref() {
"ShiftLeft" => {
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
let raw_values = &c.raw_values;
if raw_values.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(
idx,
"shift left".to_string(),
)));
}
SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(input_scales[0] - raw_values[0] as i32),
})
} else {
return Err(Box::new(GraphError::OpMismatch(
idx,
"ShiftLeft".to_string(),
)));
}
}
"ShiftRight" => {
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
let raw_values = &c.raw_values;
if raw_values.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(
idx,
"shift right".to_string(),
)));
}
SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(input_scales[0] + raw_values[0] as i32),
})
} else {
return Err(Box::new(GraphError::OpMismatch(
idx,
"ShiftRight".to_string(),
)));
}
}
"MultiBroadcastTo" => {
let op = load_op::<MultiBroadcastTo>(node.op(), idx, node.op().name().to_string())?;
let shape = op.shape.clone();
let shape = shape
.iter()
.map(|x| x.to_usize())
.collect::<Result<Vec<_>, _>>()?;
SupportedOp::Linear(PolyOp::MultiBroadcastTo { shape })
}
"Range" => {
let mut input_ops = vec![];
for (i, input) in inputs.iter_mut().enumerate() {
if !input.opkind().is_constant() {
return Err("Range only supports constant inputs in a zk circuit".into());
} else {
input.decrement_use();
deleted_indices.push(i);
input_ops.push(input.opkind().clone());
}
}
assert_eq!(input_ops.len(), 3, "Range requires 3 inputs");
let input_ops = input_ops
.iter()
.map(|x| x.get_constant().ok_or("Range requires constant inputs"))
.collect::<Result<Vec<_>, _>>()?;
let start = input_ops[0].raw_values.map(|x| x as usize)[0];
let end = input_ops[1].raw_values.map(|x| x as usize)[0];
let delta = input_ops[2].raw_values.map(|x| x as usize)[0];
let range = (start..end).step_by(delta).collect::<Vec<_>>();
let raw_value = range.iter().map(|x| *x as f32).collect::<Tensor<_>>();
let quantized_value = quantize_tensor(raw_value.clone(), 0, &Visibility::Fixed)?;
let c = crate::circuit::ops::Constant::new(quantized_value, raw_value);
SupportedOp::Constant(c)
}
"Trilu" => {
let op = load_op::<Trilu>(node.op(), idx, node.op().name().to_string())?;
let upper = op.upper;
let diagonal = if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
let raw_values = &c.raw_values;
if raw_values.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "trilu".to_string())));
}
raw_values[0] as i32
} else {
return Err("we only support constant inputs for trilu diagonal".into());
};
SupportedOp::Linear(PolyOp::Trilu { upper, k: diagonal })
}
"Gather" => {
if inputs.len() != 2 {
return Err(Box::new(GraphError::InvalidDims(idx, "gather".to_string())));
};
let op = load_op::<Gather>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
let mut op = SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::Gather {
dim: axis,
constant_idx: None,
});
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(inputs.len() - 1);
op = SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::Gather {
dim: axis,
constant_idx: Some(c.raw_values.map(|x| {
if x == -1.0 {
inputs[0].out_dims()[0][axis] - 1
} else {
x as usize
}
})),
});
}
if inputs[1].opkind().is_input() {
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
}));
inputs[1].bump_scale(0);
}
op
}
"Topk" => {
let op = load_op::<Topk>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
let k = if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(inputs.len() - 1);
c.raw_values.map(|x| x as usize)[0]
} else {
op.fallback_k.to_i64()? as usize
};
SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::TopK {
dim: axis,
k,
largest: op.largest,
})
}
"Onehot" => {
let op = load_op::<OneHot>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
let num_classes = op.dim;
SupportedOp::Hybrid(crate::circuit::ops::hybrid::HybridOp::OneHot {
dim: axis,
num_classes,
})
}
"ScatterElements" => {
if inputs.len() != 3 {
return Err(Box::new(GraphError::InvalidDims(
idx,
"scatter elements".to_string(),
)));
};
let op = load_op::<ScatterElements>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
let mut op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterElements {
dim: axis,
constant_idx: None,
});
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterElements {
dim: axis,
constant_idx: Some(c.raw_values.map(|x| x as usize)),
})
}
if inputs[1].opkind().is_input() {
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
}));
inputs[1].bump_scale(0);
}
op
}
"ScatterNd" => {
if inputs.len() != 3 {
return Err(Box::new(GraphError::InvalidDims(
idx,
"scatter nd".to_string(),
)));
};
let _op = load_op::<ScatterNd>(node.op(), idx, node.op().name().to_string())?;
let mut op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterND {
constant_idx: None,
});
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::ScatterND {
constant_idx: Some(c.raw_values.map(|x| x as usize)),
})
}
if inputs[1].opkind().is_input() {
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
}));
inputs[1].bump_scale(0);
}
op
}
"GatherNd" => {
if inputs.len() != 2 {
return Err(Box::new(GraphError::InvalidDims(
idx,
"gather nd".to_string(),
)));
};
let op = load_op::<GatherNd>(node.op(), idx, node.op().name().to_string())?;
let batch_dims = op.batch_dims;
let mut op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherND {
batch_dims,
indices: None,
});
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherND {
batch_dims,
indices: Some(c.raw_values.map(|x| x as usize)),
})
}
if inputs[1].opkind().is_input() {
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
}));
inputs[1].bump_scale(0);
}
op
}
"GatherElements" => {
if inputs.len() != 2 {
return Err(Box::new(GraphError::InvalidDims(
idx,
"gather elements".to_string(),
)));
};
let op = load_op::<GatherElements>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
let mut op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherElements {
dim: axis,
constant_idx: None,
});
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
op = SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::GatherElements {
dim: axis,
constant_idx: Some(c.raw_values.map(|x| x as usize)),
})
}
if inputs[1].opkind().is_input() {
inputs[1].replace_opkind(SupportedOp::Input(crate::circuit::ops::Input {
scale: 0,
datum_type: InputType::TDim,
}));
inputs[1].bump_scale(0);
}
op
}
"MoveAxis" => {
let op = load_op::<AxisOp>(node.op(), idx, node.op().name().to_string())?;
match op {
AxisOp::Move(from, to) => {
let source = from.to_usize()?;
let destination = to.to_usize()?;
SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::MoveAxis {
source,
destination,
})
}
_ => {
return Err(Box::new(GraphError::OpMismatch(
idx,
"MoveAxis".to_string(),
)))
}
}
}
"Concat" | "InferenceConcat" => {
let op = load_op::<TypedConcat>(node.op(), idx, node.op().name().to_string())?;
let axis = op.axis;
SupportedOp::Linear(crate::circuit::ops::poly::PolyOp::Concat { axis })
}
"Slice" => {
let slice = load_op::<Slice>(node.op(), idx, node.op().name().to_string())?;
let axis = slice.axis;
let start = slice.start.to_usize()?;
let end = slice.end.to_usize()?;
SupportedOp::Linear(PolyOp::Slice { axis, start, end })
}
"Const" => {
let op: Const = load_op::<Const>(node.op(), idx, node.op().name().to_string())?;
let dt = op.0.datum_type();
let raw_value = extract_tensor_value(op.0)?;
let mut constant_scale = match dt {
DatumType::Bool
| DatumType::TDim
| DatumType::I64
| DatumType::I32
| DatumType::I16
| DatumType::I8
| DatumType::U8
| DatumType::U16
| DatumType::U32
| DatumType::U64 => 0,
DatumType::F16 | DatumType::F32 | DatumType::F64 => scales.params,
_ => return Err(Box::new(GraphError::UnsupportedDataType)),
};
let all_round = raw_value.iter().all(|x| (x).fract() == 0.0);
if all_round && rebase_frac_zero_constants {
constant_scale = 0;
}
let quantized_value =
quantize_tensor(raw_value.clone(), constant_scale, param_visibility)?;
let c = crate::circuit::ops::Constant::new(quantized_value, raw_value);
SupportedOp::Constant(c)
}
"Reduce<ArgMax(false)>" => {
if inputs.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "argmax".to_string())));
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes: Vec<usize> = op.axes.into_iter().collect();
assert_eq!(axes.len(), 1, "only support argmax over one axis");
SupportedOp::Hybrid(HybridOp::ReduceArgMax { dim: axes[0] })
}
"Reduce<ArgMin(false)>" => {
if inputs.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "argmin".to_string())));
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes: Vec<usize> = op.axes.into_iter().collect();
assert_eq!(axes.len(), 1, "only support argmin over one axis");
SupportedOp::Hybrid(HybridOp::ReduceArgMin { dim: axes[0] })
}
"Reduce<Min>" => {
if inputs.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "min".to_string())));
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes = op.axes.into_iter().collect();
SupportedOp::Hybrid(HybridOp::ReduceMin { axes })
}
"Reduce<Max>" => {
if inputs.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "max".to_string())));
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes = op.axes.into_iter().collect();
SupportedOp::Hybrid(HybridOp::ReduceMax { axes })
}
"Reduce<Prod>" => {
if inputs.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "prod".to_string())));
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes: Vec<usize> = op.axes.into_iter().collect();
let len_prod = inputs[0].out_dims()[0]
.iter()
.enumerate()
.filter(|(i, _)| axes.contains(i))
.map(|(_, v)| v)
.product::<usize>();
SupportedOp::Linear(PolyOp::Prod { axes, len_prod })
}
"Reduce<Sum>" => {
if inputs.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "sum".to_string())));
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes = op.axes.into_iter().collect();
SupportedOp::Linear(PolyOp::Sum { axes })
}
"Reduce<MeanOfSquares>" => {
if inputs.len() != 1 {
return Err(Box::new(GraphError::InvalidDims(
idx,
"mean of squares".to_string(),
)));
};
let op = load_op::<Reduce>(node.op(), idx, node.op().name().to_string())?;
let axes = op.axes.into_iter().collect();
SupportedOp::Linear(PolyOp::MeanOfSquares { axes })
}
"Max" => {
let const_inputs = inputs
.iter()
.enumerate()
.filter(|(_, n)| n.is_constant())
.map(|(i, _)| i)
.collect::<Vec<_>>();
if const_inputs.len() != 1 {
return Err(Box::new(GraphError::OpMismatch(idx, "Max".to_string())));
}
let const_idx = const_inputs[0];
let boxed_op = inputs[const_idx].opkind();
let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
if c.len() == 1 {
c[0]
} else {
return Err(Box::new(GraphError::InvalidDims(idx, "max".to_string())));
}
} else {
return Err(Box::new(GraphError::OpMismatch(idx, "Max".to_string())));
};
if inputs.len() == 2 {
if let Some(node) = inputs.get_mut(const_idx) {
node.decrement_use();
deleted_indices.push(const_idx);
}
if unit == 0. {
SupportedOp::Nonlinear(LookupOp::ReLU)
} else {
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
SupportedOp::Nonlinear(LookupOp::Max {
scale: scale_to_multiplier(inputs[non_const_idx].out_scales()[0]).into(),
a: crate::circuit::utils::F32(unit),
})
}
} else {
return Err(Box::new(GraphError::InvalidDims(idx, "max".to_string())));
}
}
"Min" => {
let const_inputs = inputs
.iter()
.enumerate()
.filter(|(_, n)| n.is_constant())
.map(|(i, _)| i)
.collect::<Vec<_>>();
if const_inputs.len() != 1 {
return Err(Box::new(GraphError::OpMismatch(idx, "Min".to_string())));
}
let const_idx = const_inputs[0];
let boxed_op = inputs[const_idx].opkind();
let unit = if let Some(c) = extract_const_raw_values(boxed_op) {
if c.len() == 1 {
c[0]
} else {
return Err(Box::new(GraphError::InvalidDims(idx, "min".to_string())));
}
} else {
return Err(Box::new(GraphError::OpMismatch(idx, "Min".to_string())));
};
if inputs.len() == 2 {
if let Some(node) = inputs.get_mut(const_idx) {
node.decrement_use();
deleted_indices.push(const_idx);
}
let non_const_idx = if const_idx == 0 { 1 } else { 0 };
SupportedOp::Nonlinear(LookupOp::Min {
scale: scale_to_multiplier(inputs[non_const_idx].out_scales()[0]).into(),
a: crate::circuit::utils::F32(unit),
})
} else {
return Err(Box::new(GraphError::InvalidDims(idx, "min".to_string())));
}
}
"Recip" => {
let in_scale = inputs[0].out_scales()[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
SupportedOp::Hybrid(HybridOp::Recip {
input_scale: (scale_to_multiplier(in_scale) as f32).into(),
output_scale: (scale_to_multiplier(max_scale) as f32).into(),
use_range_check_for_int: true,
})
}
"LeakyRelu" => {
let leaky_op = load_op::<ElementWiseOp>(node.op(), idx, node.op().name().to_string())?;
let leaky_op: &LeakyRelu = match leaky_op.0.downcast_ref::<LeakyRelu>() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(
idx,
"leaky relu".to_string(),
)));
}
};
SupportedOp::Nonlinear(LookupOp::LeakyReLU {
slope: crate::circuit::utils::F32(leaky_op.alpha),
})
}
"Scan" => {
return Err("scan should never be analyzed explicitly".into());
}
"QuantizeLinearU8" | "DequantizeLinearF32" => {
SupportedOp::Linear(PolyOp::Identity { out_scale: None })
}
"Abs" => SupportedOp::Nonlinear(LookupOp::Abs),
"Neg" => SupportedOp::Linear(PolyOp::Neg),
"HardSwish" => SupportedOp::Nonlinear(LookupOp::HardSwish {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Sigmoid" => SupportedOp::Nonlinear(LookupOp::Sigmoid {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Sqrt" => SupportedOp::Nonlinear(LookupOp::Sqrt { |
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Rsqrt" => SupportedOp::Nonlinear(LookupOp::Rsqrt {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Exp" => SupportedOp::Nonlinear(LookupOp::Exp {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Ln" => SupportedOp::Nonlinear(LookupOp::Ln {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Sin" => SupportedOp::Nonlinear(LookupOp::Sin {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Cos" => SupportedOp::Nonlinear(LookupOp::Cos {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Tan" => SupportedOp::Nonlinear(LookupOp::Tan {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Asin" => SupportedOp::Nonlinear(LookupOp::ASin {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Acos" => SupportedOp::Nonlinear(LookupOp::ACos {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Atan" => SupportedOp::Nonlinear(LookupOp::ATan {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Sinh" => SupportedOp::Nonlinear(LookupOp::Sinh {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Cosh" => SupportedOp::Nonlinear(LookupOp::Cosh {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Tanh" => SupportedOp::Nonlinear(LookupOp::Tanh {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Asinh" => SupportedOp::Nonlinear(LookupOp::ASinh {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Acosh" => SupportedOp::Nonlinear(LookupOp::ACosh {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Atanh" => SupportedOp::Nonlinear(LookupOp::ATanh {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Erf" => SupportedOp::Nonlinear(LookupOp::Erf {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Source" => {
let (scale, datum_type) = match node.outputs[0].fact.datum_type {
DatumType::Bool => (0, InputType::Bool),
DatumType::TDim => (0, InputType::TDim),
DatumType::I64
| DatumType::I32
| DatumType::I16
| DatumType::I8
| DatumType::U8
| DatumType::U16
| DatumType::U32
| DatumType::U64 => (0, InputType::Int),
DatumType::F16 => (scales.input, InputType::F16),
DatumType::F32 => (scales.input, InputType::F32),
DatumType::F64 => (scales.input, InputType::F64),
_ => return Err(Box::new(GraphError::UnsupportedDataType)),
};
SupportedOp::Input(crate::circuit::ops::Input { scale, datum_type })
}
"Cast" => {
let op = load_op::<Cast>(node.op(), idx, node.op().name().to_string())?;
let dt = op.to;
assert_eq!(input_scales.len(), 1);
match dt {
DatumType::Bool
| DatumType::TDim
| DatumType::I64
| DatumType::I32
| DatumType::I16
| DatumType::I8
| DatumType::U8
| DatumType::U16
| DatumType::U32
| DatumType::U64 => {
if input_scales[0] != 0 {
replace_const(
0,
0,
SupportedOp::Nonlinear(LookupOp::Cast {
scale: crate::circuit::utils::F32(scale_to_multiplier(input_scales[0]) as f32),
}),
)?
} else {
SupportedOp::Linear(PolyOp::Identity { out_scale: None })
}
}
DatumType::F16 | DatumType::F32 | DatumType::F64 => {
SupportedOp::Linear(PolyOp::Identity { out_scale: None })
}
_ => return Err(Box::new(GraphError::UnsupportedDataType)),
}
}
"Add" => SupportedOp::Linear(PolyOp::Add),
"Sub" => SupportedOp::Linear(PolyOp::Sub),
"Mul" => {
let mut op = SupportedOp::Linear(PolyOp::Mult);
let const_idx = inputs
.iter()
.enumerate()
.filter(|(_, n)| n.is_constant())
.map(|(i, _)| i)
.collect::<Vec<_>>();
if const_idx.len() > 1 {
return Err(Box::new(GraphError::InvalidDims(idx, "mul".to_string())));
}
if const_idx.len() == 1 {
let const_idx = const_idx[0];
if let Some(c) = inputs[const_idx].opkind().get_mutable_constant() {
if c.raw_values.len() == 1 && c.raw_values[0] < 1. {
let raw_values = 1.0 / c.raw_values[0];
if raw_values.log2().fract() == 0.0 {
inputs[const_idx].decrement_use();
deleted_indices.push(const_idx);
op = SupportedOp::Linear(PolyOp::Identity {
out_scale: Some(input_scales[0] + raw_values.log2() as i32),
});
}
}
}
}
op
}
"Iff" => SupportedOp::Linear(PolyOp::Iff),
"Less" => {
if inputs.len() == 2 {
SupportedOp::Hybrid(HybridOp::Less)
} else {
return Err(Box::new(GraphError::InvalidDims(idx, "less".to_string())));
}
}
"LessEqual" => {
if inputs.len() == 2 {
SupportedOp::Hybrid(HybridOp::LessEqual)
} else {
return Err(Box::new(GraphError::InvalidDims(
idx,
"less equal".to_string(),
)));
}
}
"Greater" => {
if inputs.len() == 2 {
SupportedOp::Hybrid(HybridOp::Greater)
} else {
return Err(Box::new(GraphError::InvalidDims(
idx,
"greater".to_string(),
)));
}
}
"GreaterEqual" => {
if inputs.len() == 2 {
SupportedOp::Hybrid(HybridOp::GreaterEqual)
} else {
return Err(Box::new(GraphError::InvalidDims(
idx,
"greater equal".to_string(),
)));
}
}
"EinSum" => {
let op: &EinSum = match node.op().downcast_ref::<EinSum>() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(idx, "einsum".to_string())));
}
};
let axes = &op.axes;
SupportedOp::Linear(PolyOp::Einsum {
equation: axes.to_string(),
})
}
"Softmax" => {
let softmax_op: &Softmax = match node.op().downcast_ref::<Softmax>() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(idx, "softmax".to_string())));
}
};
let in_scale = inputs[0].out_scales()[0];
let max_scale = std::cmp::max(scales.get_max(), in_scale);
SupportedOp::Hybrid(HybridOp::Softmax {
input_scale: scale_to_multiplier(in_scale).into(),
output_scale: scale_to_multiplier(max_scale).into(),
axes: softmax_op.axes.to_vec(),
})
}
"MaxPool" => {
let op = Box::new(node.op());
let sumpool_node: &MaxPool = match op.downcast_ref() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(idx, "Maxpool".to_string())));
}
};
let pool_spec: &PoolSpec = &sumpool_node.pool_spec;
if pool_spec.data_format != DataFormat::NCHW {
return Err(Box::new(GraphError::MissingParams(
"data in wrong format".to_string(),
)));
}
let stride = pool_spec
.strides
.clone()
.ok_or(GraphError::MissingParams("stride".to_string()))?;
let padding = match &pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
_ => {
return Err(Box::new(GraphError::MissingParams("padding".to_string())));
}
};
let kernel_shape = &pool_spec.kernel_shape;
SupportedOp::Hybrid(HybridOp::MaxPool {
padding,
stride: stride.to_vec(),
pool_dims: kernel_shape.to_vec(),
})
}
"Ceil" => SupportedOp::Nonlinear(LookupOp::Ceil {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Floor" => SupportedOp::Nonlinear(LookupOp::Floor {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Round" => SupportedOp::Nonlinear(LookupOp::Round {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"RoundHalfToEven" => SupportedOp::Nonlinear(LookupOp::RoundHalfToEven {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
}),
"Sign" => SupportedOp::Nonlinear(LookupOp::Sign),
"Pow" => {
if let Some(c) = inputs[1].opkind().get_mutable_constant() {
inputs[1].decrement_use();
deleted_indices.push(1);
if c.raw_values.len() > 1 {
unimplemented!("only support scalar pow")
}
SupportedOp::Nonlinear(LookupOp::Pow {
scale: scale_to_multiplier(inputs[0].out_scales()[0]).into(),
a: crate::circuit::utils::F32(c.raw_values[0]),
})
} else {
unimplemented!("only support constant pow for now")
}
}
"Cube" => SupportedOp::Linear(PolyOp::Pow(3)),
"Square" => SupportedOp::Linear(PolyOp::Pow(2)),
"Conv" => {
let conv_node: &Conv = match node.op().downcast_ref::<Conv>() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(idx, "conv".to_string())));
}
};
if let Some(dilations) = &conv_node.pool_spec.dilations {
if dilations.iter().any(|x| *x != 1) {
return Err(Box::new(GraphError::MisformedParams(
"non unit dilations not supported".to_string(),
)));
}
}
if ((conv_node.pool_spec.data_format != DataFormat::NCHW)
&& (conv_node.pool_spec.data_format != DataFormat::CHW))
|| (conv_node.kernel_fmt != KernelFormat::OIHW)
{
return Err(Box::new(GraphError::MisformedParams(
"data or kernel in wrong format".to_string(),
)));
}
let stride = match conv_node.pool_spec.strides.clone() {
Some(s) => s.to_vec(),
None => {
return Err(Box::new(GraphError::MissingParams("strides".to_string())));
}
};
let padding = match &conv_node.pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
_ => {
return Err(Box::new(GraphError::MissingParams("padding".to_string())));
}
};
if input_scales.len() == 3 {
let bias_scale = input_scales[2];
let input_scale = input_scales[0];
let kernel_scale = input_scales[1];
let output_scale = input_scale + kernel_scale;
if bias_scale != output_scale {
replace_const(
output_scale,
2,
SupportedOp::Unknown(crate::circuit::Unknown),
)?;
}
}
SupportedOp::Linear(PolyOp::Conv { padding, stride })
}
"Not" => SupportedOp::Linear(PolyOp::Not),
"And" => SupportedOp::Linear(PolyOp::And),
"Or" => SupportedOp::Linear(PolyOp::Or),
"Xor" => SupportedOp::Linear(PolyOp::Xor),
"Equals" => SupportedOp::Hybrid(HybridOp::Equals),
"Deconv" => {
let deconv_node: &Deconv = match node.op().downcast_ref::<Deconv>() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(idx, "deconv".to_string())));
}
};
if let Some(dilations) = &deconv_node.pool_spec.dilations {
if dilations.iter().any(|x| *x != 1) {
return Err(Box::new(GraphError::MisformedParams( |
"non unit dilations not supported".to_string(),
)));
}
}
if (deconv_node.pool_spec.data_format != DataFormat::NCHW)
|| (deconv_node.kernel_format != KernelFormat::OIHW)
{
return Err(Box::new(GraphError::MisformedParams(
"data or kernel in wrong format".to_string(),
)));
}
let stride = match deconv_node.pool_spec.strides.clone() {
Some(s) => s.to_vec(),
None => {
return Err(Box::new(GraphError::MissingParams("strides".to_string())));
}
};
let padding = match &deconv_node.pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
_ => {
return Err(Box::new(GraphError::MissingParams("padding".to_string())));
}
};
if input_scales.len() == 3 {
let bias_scale = input_scales[2];
let input_scale = input_scales[0];
let kernel_scale = input_scales[1];
let output_scale = input_scale + kernel_scale;
if bias_scale != output_scale {
replace_const(
output_scale,
2,
SupportedOp::Unknown(crate::circuit::Unknown),
)?;
}
}
SupportedOp::Linear(PolyOp::DeConv {
padding,
output_padding: deconv_node.adjustments.to_vec(),
stride,
})
}
"Downsample" => {
let downsample_node: Downsample = match node.op().downcast_ref::<Downsample>() {
Some(b) => b.clone(),
None => {
                    return Err(Box::new(GraphError::OpMismatch(
idx,
"downsample".to_string(),
)));
}
};
SupportedOp::Linear(PolyOp::Downsample {
axis: downsample_node.axis,
stride: downsample_node.stride as usize,
modulo: downsample_node.modulo,
})
}
"Resize" => {
let resize_node = format!("{:?}", node);
if !resize_node.contains("interpolator: Nearest")
&& !resize_node.contains("nearest: Floor")
{
unimplemented!("Only nearest neighbor interpolation is supported")
}
if inputs.len() != 2 && inputs.len() != 3 {
return Err(Box::new(GraphError::OpMismatch(idx, "Resize".to_string())));
}
let scale_factor_node =
if resize_node.contains("optional_scales_input: None") {
None
} else {
Some(resize_node
.split("optional_scales_input: ")
.collect::<Vec<_>>()[1]
.split("Some(")
.collect::<Vec<_>>()[1]
.split(')')
.collect::<Vec<_>>()[0]
.parse::<usize>()?)
};
let scale_factor = if let Some(scale_factor_node) = scale_factor_node {
let boxed_op = inputs[scale_factor_node].opkind();
if let Some(c) = extract_const_raw_values(boxed_op) {
c.map(|x| x as usize).into_iter().collect::<Vec<usize>>()
} else {
return Err(Box::new(GraphError::OpMismatch(idx, "Resize".to_string())));
}
} else {
vec![1]
};
for i in 1..inputs.len() {
if let Some(node) = inputs.get_mut(i) {
node.decrement_use(); |
deleted_indices.push(i);
}
}
SupportedOp::Linear(PolyOp::Resize { scale_factor })
}
"SumPool" => {
let op = Box::new(node.op());
let sumpool_node: &SumPool = match op.downcast_ref() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(idx, "sumpool".to_string())));
}
};
let pool_spec: &PoolSpec = &sumpool_node.pool_spec;
if pool_spec.data_format != DataFormat::NCHW {
return Err(Box::new(GraphError::MissingParams(
"data in wrong format".to_string(),
)));
}
let stride = pool_spec
.strides
.clone()
.ok_or(GraphError::MissingParams("stride".to_string()))?;
let padding = match &pool_spec.padding {
PaddingSpec::Explicit(b, a) | PaddingSpec::ExplicitOnnxPool(b, a, _) => {
b.iter().zip(a.iter()).map(|(b, a)| (*b, *a)).collect()
}
_ => {
return Err(Box::new(GraphError::MissingParams("padding".to_string())));
}
};
SupportedOp::Hybrid(HybridOp::SumPool {
padding,
stride: stride.to_vec(),
kernel_shape: pool_spec.kernel_shape.to_vec(),
normalized: sumpool_node.normalize,
})
}
"Pad" => {
let pad_node: &Pad = match node.op().downcast_ref::<Pad>() {
Some(b) => b,
None => {
return Err(Box::new(GraphError::OpMismatch(idx, "pad".to_string())));
}
};
if pad_node.mode
!= PadMode::Constant(tract_onnx::prelude::Arc::new(
                tract_onnx::prelude::Tensor::zero::<f32>(&[])?,
))
{
return Err(Box::new(GraphError::MisformedParams(
"pad mode or pad type".to_string(),
)));
}
SupportedOp::Linear(PolyOp::Pad(pad_node.pads.to_vec()))
}
"RmAxis" | "Reshape" | "AddAxis" => {
let shapes = node_output_shapes(&node, symbol_values)?;
let mut output_shape = shapes[0].clone();
if output_shape.is_empty() {
output_shape = vec![1];
}
SupportedOp::Linear(PolyOp::Reshape(output_shape))
}
"Flatten" => {
let new_dims: Vec<usize> = vec![inputs[0].out_dims()[0].iter().product::<usize>()];
SupportedOp::Linear(PolyOp::Flatten(new_dims))
}
c => {
warn!("Unknown op: {}", c);
SupportedOp::Unknown(crate::circuit::ops::Unknown)
}
};
Ok((node, deleted_indices))
}
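/// Extracts the raw floating-point values from a [SupportedOp], returning `Some`
/// only when the op is a constant.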
pub fn extract_const_raw_values(op: SupportedOp) -> Option<Tensor<f32>> {
match op {
SupportedOp::Constant(crate::circuit::ops::Constant { raw_values, .. }) => Some(raw_values),
_ => None,
}
}
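/// Extracts the quantized (field-element) values from a [SupportedOp], returning
/// `Some` only when the op is a constant.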
pub fn extract_const_quantized_values(op: SupportedOp) -> Option<Tensor<Fp>> {
match op {
SupportedOp::Constant(crate::circuit::ops::Constant {
quantized_values, ..
}) => Some(quantized_values),
_ => None,
}
}
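/// Quantizes an f32 tensor into field elements at the given fixed-point `scale`
/// (number of fractional bits), tagging the result with `scale` and `visibility`.
/// Each value is converted via `quantize_float` and then embedded into the field.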
pub fn quantize_tensor<F: PrimeField + TensorType + PartialOrd>(
const_value: Tensor<f32>,
scale: crate::Scale,
visibility: &Visibility,
) -> Result<Tensor<F>, Box<dyn std::error::Error>> {
let mut value: Tensor<F> = const_value.par_enum_map(|_, x| {
Ok::<_, TensorError>(crate::fieldutils::i128_to_felt::<F>(quantize_float(
&(x).into(),
0.0,
scale,
)?))
})?;
value.set_scale(scale);
value.set_visibility(visibility);
Ok(value)
}
use crate::tensor::ValTensor;
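/// Splits a flattened [ValTensor] into consecutive slices, one per entry of `shapes`,
/// where each slice consumes `shape.iter().product()` elements and is reshaped to `shape`.
/// For example, shapes `[[2, 5], [10], [5, 2]]` split a length-30 tensor into three
/// tensors of 10 elements each.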
pub(crate) fn split_valtensor( |
values: &ValTensor<Fp>,
shapes: Vec<Vec<usize>>,
) -> Result<Vec<ValTensor<Fp>>, Box<dyn std::error::Error>> {
let mut tensors: Vec<ValTensor<Fp>> = Vec::new();
let mut start = 0;
for shape in shapes {
let end = start + shape.iter().product::<usize>();
let mut tensor = values.get_slice(&[start..end])?;
tensor.reshape(&shape)?;
tensors.push(tensor);
start = end;
}
Ok(tensors)
}
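/// Wraps `op` in a [Rescaled] op when the inputs listed in `inputs_to_scale` carry
/// different scales, multiplying each lagging input up to the maximum input scale so
/// that all rescaled inputs agree. Returns `op` unchanged if no rescaling is needed.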
pub fn homogenize_input_scales(
op: Box<dyn Op<Fp>>,
input_scales: Vec<crate::Scale>,
inputs_to_scale: Vec<usize>,
) -> Result<Box<dyn Op<Fp>>, Box<dyn Error>> {
let relevant_input_scales = input_scales
.clone()
.into_iter()
.enumerate()
.filter(|(idx, _)| inputs_to_scale.contains(idx))
.map(|(_, scale)| scale)
.collect_vec();
if inputs_to_scale.is_empty() {
return Ok(op);
}
if relevant_input_scales.windows(2).all(|w| w[0] == w[1]) {
return Ok(op);
}
let mut multipliers: Vec<u128> = vec![1; input_scales.len()];
let max_scale = input_scales.iter().max().ok_or("no max scale")?;
    for (idx, input_scale) in input_scales.iter().enumerate() {
        if !inputs_to_scale.contains(&idx) {
            continue;
        }
        let scale_diff = max_scale - input_scale;
        if scale_diff > 0 {
            let mult = crate::graph::scale_to_multiplier(scale_diff);
            multipliers[idx] = mult as u128;
        }
    }
if multipliers.iter().any(|&x| x > 1) {
Ok(Box::new(Rescaled {
inner: Box::new(op.into()),
scale: (0..input_scales.len()).zip(multipliers).collect_vec(),
}))
} else {
Ok(op)
}
}
#[cfg(test)]
pub mod tests {
use super::*; |
    #[test]
    fn test_flatten_valtensors() {
let tensor1: Tensor<Fp> = (0..10).map(|x| x.into()).into();
let tensor2: Tensor<Fp> = (10..20).map(|x| x.into()).into();
let tensor3: Tensor<Fp> = (20..30).map(|x| x.into()).into();
let mut tensor = Tensor::new(Some(&[tensor1, tensor2, tensor3]), &[3])
.unwrap()
.combine()
.unwrap();
tensor.set_visibility(&Visibility::Public);
let flattened: ValTensor<Fp> = tensor.try_into().unwrap();
assert_eq!(flattened.len(), 30);
let split = split_valtensor(&flattened, vec![vec![2, 5], vec![10], vec![5, 2]]).unwrap();
assert_eq!(split.len(), 3);
assert_eq!(split[0].len(), 10);
assert_eq!(split[0].dims(), vec![2, 5]);
assert_eq!(split[1].len(), 10);
assert_eq!(split[1].dims(), vec![10]);
assert_eq!(split[2].dims(), vec![5, 2]);
assert_eq!(split[2].len(), 10);
}
} |
use std::error::Error;
use std::fmt::Display;
use crate::tensor::TensorType;
use crate::tensor::{ValTensor, VarTensor};
use crate::RunArgs;
use halo2_proofs::plonk::{Column, ConstraintSystem, Instance};
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use log::debug;
use pyo3::{
exceptions::PyValueError, types::PyString, FromPyObject, IntoPy, PyAny, PyObject, PyResult,
PyTryFrom, Python, ToPyObject,
};
use serde::{Deserialize, Serialize};
use tosubcommand::ToFlags;
use super::*;
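/// Determines how a tensor is exposed in the circuit: kept private to the prover,
/// exposed as a public instance, committed to via a hash or polynomial commitment,
/// or baked in as a fixed constant.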
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub enum Visibility {
Private,
Public,
Hashed {
hash_is_public: bool,
outlets: Vec<usize>,
},
KZGCommit,
Fixed,
}
impl Display for Visibility {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Visibility::KZGCommit => write!(f, "polycommit"),
Visibility::Private => write!(f, "private"),
Visibility::Public => write!(f, "public"),
Visibility::Fixed => write!(f, "fixed"),
Visibility::Hashed {
hash_is_public,
outlets,
} => {
if *hash_is_public {
write!(f, "hashed/public")
} else {
write!(f, "hashed/private/{}", outlets.iter().join(","))
}
}
}
}
}
impl ToFlags for Visibility {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl<'a> From<&'a str> for Visibility {
fn from(s: &'a str) -> Self {
if s.contains("hashed/private") {
let (_, outlets) = s.split_at(s.rfind('/').unwrap());
let outlets = outlets
.trim_start_matches('/')
.split(',')
.map(|s| s.parse::<usize>().unwrap())
.collect_vec();
return Visibility::Hashed {
hash_is_public: false,
outlets,
}; |
}
match s {
"private" => Visibility::Private,
"public" => Visibility::Public,
"polycommit" => Visibility::KZGCommit,
"fixed" => Visibility::Fixed,
"hashed" | "hashed/public" => Visibility::Hashed {
hash_is_public: true,
outlets: vec![],
},
_ => {
log::error!("Invalid value for Visibility: {}", s);
log::warn!("Defaulting to private");
Visibility::Private
}
}
}
}
impl IntoPy<PyObject> for Visibility {
fn into_py(self, py: Python) -> PyObject {
match self {
Visibility::Private => "private".to_object(py),
Visibility::Public => "public".to_object(py),
Visibility::Fixed => "fixed".to_object(py),
Visibility::KZGCommit => "polycommit".to_object(py),
Visibility::Hashed {
hash_is_public,
outlets,
} => {
if hash_is_public {
"hashed/public".to_object(py)
} else {
let outlets = outlets
.iter()
.map(|o| o.to_string())
.collect_vec()
.join(",");
format!("hashed/private/{}", outlets).to_object(py)
}
}
}
}
}
impl<'source> FromPyObject<'source> for Visibility {
fn extract(ob: &'source PyAny) -> PyResult<Self> {
let trystr = <PyString as PyTryFrom>::try_from(ob)?;
let strval = trystr.to_string();
let strval = strval.as_str();
if strval.contains("hashed/private") {
let (_, outlets) = strval.split_at(strval.rfind('/').unwrap());
let outlets = outlets
.trim_start_matches('/')
.split(',')
.map(|s| s.parse::<usize>().unwrap())
.collect_vec();
            return Ok(Visibility::Hashed {
hash_is_public: false,
outlets,
});
}
match strval.to_lowercase().as_str() {
"private" => Ok(Visibility::Private),
"public" => Ok(Visibility::Public),
"polycommit" => Ok(Visibility::KZGCommit),
"hashed" => Ok(Visibility::Hashed {
hash_is_public: true,
outlets: vec![],
}),
"hashed/public" => Ok(Visibility::Hashed {
hash_is_public: true,
outlets: vec![],
}),
"fixed" => Ok(Visibility::Fixed),
_ => Err(PyValueError::new_err("Invalid value for Visibility")),
}
}
}
impl Visibility {
pub fn is_fixed(&self) -> bool {
matches!(&self, Visibility::Fixed)
}
pub fn is_private(&self) -> bool {
matches!(&self, Visibility::Private) || self.is_hashed_private()
}
pub fn is_public(&self) -> bool {
matches!(&self, Visibility::Public)
}
pub fn is_hashed(&self) -> bool {
matches!(&self, Visibility::Hashed { .. })
}
pub fn is_polycommit(&self) -> bool {
matches!(&self, Visibility::KZGCommit)
}
pub fn is_hashed_public(&self) -> bool {
if let Visibility::Hashed {
hash_is_public: true,
..
} = self
{
return true;
}
false
}
pub fn is_hashed_private(&self) -> bool {
if let Visibility::Hashed {
hash_is_public: false,
..
} = self
{
return true;
}
false
}
pub fn requires_processing(&self) -> bool {
matches!(&self, Visibility::Hashed { .. }) | matches!(&self, Visibility::KZGCommit)
}
pub fn overwrites_inputs(&self) -> Vec<usize> {
if let Visibility::Hashed { outlets, .. } = self {
return outlets.clone();
}
vec![]
}
}
pub struct VarScales {
pub input: crate::Scale,
pub params: crate::Scale,
pub rebase_multiplier: u32,
}
impl std::fmt::Display for VarScales {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "(inputs: {}, params: {})", self.input, self.params)
}
}
impl VarScales {
pub fn get_max(&self) -> crate::Scale {
std::cmp::max(self.input, self.params)
}
pub fn get_min(&self) -> crate::Scale {
std::cmp::min(self.input, self.params)
}
pub fn from_args(args: &RunArgs) -> Result<Self, Box<dyn Error>> {
Ok(Self {
input: args.input_scale,
params: args.param_scale,
rebase_multiplier: args.scale_rebase_multiplier,
})
}
}
pub struct VarVisibility {
pub input: Visibility,
pub params: Visibility,
pub output: Visibility,
}
impl std::fmt::Display for VarVisibility {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
"(inputs: {}, params: {}, outputs: {})",
self.input, self.params, self.output
)
}
}
impl Default for VarVisibility {
fn default() -> Self {
Self {
input: Visibility::Private,
params: Visibility::Private,
output: Visibility::Public,
}
}
}
impl VarVisibility {
pub fn from_args(args: &RunArgs) -> Result<Self, Box<dyn Error>> {
let input_vis = &args.input_visibility;
let params_vis = &args.param_visibility;
let output_vis = &args.output_visibility;
if params_vis.is_public() {
return Err(
"public visibility for params is deprecated, please use `fixed` instead".into(),
);
}
if !output_vis.is_public()
& !params_vis.is_public()
& !input_vis.is_public()
& !output_vis.is_fixed()
& !params_vis.is_fixed()
& !input_vis.is_fixed()
& !output_vis.is_hashed()
& !params_vis.is_hashed()
& !input_vis.is_hashed()
& !output_vis.is_polycommit()
& !params_vis.is_polycommit()
& !input_vis.is_polycommit()
{
return Err(Box::new(GraphError::Visibility));
}
Ok(Self {
input: input_vis.clone(),
params: params_vis.clone(),
output: output_vis.clone(),
})
}
}
pub struct ModelVars<F: PrimeField + TensorType + PartialOrd> {
pub advices: Vec<VarTensor>,
pub instance: Option<ValTensor<F>>,
}
impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> ModelVars<F> {
pub fn get_instance_col(&self) -> Option<&Column<Instance>> {
        if let Some(instance) = &self.instance {
match instance {
ValTensor::Instance { inner, .. } => Some(inner),
_ => None,
}
} else {
None
}
}
    pub fn set_initial_instance_offset(&mut self, offset: usize) {
if let Some(instance) = &mut self.instance {
instance.set_initial_instance_offset(offset);
}
}
pub fn get_instance_len(&self) -> usize {
if let Some(instance) = &self.instance {
instance.get_total_instance_len()
} else {
0
}
}
    pub fn increment_instance_idx(&mut self) {
if let Some(instance) = &mut self.instance {
instance.increment_idx();
}
}
    pub fn set_instance_idx(&mut self, val: usize) {
if let Some(instance) = &mut self.instance {
instance.set_idx(val);
}
}
pub fn get_instance_idx(&self) -> usize {
if let Some(instance) = &self.instance {
instance.get_idx()
} else {
0
}
}
    pub fn instantiate_instance(
&mut self,
cs: &mut ConstraintSystem<F>,
instance_dims: Vec<Vec<usize>>,
instance_scale: crate::Scale,
existing_instance: Option<Column<Instance>>,
) {
debug!("model uses {:?} instance dims", instance_dims);
self.instance = if let Some(existing_instance) = existing_instance {
debug!("using existing instance");
Some(ValTensor::new_instance_from_col(
instance_dims,
instance_scale,
existing_instance,
))
} else {
Some(ValTensor::new_instance(cs, instance_dims, instance_scale))
};
}
pub fn new(cs: &mut ConstraintSystem<F>, params: &GraphSettings) -> Self {
debug!("number of blinding factors: {}", cs.blinding_factors());
let logrows = params.run_args.logrows as usize;
let var_len = params.total_assignments;
let num_inner_cols = params.run_args.num_inner_cols;
let num_constants = params.total_const_size;
let module_requires_fixed = params.module_requires_fixed();
let requires_dynamic_lookup = params.requires_dynamic_lookup();
let requires_shuffle = params.requires_shuffle();
let dynamic_lookup_and_shuffle_size = params.dynamic_lookup_and_shuffle_col_size();
let mut advices = (0..3)
.map(|_| VarTensor::new_advice(cs, logrows, num_inner_cols, var_len))
.collect_vec();
if requires_dynamic_lookup || requires_shuffle {
let num_cols = if requires_dynamic_lookup { 3 } else { 2 };
for _ in 0..num_cols {
let dynamic_lookup =
VarTensor::new_advice(cs, logrows, 1, dynamic_lookup_and_shuffle_size);
if dynamic_lookup.num_blocks() > 1 {
panic!("dynamic lookup or shuffle should only have one block");
};
advices.push(dynamic_lookup);
}
}
debug!(
"mod |
el uses {} advice blocks (size={})",
advices.iter().map(|v| v.num_blocks()).sum::<usize>(),
num_inner_cols
);
let num_const_cols =
VarTensor::constant_cols(cs, logrows, num_constants, module_requires_fixed);
debug!("model uses {} fixed columns", num_const_cols);
ModelVars {
advices,
instance: None,
}
}
pub fn new_dummy() -> Self {
ModelVars {
advices: vec![],
instance: None,
}
}
} |
#![deny(
    bad_style,
dead_code,
improper_ctypes,
non_shorthand_field_patterns,
no_mangle_generic_items,
overflowing_literals,
path_statements,
patterns_in_fns_without_body,
unconditional_recursion,
unused,
unused_allocation,
unused_comparisons,
unused_parens,
while_true,
missing_docs,
trivial_casts,
trivial_numeric_casts,
unused_extern_crates,
unused_import_braces,
missing_debug_implementations,
unsafe_code
)]
use std::str::FromStr;
use circuit::{table::Range, CheckMode, Tolerance};
use clap::Args;
use graph::Visibility;
use halo2_proofs::poly::{
ipa::commitment::IPACommitmentScheme, kzg::commitment::KZGCommitmentScheme,
};
use halo2curves::bn256::{Bn256, G1Affine};
use serde::{Deserialize, Serialize};
use tosubcommand::ToFlags;
pub mod circuit;
pub mod commands;
pub mod eth;
pub mod execute;
pub mod fieldutils;
pub mod graph;
pub mod logger;
pub mod pfsys;
pub mod python;
pub mod srs_sha;
pub mod tensor;
pub mod wasm;
use lazy_static::lazy_static;
pub type Scale = i32;
// Note: the `cfg` gates below are assumed; the env-var-backed statics and the plain
// const fallbacks shadow the same names and cannot coexist on a single target.
#[cfg(not(target_arch = "wasm32"))]
lazy_static! {
    pub static ref EZKL_BUF_CAPACITY: usize = std::env::var("EZKL_BUF_CAPACITY")
        .unwrap_or("8000".to_string())
        .parse()
        .unwrap();
    pub static ref EZKL_KEY_FORMAT: String = std::env::var("EZKL_KEY_FORMAT")
        .unwrap_or("raw-bytes".to_string());
}
#[cfg(target_arch = "wasm32")]
const EZKL_KEY_FORMAT: &str = "raw-bytes";
#[cfg(target_arch = "wasm32")]
const EZKL_BUF_CAPACITY: &usize = &8000;
#[derive(
    Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, Default, Copy,
)]
pub enum Commitments {
    // KZG is the default scheme, consistent with `From<Option<Commitments>>` below.
    #[default]
    KZG,
    IPA,
}
impl From<Option<Commitments>> for Commitments {
fn from(value: Option<Commitments>) -> Self {
value.unwrap_or(Commitments::KZG)
}
}
impl FromStr for Commitments {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"kzg" => Ok(Commitments::KZG),
"ipa" => Ok(Commitments::IPA), |
_ => Err("Invalid value for Commitments".to_string()),
}
}
}
impl From<KZGCommitmentScheme<Bn256>> for Commitments {
fn from(_value: KZGCommitmentScheme<Bn256>) -> Self {
Commitments::KZG
}
}
impl From<IPACommitmentScheme<G1Affine>> for Commitments {
fn from(_value: IPACommitmentScheme<G1Affine>) -> Self {
Commitments::IPA
}
}
impl std::fmt::Display for Commitments {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Commitments::KZG => write!(f, "kzg"),
Commitments::IPA => write!(f, "ipa"),
}
}
}
impl ToFlags for Commitments {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl From<String> for Commitments {
fn from(value: String) -> Self {
match value.to_lowercase().as_str() {
"kzg" => Commitments::KZG,
"ipa" => Commitments::IPA,
_ => {
log::error!("Invalid value for Commitments");
log::warn!("defaulting to KZG");
Commitments::KZG
}
}
}
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq)]
pub struct RunArgs {
pub tolerance: Tolerance,
pub input_scale: Scale,
pub param_scale: Scale,
pub scale_rebase_multiplier: u32,
pub lookup_range: Range,
pub logrows: u32,
pub num_inner_cols: usize,
pub variables: Vec<(String, usize)>,
pub input_visibility: Visibility,
pub output_visibility: Visibility,
pub param_visibility: Visibility,
pub div_rebasing: bool,
pub rebase_frac_zero_constants: bool,
pub check_mode: CheckMode,
pub commitment: Option<Commitments>,
}
impl Default for RunArgs {
fn default() -> Self {
Self {
tolerance: Tolerance::default(),
input_scale: 7,
param_scale: 7,
scale_rebase_multiplier: 1,
lookup_range: (-32768, 32768),
logrows: 17,
num_inner_cols: 2,
variables: vec![("batch_size".to_string(), 1)],
input_visibility: Visibility::Private,
output_visibility: Visibility::Public,
param_visibility: Visibility::Private,
div_rebasing: false,
rebase_frac_zero_constants: false,
check_mode: CheckMode::UNSAFE,
commitment: None,
}
}
}
impl RunArgs {
pub fn validate(&self) -> Result<(), Box<dyn std::error::Error>> {
if self.param_visibility == Visibility::Public {
return Err(
"params cannot be public instances, you are probably trying to use `fixed` or `kzgcommit`"
.into(),
);
}
if self.scale_rebase_multiplier < 1 {
return Err("scale_rebase_multiplier must be >= 1".into());
}
if self.lookup_range.0 > self.lookup_range.1 {
return Err("lookup_range min is greater than max".into());
}
if self.logrows < 1 {
return Err("logrows must be >= 1".into()); |
}
if self.num_inner_cols < 1 {
return Err("num_inner_cols must be >= 1".into());
}
if self.tolerance.val > 0.0 && self.output_visibility != Visibility::Public {
return Err("tolerance > 0.0 requires output_visibility to be public".into());
}
Ok(())
}
pub fn as_json(&self) -> Result<String, Box<dyn std::error::Error>> {
let serialized = match serde_json::to_string(&self) {
Ok(s) => s,
Err(e) => {
return Err(Box::new(e));
}
};
Ok(serialized)
}
pub fn from_json(arg_json: &str) -> Result<Self, serde_json::Error> {
serde_json::from_str(arg_json)
}
}
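/// Parses a `key->value` pair (e.g. `batch_size->1`) into a `(T, U)` tuple.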
fn parse_key_val<T, U>(
s: &str,
) -> Result<(T, U), Box<dyn std::error::Error + Send + Sync + 'static>>
where
T: std::str::FromStr + std::fmt::Debug,
T::Err: std::error::Error + Send + Sync + 'static,
U: std::str::FromStr + std::fmt::Debug,
U::Err: std::error::Error + Send + Sync + 'static,
{
let pos = s
.find("->")
.ok_or_else(|| format!("invalid x->y: no `->` found in `{s}`"))?;
let a = s[..pos].parse()?;
let b = s[pos + 2..].parse()?;
Ok((a, b))
} |
use colored::*;
use env_logger::Builder;
use log::{Level, LevelFilter, Record};
use std::env;
use std::fmt::Formatter;
use std::io::Write;
pub fn level_color(level: &log::Level, msg: &str) -> String {
match level {
Level::Error => msg.red(),
Level::Warn => msg.yellow(),
Level::Info => msg.blue(),
Level::Debug => msg.green(),
Level::Trace => msg.magenta(),
}
.bold()
.to_string()
}
pub fn level_text_color(level: &log::Level, msg: &str) -> String {
match level {
Level::Error => msg.red(),
Level::Warn => msg.yellow(),
Level::Info => msg.white(),
Level::Debug => msg.white(),
Level::Trace => msg.white(),
}
.bold()
.to_string()
}
fn level_token(level: &Level) -> &str {
match *level {
Level::Error => "E",
Level::Warn => "W",
Level::Info => "*",
Level::Debug => "D",
Level::Trace => "T",
}
}
fn prefix_token(level: &Level) -> String {
format!(
"{}{}{}",
"[".blue().bold(),
level_color(level, level_token(level)),
"]".blue().bold()
)
}
pub fn format(buf: &mut Formatter, record: &Record<'_>) -> Result<(), std::fmt::Error> {
let sep = format!("\n{} ", " | ".white().bold());
let level = record.level();
writeln!(
buf,
"{} {}",
prefix_token(&level),
level_color(&level, record.args().as_str().unwrap()).replace('\n', &sep),
)
}
pub fn init_logger() {
let mut builder = Builder::new();
builder.format(move |buf, record| {
writeln!(
buf,
"{} [{}, {}] - {}",
prefix_token(&record.level()),
chrono::Utc::now()
.format("%Y-%m-%d %H:%M:%S")
.to_string()
.bright_magenta(),
record.metadata().target(),
level_text_color(&record.level(), &format!("{}", record.args()))
.replace('\n', &format!("\n{} ", " | ".white().bold()))
)
});
builder.target(env_logger::Target::Stdout);
builder.filter(None, LevelFilter::Info);
if env::var("RUST_LOG").is_ok() {
builder.parse_filters(&env::var("RUST_LOG").unwrap());
}
builder.init();
} |
use crate::graph::CircuitSize;
use crate::pfsys::{Snark, SnarkWitness};
use colored_json::ToColoredJson;
use halo2_proofs::circuit::AssignedCell;
use halo2_proofs::plonk::{self};
use halo2_proofs::{
circuit::{Layouter, SimpleFloorPlanner, Value},
plonk::{Circuit, ConstraintSystem},
};
use halo2_wrong_ecc::{
integer::rns::Rns,
maingate::{
MainGate, MainGateConfig, MainGateInstructions, RangeChip, RangeConfig, RangeInstructions,
RegionCtx,
},
EccConfig,
};
use halo2curves::bn256::{Bn256, Fq, Fr, G1Affine};
use halo2curves::ff::PrimeField;
use itertools::Itertools;
use log::debug;
use log::trace;
use rand::rngs::OsRng;
use snark_verifier::loader::native::NativeLoader;
use snark_verifier::loader::EcPointLoader;
use snark_verifier::{
loader,
pcs::{
kzg::{
Bdfg21, KzgAccumulator, KzgAs, KzgSuccinctVerifyingKey, LimbsEncoding,
LimbsEncodingInstructions,
},
AccumulationScheme, AccumulationSchemeProver,
},
system,
util::arithmetic::fe_to_limbs,
verifier::{self, SnarkVerifier},
};
use std::rc::Rc;
use thiserror::Error;
const LIMBS: usize = 4;
const BITS: usize = 68;
type As = KzgAs<Bn256, Bdfg21>;
type PlonkSuccinctVerifier = verifier::plonk::PlonkSuccinctVerifier<As, LimbsEncoding<LIMBS, BITS>>;
const T: usize = 5;
const RATE: usize = 4;
const R_F: usize = 8;
const R_P: usize = 60;
type Svk = KzgSuccinctVerifyingKey<G1Affine>;
type BaseFieldEccChip = halo2_wrong_ecc::BaseFieldEccChip<G1Affine, LIMBS, BITS>;
type Halo2Loader<'a> = loader::halo2::Halo2Loader<'a, G1Affine, BaseFieldEccChip>;
pub type PoseidonTranscript<L, S> =
system::halo2::transcript::halo2::PoseidonTranscript<G1Affine, L, S, T, RATE, R_F, R_P>;
#[derive(Debug)]
pub enum AggregationError {
KZGProofVerification,
ProofRead,
ProofVerify,
ProofCreate,
}
type AggregationResult<'a> = (
KzgAccumulator<G1Affine, Rc<Halo2Loader<'a>>>,
Vec<Vec<AssignedCell<Fr, Fr>>>,
);
type LoadedProof<'a> = verifier::plonk::PlonkProof<
G1Affine,
Rc<
loader::halo2::Halo2Loader<
'a,
G1Affine,
halo2_wrong_ecc::BaseFieldEccChip<G1Affine, 4, 68>,
>,
>,
KzgAs<Bn256, Bdfg21>,
>;
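/// Verifies each snark inside the aggregation circuit, accumulating their KZG
/// accumulators into one. When `split_proofs` is set, also constrains the output
/// commitments of each proof to equal the input commitments of the next, chaining
/// sequential proofs together.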
pub fn aggregate<'a>(
svk: &Svk,
loader: &Rc<Halo2Loader<'a>>,
snarks: &[SnarkWitness<Fr, G1Affine>],
as_proof: Value<&'_ [u8]>,
split_proofs: bool,
) -> Result<AggregationResult<'a>, plonk::Error> {
let assign_instances = |instances: &[Vec<Value<Fr>>]| {
instances
.iter()
.map(|instances| {
instances
.iter()
.map(|instance| loader.assign_scalar(*instance))
.collect_vec()
})
.collect_vec()
};
let mut accumulators = vec![];
let mut snark_instances = vec![];
let mut proofs: Vec<LoadedProof<'_>> = vec![];
for snark in snarks.iter() {
let protocol = snark.protocol.as_ref().unwrap().loaded(loader);
let instances = assign_instances(&snark.instances);
snark_instances.extend(instances.iter().map(|instance| {
instance
.iter()
.map(|v| v.clone().into_assigned())
.collect_vec()
}));
let mut transcript = PoseidonTranscript::<Rc<Halo2Loader>, _>::new(loader, snark.proof());
let proof = PlonkSuccinctVerifier::read_proof(svk, &protocol, &instances, &mut transcript)
.map_err(|_| plonk::Error::Synthesis)?;
if split_proofs {
let previous_proof = proofs.last();
let split_commit = match snark.clone().split {
Some(split) => split,
None => {
log::error!("Failed to split KZG commit for sequential proofs");
return Err(plonk::Error::Synthesis);
}
};
if let Some(previous_proof) = previous_proof {
let output = |
&previous_proof.witnesses[split_commit.start..split_commit.end];
let split_commit_len = split_commit.end - split_commit.start;
let input = &proof.witnesses[..split_commit_len];
for (output, input) in output.iter().zip(input.iter()) {
loader
.ec_point_assert_eq("assert commits match", output, input)
.map_err(|e| {
log::error!(
"Failed to match KZG commits for sequential proofs: {:?}",
e
);
plonk::Error::Synthesis
})?;
}
}
proofs.push(proof.clone());
}
let mut accum = PlonkSuccinctVerifier::verify(svk, &protocol, &instances, &proof)
.map_err(|_| plonk::Error::Synthesis)?;
accumulators.append(&mut accum);
}
let accumulator = {
let mut transcript = PoseidonTranscript::<Rc<Halo2Loader>, _>::new(loader, as_proof);
let proof = As::read_proof(&Default::default(), &accumulators, &mut transcript).unwrap();
As::verify(&Default::default(), &accumulators, &proof).map_err(|_| plonk::Error::Synthesis)
}?;
Ok((accumulator, snark_instances))
}
// `Circuit::Config` must be `Clone`, so the config derives it.
#[derive(Clone)]
pub struct AggregationConfig {
main_gate_config: MainGateConfig,
range_config: RangeConfig,
}
impl AggregationConfig {
pub fn configure<F: PrimeField>(
meta: &mut ConstraintSystem<F>,
composition_bits: Vec<usize>,
overflow_bits: Vec<usize>,
) -> Self {
let main_gate_config = MainGate::<F>::configure(meta);
let range_config =
RangeChip::<F>::configure(meta, &main_gate_config, composition_bits, overflow_bits);
{
let circuit_size = CircuitSize::from_cs(meta, 23);
debug!(
"circuit size: \n {}",
circuit_size
.as_json()
.unwrap()
.to_colored_json_auto()
.unwrap()
);
}
AggregationConfig {
main_gate_config,
range_config,
}
}
pub fn main_gate(&self) -> MainGate<Fr> {
MainGate::new(self.main_gate_config.clone())
}
pub fn range_chip(&self) -> RangeChip<Fr> {
RangeChip::new(self.range_config.clone())
}
pub fn ecc_chip(&self) -> BaseFieldEccChip {
BaseFieldEccChip::new(EccConfig::new(
self.range_config.clone(),
self.main_gate_config.clone(),
))
}
}
pub struct AggregationCircuit {
svk: Svk,
snarks: Vec<SnarkWitness<Fr, G1Affine>>,
instances: Vec<Fr>,
as_proof: Value<Vec<u8>>,
split_proof: bool,
}
impl AggregationCircuit {
pub fn new(
svk: &KzgSuccinctVerifyingKey<G1Affine>,
snarks: impl IntoIterator<Item = Snark<Fr, G1Affine>>,
split_proof: bool,
) -> Result<Self, AggregationError> {
let snarks = snarks.into_iter().collect_vec();
let mut accumulators = vec![];
for snark in snarks.iter() {
trace!("Aggregating with snark instances {:?}", snark.instances);
let mut transcript = PoseidonTranscript::<NativeLoader, _>::new(snark.proof.as_slice());
let proof = PlonkSuccinctVerifier::read_proof(
svk,
snark.protocol.as_ref().unwrap(),
&snark.instances,
&mut transcript,
)
.map_err(|e| {
log::error!("{:?}", e);
AggregationError::ProofRead
})?;
let mut accum = PlonkSuccinctVerifier::verify(
svk,
snark.protocol.as_ref().unwrap(),
&snark.instances,
&proof,
)
.map_err(|_| AggregationError::ProofVerify)?;
accumulators.append(&mut accum);
}
trace!("Accumulator");
let (accumulator, as_proof) = {
let mut transcript = PoseidonTranscript::<NativeLoader, _>::new(Vec::new());
let accumulator =
As::create_proof(&Default::default(), &accumulators, &mut transcript, OsRng)
.map_err(|_| AggregationError::ProofCreate)?;
(accumulator, transcript.finalize())
};
trace!("KzgAccumulator");
let KzgAccumulator { lhs, rhs } = accumulator;
let instances = [lhs.x, lhs.y, rhs.x, rhs.y]
.map(fe_to_limbs::<_, _, LIMBS, BITS>)
.concat();
Ok(Self {
svk: *svk,
            snarks: snarks.into_iter().map_into().collect(),
instances,
as_proof: Value::known(as_proof),
split_proof,
})
}
pub fn num_limbs() -> usize {
LIMBS
}
pub fn num_bits() -> usize {
BITS
}
pub fn accumulator_indices() -> Vec<(usize, usize)> {
(0..4 * LIMBS).map(|idx| (0, idx)).collect()
}
    pub fn num_instance(original_circuit_instances: usize) -> Vec<usize> {
        let accumulation_instances = 4 * LIMBS;
        vec![accumulation_instances + original_circuit_instances]
    }
pub fn instances(&self) -> Vec<Fr> {
let mut snark_instances: Vec<Vec<Vec<Value<Fr>>>> = self
.snarks
.iter()
.map(|snark| snark.instances.clone())
.collect_vec();
let mut instances: Vec<Fr> = self.instances.clone();
for snark_instance in snark_instances.iter_mut() {
for instance in snark_instance.iter_mut() {
let mut felt_evals = vec![];
for value in instance.iter_mut() {
value.map(|v| felt_evals.push(v));
}
instances.extend(felt_evals);
}
}
instances
}
fn as_proof(&self) -> Value<&[u8]> {
self.as_proof.as_ref().map(Vec::as_slice)
}
}
impl Circuit<Fr> for AggregationCircuit {
type Config = AggregationConfig;
type FloorPlanner = SimpleFloorPlanner;
type Params = ();
fn without_witnesses(&self) -> Self {
Self {
svk: self.svk,
snarks: self
.snarks
.iter()
.map(SnarkWitness::without_witnesses)
.collect(),
instances: Vec::new(),
as_proof: Value::unknown(),
split_proof: self.split_proof,
}
}
fn configure(meta: &mut ConstraintSystem<Fr>) -> Self::Config {
AggregationConfig::configure(
meta,
            vec![BITS / LIMBS],
Rns::<Fq, Fr, LIMBS, BITS>::construct().overflow_lengths(),
)
}
fn synthesize(
&self,
config: Self::Config,
mut layouter: impl Layouter<Fr>,
) -> Result<(), plonk::Error> {
let main_gate = config.main_gate();
let range_chip = config.range_chip();
range_chip.load_table(&mut layouter)?;
let (accumulator_limbs, snark_instances) = layouter.assign_region(
|| "",
|region| {
let ctx = RegionCtx::new(region, 0);
let ecc_chip = config.ecc_chip();
let loader = Halo2Loader::new(ecc_chip, ctx);
let (accumulator, snark_instances) = aggregate(
&self.svk,
&loader,
&self.snarks,
self.as_proof(),
self.split_proof,
)?;
let accumulator_limbs = [accumulator.lhs, accumulator.rhs]
.iter()
.map(|ec_point| {
loader
.ecc_chip()
.assign_ec_point_to_limbs(&mut loader.ctx_mut(), ec_point.assigned())
})
.collect::<Result<Vec<_>, plonk::Error>>()?
.into_iter()
.flatten();
Ok((accumulator_limbs, snark_instances))
},
)?;
let mut instance_offset = 0;
for limb in accumulator_limbs {
main_gate.expose_public(layouter.namespace(|| ""), limb, instance_offset)?;
instance_offset += 1;
}
for instance in snark_instances.into_iter() {
for elem in instance.into_iter() {
main_gate.expose_public(layouter.namespace(|| ""), elem, instance_offset)?;
instance_offset += 1;
}
}
Ok(())
}
} |
use thiserror::Error;
/// Aggregate proof generation for EVM using KZG
pub mod aggregation_kzg;
#[derive(Error, Debug)]
/// Errors related to evm verification
pub enum EvmVerificationError {
/// If the Solidity verifier worked but returned false
#[error("Solidity verifier found the proof invalid")]
InvalidProof,
    /// If the Solidity verifier threw an error (e.g. OutOfGas)
#[error("Execution of Solidity code failed")]
SolidityExecution,
/// EVM execution errors
#[error("EVM execution of raw code failed")]
RawExecution,
/// EVM verify errors
#[error("evm verification reverted")]
Reverted,
/// EVM verify errors
#[error("evm deployment failed")]
Deploy,
    /// Invalid visibility
#[error("Invalid visibility")]
InvalidVisibility,
}
|
pub mod evm;
pub mod srs;
use crate::circuit::CheckMode;
use crate::graph::GraphWitness;
use crate::pfsys::evm::aggregation_kzg::PoseidonTranscript;
use crate::{Commitments, EZKL_BUF_CAPACITY, EZKL_KEY_FORMAT};
use clap::ValueEnum;
use halo2_proofs::circuit::Value;
use halo2_proofs::plonk::{
create_proof, keygen_pk, keygen_vk_custom, verify_proof, Circuit, ProvingKey, VerifyingKey,
};
use halo2_proofs::poly::commitment::{CommitmentScheme, Params, ParamsProver, Prover, Verifier};
use halo2_proofs::poly::ipa::commitment::IPACommitmentScheme;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2_proofs::poly::VerificationStrategy;
use halo2_proofs::transcript::{EncodedChallenge, TranscriptReadBuffer, TranscriptWriterBuffer};
use halo2curves::ff::{FromUniformBytes, PrimeField, WithSmallOrderMulGroup};
use halo2curves::serde::SerdeObject;
use halo2curves::CurveAffine;
use instant::Instant;
use log::{debug, info, trace};
use rand::rngs::OsRng;
use rand::rngs::StdRng;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use snark_verifier::loader::native::NativeLoader;
use snark_verifier::system::halo2::transcript::evm::EvmTranscript;
use snark_verifier::verifier::plonk::PlonkProtocol;
use std::error::Error;
use std::fs::File;
use std::io::{self, BufReader, BufWriter, Cursor, Write};
use std::ops::Deref;
use std::path::PathBuf;
use thiserror::Error as thisError;
use tosubcommand::ToFlags;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
fn serde_format_from_str(s: &str) -> halo2_proofs::SerdeFormat {
match s {
"processed" => halo2_proofs::SerdeFormat::Processed,
"raw-bytes-unchecked" => halo2_proofs::SerdeFormat::RawBytesUnchecked,
"raw-bytes" => halo2_proofs::SerdeFormat::RawBytes,
_ => panic!("invalid serde format"),
}
}
#[derive(
    ValueEnum, Copy, Clone, Default, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd,
)]
pub enum ProofType {
    #[default]
    Single,
    ForAggr,
}
impl std::fmt::Display for ProofType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
ProofType::Single => "single",
ProofType::ForAggr => "for-aggr",
}
)
}
}
impl ToFlags for ProofType {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl From<ProofType> for TranscriptType {
fn from(val: ProofType) -> Self {
match val {
ProofType::Single => TranscriptType::EVM,
ProofType::ForAggr => TranscriptType::Poseidon,
}
}
}
impl From<ProofType> for StrategyType {
fn from(val: ProofType) -> Self {
match val {
ProofType::Single => StrategyType::Single,
ProofType::ForAggr => StrategyType::Accum,
}
}
}
impl ToPyObject for ProofType {
fn to_object(&self, py: Python) -> PyObject {
match self {
ProofType::Single => "Single".to_object(py),
ProofType::ForAggr => "ForAggr".to_object(py),
}
}
}
impl<'source> pyo3::FromPyObject<'source> for ProofType {
fn extract(ob: &'source pyo3::PyAny) -> pyo3::PyResult<Self> {
let trystr = <pyo3::types::PyString as pyo3::PyTryFrom>::try_from(ob)?;
let strval = trystr.to_string();
match strval.to_lowercase().as_str() {
"single" => Ok(ProofType::Single),
"for-aggr" => Ok(ProofType::ForAggr),
_ => Err(pyo3::exceptions::PyValueError::new_err(
"Invalid value for ProofType",
)),
}
}
}
// `ValueEnum` is required by the `Display` impl below, which calls `to_possible_value`.
#[derive(ValueEnum, Copy, Clone, Debug)]
pub enum StrategyType {
Single,
Accum,
}
impl std::fmt::Display for StrategyType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.to_possible_value()
.expect("no values are skipped")
.get_name()
.fmt(f)
}
}
impl pyo3::IntoPy<PyObject> for StrategyType {
fn into_py(self, py: Python) -> PyObject {
match self {
StrategyType::Single => |
"single".to_object(py),
StrategyType::Accum => "accum".to_object(py),
}
}
}
impl<'source> pyo3::FromPyObject<'source> for StrategyType {
fn extract(ob: &'source pyo3::PyAny) -> pyo3::PyResult<Self> {
let trystr = <pyo3::types::PyString as pyo3::PyTryFrom>::try_from(ob)?;
let strval = trystr.to_string();
match strval.to_lowercase().as_str() {
"single" => Ok(StrategyType::Single),
"accum" => Ok(StrategyType::Accum),
_ => Err(pyo3::exceptions::PyValueError::new_err(
"Invalid value for StrategyType",
)),
}
}
}
#[derive(Debug)]
pub enum PfSysError {
PackingExponent,
}
#[derive(
    ValueEnum, Default, Copy, Clone, Debug, PartialEq, Eq, Deserialize, Serialize, PartialOrd,
)]
pub enum TranscriptType {
    Poseidon,
    // EVM is taken as the default, matching the default ProofType (Single -> EVM).
    #[default]
    EVM,
}
impl std::fmt::Display for TranscriptType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"{}",
match self {
TranscriptType::Poseidon => "poseidon",
TranscriptType::EVM => "evm",
}
)
}
}
impl ToFlags for TranscriptType {
fn to_flags(&self) -> Vec<String> {
vec![format!("{}", self)]
}
}
impl ToPyObject for TranscriptType {
fn to_object(&self, py: Python) -> PyObject {
match self {
TranscriptType::Poseidon => "Poseidon".to_object(py),
TranscriptType::EVM => "EVM".to_object(py),
}
}
}
pub fn g1affine_to_pydict(g1affine_dict: &PyDict, g1affine: &G1Affine) {
let g1affine_x = field_to_string(&g1affine.x);
let g1affine_y = field_to_string(&g1affine.y);
g1affine_dict.set_item("x", g1affine_x).unwrap();
g1affine_dict.set_item("y", g1affine_y).unwrap();
}
use halo2curves::bn256::G1;
pub fn g1_to_pydict(g1_dict: &PyDict, g1: &G1) {
let g1_x = field_to_string(&g1.x);
let g1_y = field_to_string(&g1.y);
let g1_z = field_to_string(&g1.z);
g1_dict.set_item("x", g1_x).unwrap();
g1_dict.set_item("y", g1_y).unwrap();
g1_dict.set_item("z", g1_z).unwrap();
}
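/// Converts a field element to its serde_json string representation.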
pub fn field_to_string<F: PrimeField + SerdeObject + Serialize>(fp: &F) -> String {
let repr = serde_json::to_string(&fp).unwrap();
let b: String = serde_json::from_str(&repr).unwrap();
b
}
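/// Parses a field element from the string form produced by [field_to_string].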
pub fn string_to_field<F: PrimeField + SerdeObject + Serialize + DeserializeOwned>(
b: &String,
) -> F {
let repr = serde_json::to_string(&b).unwrap();
let fp: F = serde_json::from_str(&repr).unwrap();
fp
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PrettyElements {
pub rescaled_inputs: Vec<Vec<String>>,
pub inputs: Vec<Vec<String>>,
pub processed_inputs: Vec<Vec<String>>,
pub processed_params: Vec<Vec<String>>,
pub processed_outputs: Vec<Vec<String>>,
pub rescaled_outputs: Vec<Vec<String>>,
pub outputs: Vec<Vec<String>>,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Snark<F: PrimeField + SerdeObject, C: CurveAffine>
where
C::Scalar: Serialize + DeserializeOwned,
C::ScalarExt: Serialize + DeserializeOwned,
{
pub protocol: Option<PlonkProtocol<C>>,
pub instances: Vec<Vec<F>>,
pub proof: Vec<u8>,
pub hex_proof: Option<String>,
pub transcript_type: TranscriptType,
pub split: Option<ProofSplitCommit>,
pub pretty_public_inputs: Option<PrettyElements>,
pub timestamp: Option<u128>,
pub commitment: Option<Commitments>,
}
use pyo3::{types::PyDict, PyObject, Python, ToPyObject};
impl<F: PrimeField + SerdeObject + Serialize, C: CurveAffine + Serialize> ToPyObject for Snark<F, C>
where
C::Scalar: Serialize + DeserializeOwned,
C::ScalarExt: Serialize + DeserializeOwned,
{
fn to_object(&self, py: Python) -> PyObject {
let dict = PyDict::new(py);
let field_elems: Vec<Vec<String>> = self
.instances
.iter()
.map(|x| x.iter().map(|fp| field_to_string(fp)).collect())
.collect::<Vec<_>>();
dict.set_item("instances", field_elems).unwrap();
let hex_proof = hex::encode(&self.proof);
dict.set_item("proof", format!("0x{}", hex_proof)).unwrap();
dict.set_item("transcript_type", self.transcript_type)
.unwrap();
dict.to_object(py)
}
}
impl<
F: PrimeField + SerdeObject + Serialize + FromUniformBytes<64> + DeserializeOwned,
C: CurveAffine + Serialize + DeserializeOwned,
> Snark<F, C>
where
C::Scalar: Serialize + DeserializeOwned,
C::ScalarExt: Serialize + DeserializeOwned,
{
pub fn new( |
protocol: Option<PlonkProtocol<C>>,
instances: Vec<Vec<F>>,
proof: Vec<u8>,
hex_proof: Option<String>,
transcript_type: TranscriptType,
split: Option<ProofSplitCommit>,
pretty_public_inputs: Option<PrettyElements>,
commitment: Option<Commitments>,
) -> Self {
Self {
protocol,
instances,
proof,
hex_proof,
transcript_type,
split,
pretty_public_inputs,
timestamp: Some(
instant::SystemTime::now()
.duration_since(instant::SystemTime::UNIX_EPOCH)
.unwrap()
.as_millis(),
),
commitment,
}
}
    pub fn create_hex_proof(&mut self) {
let hex_proof = hex::encode(&self.proof);
self.hex_proof = Some(format!("0x{}", hex_proof));
}
pub fn save(&self, proof_path: &PathBuf) -> Result<(), Box<dyn Error>> {
let file = std::fs::File::create(proof_path)?;
let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, file);
serde_json::to_writer(&mut writer, &self)?;
Ok(())
}
pub fn load<Scheme: CommitmentScheme<Curve = C, Scalar = F>>(
proof_path: &PathBuf,
) -> Result<Self, Box<dyn Error>>
where
<C as CurveAffine>::ScalarExt: FromUniformBytes<64>,
{
trace!("reading proof");
let file = std::fs::File::open(proof_path)?;
let reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, file);
let proof: Self = serde_json::from_reader(reader)?;
Ok(proof)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProofSplitCommit {
start: usize,
end: usize,
}
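// Computes the slice of proof commitments that belongs to the outputs: input and param
// polycommits are laid out first, so their total element count gives the start offset.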
impl From<GraphWitness> for Option<ProofSplitCommit> {
fn from(witness: GraphWitness) -> Self {
let mut elem_offset = 0;
if let Some(input) = witness.processed_inputs {
if let Some(polycommit) = input.polycommit {
let num_elements = polycommit
.iter()
.map(|polycommit| polycommit.len())
.sum::<usize>();
elem_offset += num_elements;
}
}
if let Some(params) = witness.processed_params {
if let Some(polycommit) = params.polycommit {
let num_elements = polycommit
.iter()
.map(|polycommit| polycommit.len())
.sum::<usize>();
elem_offset += num_elements;
}
}
if let Some(output) = witness.processed_outputs {
if let Some(polycommit) = output.polycommit {
let num_elements = polycommit
.iter()
.map(|polycommit| polycommit.len())
.sum::<usize>();
Some(ProofSplitCommit {
start: elem_offset,
end: elem_offset + num_elements,
})
} else {
None
}
} else {
None
}
}
}
#[derive(Clone)]
pub struct SnarkWitness<F: PrimeField, C: CurveAffine> {
protocol: Option<PlonkProtocol<C>>,
instances: Vec<Vec<Value<F>>>,
proof: Value<Vec<u8>>,
split: Option<ProofSplitCommit>,
}
impl<F: PrimeField, C: CurveAffine> SnarkWitness<F, C> {
fn without_witnesses(&self) -> Self {
SnarkWitness {
protocol: self.protocol.clone(),
instances: self
.instances
.iter()
.map(|instances| vec![Value::unknown(); instances.len()])
                .collect(),
proof: Value::unknown(),
split: self.split.clone(),
}
}
fn proof(&self) -> Value<&[u8]> {
self.proof.as_ref().map(Vec::as_slice)
}
}
impl<F: PrimeField + SerdeObject, C: CurveAffine> From<Snark<F, C>> for SnarkWitness<F, C>
where
C::Scalar: Serialize + DeserializeOwned,
C::ScalarExt: Serialize + DeserializeOwned,
{
fn from(snark: Snark<F, C>) -> Self {
Self {
protocol: snark.protocol,
instances: snark
.instances
.into_iter()
.map(|instances| instances.into_iter().map(Value::known).collect())
.collect(),
proof: Value::known(snark.proof),
split: snark.split,
}
}
}
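/// Generates a proving key (deriving the verification key along the way) for `circuit`
/// over the given SRS, using a witness-free copy of the circuit.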
pub fn create_keys<Scheme: CommitmentScheme, C: Circuit<Scheme::Scalar>>(
circuit: &C,
params: &'_ Scheme::ParamsProver,
disable_selector_compression: bool,
) -> Result<ProvingKey<Scheme::Curve>, halo2_proofs::plonk::Error>
where
C: Circuit<Scheme::Scalar>,
<Scheme as CommitmentScheme>::Scalar: FromUniformBytes<64>,
{
let empty_circuit = <C as Circuit<Scheme::Scalar>>::without_witnesses(circuit);
let now = Instant::now();
trace!("preparing VK");
let vk = keygen_vk_custom(params, &empty_circuit, !disable_selector_compression)?;
let elapsed = now.elapsed();
info!("VK took {}.{}", elapsed.as_secs(), elapsed.subsec_millis());
let now = Instant::now();
let pk = keygen_pk(params, vk, &empty_circuit)?;
let elapsed = now.elapsed();
info!("PK took {}.{}", elapsed.as_secs(), elapsed.subsec_millis());
Ok(pk)
}
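/// Creates a proof for `circuit` with the given public `instances`, and verifies the
/// freshly generated proof before returning when `check_mode` is [CheckMode::SAFE].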
pub fn create_proof_circuit<
'params,
Scheme: CommitmentScheme,
C: Circuit<Scheme::Scalar>,
P: Prover<'params, Scheme>,
V: Verifier<'params, Scheme>,
Strategy: VerificationStrategy<'params, Scheme, V>,
E: EncodedChallenge<Scheme::Curve>,
TW: TranscriptWriterBuffer<Vec<u8>, Scheme::Curve, E>,
    TR: TranscriptReadBuffer<Cursor<Vec<u8>>, Scheme::Curve, E>,
>(
circuit: C,
instances: Vec<Vec<Scheme::Scalar>>,
params: &'params Scheme::ParamsProver,
pk: &ProvingKey<Scheme::Curve>,
check_mode: CheckMode,
commitment: Commitments,
transcript_type: TranscriptType,
split: Option<ProofSplitCommit>,
protocol: Option<PlonkProtocol<Scheme::Curve>>,
) -> Result<Snark<Scheme::Scalar, Scheme::Curve>, Box<dyn Error>>
where
Scheme::ParamsVerifier: 'params,
Scheme::Scalar: Serialize
+ DeserializeOwned
+ SerdeObject
+ PrimeField
+ FromUniformBytes<64>
+ WithSmallOrderMulGroup<3>,
Scheme::Curve: Serialize + DeserializeOwned,
{
let strategy = Strategy::new(params.verifier_params());
let mut transcript = TranscriptWriterBuffer::<_, Scheme::Curve, _>::init(vec![]);
    // `det-prove` (assumed feature name, as in the ezkl crate) selects a seeded,
    // deterministic RNG for reproducible proofs; otherwise use OS randomness.
    #[cfg(feature = "det-prove")]
    let mut rng = <StdRng as rand::SeedableRng>::from_seed([0u8; 32]);
    #[cfg(not(feature = "det-prove"))]
    let mut rng = OsRng;
let pi_inner = instances
.iter()
.map(|e| e.deref())
.collect::<Vec<&[Scheme::Scalar]>>();
let pi_inner: &[&[&[Scheme::Scalar]]] = &[&pi_inner];
trace!("instances {:?}", instances);
trace!(
"pk num instance column: {:?}",
pk.get_vk().cs().num_instance_columns()
);
info!("proof started...");
let now = Instant::now();
create_proof::<Scheme, P, _, _, TW, _>(
params,
pk,
&[circuit],
pi_inner,
&mut rng,
&mut transcript,
)?;
let proof = transcript.finalize();
let hex_proof = format!("0x{}", hex::encode(&proof));
let checkable_pf = Snark::new(
protocol,
instances,
proof,
Some(hex_proof),
transcript_type,
split,
None,
Some(commitment),
);
if check_mode == CheckMode::SAFE {
debug!("verifying generated proof");
let verifier_params = params.verifier_params();
verify_proof_circuit::<V, Scheme, Strategy, E, TR>(
&checkable_pf,
verifier_params,
            pk.get_vk(),
strategy,
verifier_params.n(),
)?;
}
let elapsed = now.elapsed();
info!(
"proof took {}.{}",
elapsed.as_secs(),
elapsed.subsec_millis()
);
Ok(checkable_pf)
}
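/// Overwrites the leading bytes of the proof transcript with the given commitments,
/// swapping in externally computed polynomial commitments.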
pub fn swap_proof_commitments<
Scheme: CommitmentScheme,
E: EncodedChallenge<Scheme::Curve>,
TW: TranscriptWriterBuffer<Vec<u8>, Scheme::Curve, E>,
>(
snark: &Snark<Scheme::Scalar, Scheme::Curve>,
commitments: &[Scheme::Curve],
) -> Result<Snark<Scheme::Scalar, Scheme::Curve>, Box<dyn Error>>
where
Scheme::Scalar: SerdeObject
+ PrimeField
+ FromUniformBytes<64>
+ WithSmallOrderMulGroup<3>
+ Ord
+ Serialize
+ DeserializeOwned,
Scheme::Curve: Serialize + DeserializeOwned,
{
let mut transcript_new: TW = TranscriptWriterBuffer::<_, Scheme::Curve, _>::init(vec![]);
for commit in commitments {
transcript_new
.write_point(*commit)
.map_err(|_| "failed to write point")?;
}
let proof_first_bytes = transcript_new.finalize();
let mut snark_new = snark.clone();
snark_new.proof[..proof_first_bytes.len()].copy_from_slice(&proof_first_bytes);
snark_new.create_hex_proof();
Ok(snark_new)
}
pub fn swap_proof_commitments_polycommit(
snark: &Snark<Fr, G1Affine>,
commitments: &[G1Affine],
) -> Result<Snark<Fr, G1Affine>, Box<dyn Error>> {
let proof = match snark.commitment {
Some(Commitments::KZG) => match snark.transcript_type {
TranscriptType::EVM => swap_proof_commitments::<
KZGCommitmentScheme<Bn256>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(snark, commitments)?,
TranscriptType::Poseidon => swap_proof_commitments::<
KZGCommitmentScheme<Bn256>,
_,
PoseidonTranscript<NativeLoader, _>,
>(snark, commitments)?,
},
Some(Commitments::IPA) => match snark.transcript_type { |
TranscriptType::EVM => swap_proof_commitments::<
IPACommitmentScheme<G1Affine>,
_,
EvmTranscript<G1Affine, _, _, _>,
>(snark, commitments)?,
TranscriptType::Poseidon => swap_proof_commitments::<
IPACommitmentScheme<G1Affine>,
_,
PoseidonTranscript<NativeLoader, _>,
>(snark, commitments)?,
},
None => {
return Err("commitment scheme not found".into());
}
};
Ok(proof)
}
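/// Verifies `snark` against the given verifier params, verifying key, and strategy.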
pub fn verify_proof_circuit<
'params,
V: Verifier<'params, Scheme>,
Scheme: CommitmentScheme,
Strategy: VerificationStrategy<'params, Scheme, V>,
E: EncodedChallenge<Scheme::Curve>,
TR: TranscriptReadBuffer<Cursor<Vec<u8>>, Scheme::Curve, E>,
>(
snark: &Snark<Scheme::Scalar, Scheme::Curve>,
params: &'params Scheme::ParamsVerifier,
vk: &VerifyingKey<Scheme::Curve>,
strategy: Strategy,
orig_n: u64,
) -> Result<Strategy::Output, halo2_proofs::plonk::Error>
where
Scheme::Scalar: SerdeObject
+ PrimeField
+ FromUniformBytes<64>
+ WithSmallOrderMulGroup<3>
+ Serialize
+ DeserializeOwned,
Scheme::Curve: Serialize + DeserializeOwned,
{
let pi_inner = snark
.instances
.iter()
.map(|e| e.deref())
.collect::<Vec<&[Scheme::Scalar]>>();
let instances: &[&[&[Scheme::Scalar]]] = &[&pi_inner];
trace!("instances {:?}", instances);
let mut transcript = TranscriptReadBuffer::init(Cursor::new(snark.proof.clone()));
verify_proof::<Scheme, V, _, TR, _>(params, vk, strategy, instances, &mut transcript, orig_n)
}
pub fn load_vk<Scheme: CommitmentScheme, C: Circuit<Scheme::Scalar>>(
path: PathBuf,
params: <C as Circuit<Scheme::Scalar>>::Params,
) -> Result<VerifyingKey<Scheme::Curve>, Box<dyn Error>>
where
C: Circuit<Scheme::Scalar>,
Scheme::Curve: SerdeObject + CurveAffine,
    Scheme::Scalar: PrimeField + SerdeObject + FromUniformBytes<64>,
{
info!("loading verification key from {:?}", path);
let f =
File::open(path.clone()).map_err(|_| format!("failed to load vk at {}", path.display()))?;
let mut reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, f);
let vk = VerifyingKey::<Scheme::Curve>::read::<_, C>(
&mut reader,
serde_format_from_str(&EZKL_KEY_FORMAT),
params,
)?;
info!("done loading verification key โ
");
Ok(vk)
}
pub fn load_pk<Scheme: CommitmentScheme, C: Circuit<Scheme::Scalar>>(
path: PathBuf,
params: <C as Circuit<Scheme::Scalar>>::Params,
) -> Result<ProvingKey<Scheme::Curve>, Box<dyn Error>>
where
C: Circuit<Scheme::Scalar>,
Scheme::Curve: SerdeObject + CurveAffine,
Scheme::Scalar: PrimeField + SerdeObject + FromUniformBytes<64>,
{
info!("loading proving key from {:?}", path);
let f =
File::open(path.clone()).map_err(|_| format!("failed to load pk at {}", path.display()))?;
let mut reader = BufReader::with_capacity(*EZKL_BUF_CAPACITY, f);
let pk = ProvingKey::<Scheme::Curve>::read::<_, C>(
&mut reader,
serde_format_from_str(&EZKL_KEY_FORMAT),
params,
)?;
info!("done loading proving key โ
");
Ok(pk)
}
pub fn save_pk<C: SerdeObject + CurveAffine>(
path: &PathBuf,
pk: &ProvingKey<C>,
) -> Result<(), io::Error>
where
C::ScalarExt: FromUniformBytes<64> + SerdeObject,
{
info!("saving proving key ๐พ");
let f = File::create(path)?;
let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
pk.write(&mut writer, serde_format_from_str(&EZKL_KEY_FORMAT))?;
writer.flush()?;
info!("done saving proving key โ
");
Ok(())
}
pub fn save_vk<C: CurveAffine + SerdeObject>(
path: &PathBuf,
vk: &VerifyingKey<C>,
) -> Result<(), io::Error>
where
C::ScalarExt: FromUniformBytes<64> + SerdeObject,
{
info!("saving verification key ๐พ");
let f = File::create(path)?;
let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
    vk.write(&mut writer, serde_format_from_str(&EZKL_KEY_FORMAT))?;
writer.flush()?;
info!("done saving verification key โ
");
Ok(())
}
pub fn save_params<Scheme: CommitmentScheme>(
path: &PathBuf,
params: &'_ Scheme::ParamsVerifier,
) -> Result<(), io::Error> {
info!("saving parameters ๐พ");
let f = File::create(path)?;
let mut writer = BufWriter::with_capacity(*EZKL_BUF_CAPACITY, f);
params.write(&mut writer)?;
writer.flush()?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2curves::bn256::{Bn256, Fr, G1Affine};
use tempfile::Builder;
    #[tokio::test]
    async fn test_can_load_saved_srs() {
let tmp_dir = Builder::new().prefix("example").tempdir().unwrap();
let fname = tmp_dir.path().join("polycommit.params");
let srs = srs::gen_srs::<KZGCommitmentScheme<Bn256>>(1);
let res = save_params::<KZGCommitmentScheme<Bn256>>(&fname, &srs);
assert!(res.is_ok());
let res = srs::load_srs_prover::<KZGCommitmentScheme<Bn256>>(fname);
assert!(res.is_ok())
} |
    #[test]
    fn test_snark_serialization_roundtrip() {
let snark = Snark::<Fr, G1Affine> {
proof: vec![1, 2, 3, 4, 5, 6, 7, 8],
instances: vec![vec![Fr::from(1)], vec![Fr::from(2)]],
transcript_type: TranscriptType::EVM,
protocol: None,
hex_proof: None,
split: None,
pretty_public_inputs: None,
timestamp: None,
commitment: None,
};
snark
.save(&"test_snark_serialization_roundtrip.json".into())
.unwrap();
let snark2 = Snark::<Fr, G1Affine>::load::<KZGCommitmentScheme<Bn256>>(
&"test_snark_serialization_roundtrip.json".into(),
)
.unwrap();
assert_eq!(snark.instances, snark2.instances);
assert_eq!(snark.proof, snark2.proof);
assert_eq!(snark.transcript_type, snark2.transcript_type);
}
} |
use halo2_proofs::poly::commitment::CommitmentScheme;
use halo2_proofs::poly::commitment::Params;
use halo2_proofs::poly::commitment::ParamsProver;
use log::info;
use std::error::Error;
use std::fs::File;
use std::io::BufReader;
use std::path::PathBuf;
/// For now we use the URLs of the powers-of-tau ceremony from <https://github.com/han0110/halo2-kzg-srs>
pub const PUBLIC_SRS_URL: &str =
"https://trusted-setup-halo2kzg.s3.eu-central-1.amazonaws.com/perpetual-powers-of-tau-raw-";
/// Helper function for generating SRS. Only use for testing
pub fn gen_srs<Scheme: CommitmentScheme>(k: u32) -> Scheme::ParamsProver {
Scheme::ParamsProver::new(k)
}
/// Loads the [CommitmentScheme::ParamsVerifier] at `path`.
pub fn load_srs_verifier<Scheme: CommitmentScheme>(
path: PathBuf,
) -> Result<Scheme::ParamsVerifier, Box<dyn Error>> {
info!("loading srs from {:?}", path);
let f = File::open(path.clone())
.map_err(|_| format!("failed to load srs at {}", path.display()))?;
let mut reader = BufReader::new(f);
Params::<'_, Scheme::Curve>::read(&mut reader).map_err(Box::<dyn Error>::from)
}
/// Loads the [CommitmentScheme::ParamsProver] at `path`.
pub fn load_srs_prover<Scheme: CommitmentScheme>(
path: PathBuf,
) -> Result<Scheme::ParamsProver, Box<dyn Error>> {
info!("loading srs from {:?}", path);
let f = File::open(path.clone())
.map_err(|_| format!("failed to load srs at {}", path.display()))?;
let mut reader = BufReader::new(f);
Params::<'_, Scheme::Curve>::read(&mut reader).map_err(Box::<dyn Error>::from)
}
|
use crate::circuit::modules::polycommit::PolyCommitChip;
use crate::circuit::modules::poseidon::{
spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH},
PoseidonChip,
};
use crate::circuit::modules::Module;
use crate::circuit::{CheckMode, Tolerance};
use crate::commands::*;
use crate::fieldutils::{felt_to_i128, i128_to_felt};
use crate::graph::modules::POSEIDON_LEN_GRAPH;
use crate::graph::TestDataSource;
use crate::graph::{
quantize_float, scale_to_multiplier, GraphCircuit, GraphSettings, Model, Visibility,
};
use crate::pfsys::evm::aggregation_kzg::AggregationCircuit;
use crate::pfsys::{
load_pk, load_vk, save_params, save_vk, srs::gen_srs as ezkl_gen_srs, srs::load_srs_prover,
ProofType, TranscriptType,
};
use crate::Commitments;
use crate::RunArgs;
use halo2_proofs::poly::ipa::commitment::IPACommitmentScheme;
use halo2_proofs::poly::kzg::commitment::KZGCommitmentScheme;
use halo2curves::bn256::{Bn256, Fq, Fr, G1Affine, G1};
use pyo3::exceptions::{PyIOError, PyRuntimeError};
use pyo3::prelude::*;
use pyo3::wrap_pyfunction;
use pyo3_log;
use snark_verifier::util::arithmetic::PrimeField;
use std::str::FromStr;
use std::{fs::File, path::PathBuf};
use tokio::runtime::Runtime;
type PyFelt = String;
enum PyTestDataSource {
File,
OnChain,
}
impl From<PyTestDataSource> for TestDataSource {
fn from(py_test_data_source: PyTestDataSource) -> Self {
match py_test_data_source {
PyTestDataSource::File => TestDataSource::File,
PyTestDataSource::OnChain => TestDataSource::OnChain,
}
}
} |
struct PyG1 {
x: PyFelt,
y: PyFelt,
z: PyFelt,
}
impl From<G1> for PyG1 {
fn from(g1: G1) -> Self {
PyG1 {
x: crate::pfsys::field_to_string::<Fq>(&g1.x),
y: crate::pfsys::field_to_string::<Fq>(&g1.y),
z: crate::pfsys::field_to_string::<Fq>(&g1.z),
}
}
}
impl From<PyG1> for G1 {
fn from(val: PyG1) -> Self {
G1 {
x: crate::pfsys::string_to_field::<Fq>(&val.x),
y: crate::pfsys::string_to_field::<Fq>(&val.y),
z: crate::pfsys::string_to_field::<Fq>(&val.z),
}
}
}
impl pyo3::ToPyObject for PyG1 {
fn to_object(&self, py: pyo3::Python) -> pyo3::PyObject {
let g1_dict = pyo3::types::PyDict::new(py);
g1_dict.set_item("x", self.x.to_object(py)).unwrap();
g1_dict.set_item("y", self.y.to_object(py)).unwrap();
g1_dict.set_item("z", self.z.to_object(py)).unwrap();
g1_dict.into()
}
}
pub struct PyG1Affine {
pub x: PyFelt,
pub y: PyFelt,
}
impl From<G1Affine> for PyG1Affine {
fn from(g1: G1Affine) -> Self {
PyG1Affine {
x: crate::pfsys::field_to_string::<Fq>(&g1.x),
y: crate::pfsys::field_to_string::<Fq>(&g1.y),
}
}
}
impl From<PyG1Affine> for G1Affine {
fn from(val: PyG1Affine) -> Self {
G1Affine {
x: crate::pfsys::string_to_field::<Fq>(&val.x),
y: crate::pfsys::string_to_field::<Fq>(&val.y),
}
}
}
impl pyo3::ToPyObject for PyG1Affine {
fn to_object(&self, py: pyo3::Python) -> pyo3::PyObject {
let g1_dict = pyo3::types::PyDict::new(py);
g1_dict.set_item("x", self.x.to_object(py)).unwrap();
g1_dict.set_item("y", self.y.to_object(py)).unwrap();
g1_dict.into()
}
} |