} _ => unreachable!(), }); } let mut table_queries = vec![one.clone()]; for table in tables { table_queries.push(match table { VarTensor::Advice { inner: advices, .. } => { cs.query_advice(advices[0][0], Rotation(0)) } _ => unreachable!(), }); } let lhs = lookup_queries.into_iter().map(|c| c * s_lookupq.clone()); let rhs = table_queries.into_iter().map(|c| c * s_ltableq.clone()); expression.extend(lhs.zip(rhs)); expression }); self.dynamic_lookups .lookup_selectors .entry((x, y)) .or_insert(s_lookup); } } self.dynamic_lookups.table_selectors.push(s_ltable); if self.dynamic_lookups.tables.is_empty() { debug!("assigning dynamic lookup table"); self.dynamic_lookups.tables = tables.to_vec(); } if self.dynamic_lookups.inputs.is_empty() { debug!("assigning dynamic lookup input"); self.dynamic_lookups.inputs = lookups.to_vec(); } Ok(()) } pub fn configure_shuffles( &mut self, cs: &mut ConstraintSystem<F>, inputs: &[VarTensor; 2], references: &[VarTensor; 2], ) -> Result<(), Box<dyn Error>> where F: Field, { for l in inputs.iter() { if !l.is_advice() { return Err("wrong input type for dynamic lookup".into()); } } for t in references.iter() { if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 { return Err("wrong table type for dynamic lookup".into());
} } let one = Expression::Constant(F::ONE); let s_reference = cs.complex_selector(); for x in 0..inputs[0].num_blocks() { for y in 0..inputs[0].num_inner_cols() { let s_input = cs.complex_selector(); cs.lookup_any("lookup", |cs| { let s_inputq = cs.query_selector(s_input); let mut expression = vec![]; let s_referenceq = cs.query_selector(s_reference); let mut input_queries = vec![one.clone()]; for input in inputs { input_queries.push(match input { VarTensor::Advice { inner: advices, .. } => { cs.query_advice(advices[x][y], Rotation(0)) } _ => unreachable!(), }); } let mut ref_queries = vec![one.clone()]; for reference in references { ref_queries.push(match reference { VarTensor::Advice { inner: advices, .. } => { cs.query_advice(advices[0][0], Rotation(0)) } _ => unreachable!(), }); } let lhs = input_queries.into_iter().map(|c| c * s_inputq.clone()); let rhs = ref_queries.into_iter().map(|c| c * s_referenceq.clone()); expression.extend(lhs.zip(rhs)); expression }); self.shuffles .input_selectors .entry((x, y)) .or_insert(s_input); } } self.shuffles.reference_selectors.push(s_reference); if self.shuffles.references.is_empty() { debug!("assigning shuffles reference"); self.shuffles.references
= references.to_vec(); } if self.shuffles.inputs.is_empty() { debug!("assigning shuffles input"); self.shuffles.inputs = inputs.to_vec(); } Ok(()) } pub fn configure_range_check( &mut self, cs: &mut ConstraintSystem<F>, input: &VarTensor, index: &VarTensor, range: Range, logrows: usize, ) -> Result<(), Box<dyn Error>> where F: Field, { if !input.is_advice() { return Err("wrong input type for lookup input".into()); } let range_check = if let std::collections::btree_map::Entry::Vacant(e) = self.range_checks.ranges.entry(range) { let range_check = RangeCheck::<F>::configure(cs, range, logrows); e.insert(range_check.clone()); range_check } else { return Ok(()); }; for x in 0..input.num_blocks() { for y in 0..input.num_inner_cols() { let len = range_check.selector_constructor.degree; let multi_col_selector = cs.complex_selector(); for (col_idx, input_col) in range_check.inputs.iter().enumerate() { cs.lookup("", |cs| { let mut res = vec![]; let sel = cs.query_selector(multi_col_selector); let synthetic_sel = match len { 1 => Expression::Constant(F::from(1)), _ => match index { VarTensor::Advice { inner: advices, .. } => { cs.query_advice(advices[x][y], Rotation(0)) } _ => unreachable!(), }, }; let input_query = match &input { VarTensor::Advice { inner: advices, .. } => {
cs.query_advice(advices[x][y], Rotation(0)) } _ => unreachable!(), }; let default_x = range_check.get_first_element(col_idx); let col_expr = sel.clone() * range_check .selector_constructor .get_expr_at_idx(col_idx, synthetic_sel); let multiplier = range_check .selector_constructor .get_selector_val_at_idx(col_idx); let not_expr = Expression::Constant(multiplier) - col_expr.clone(); res.extend([( col_expr.clone() * input_query.clone() + not_expr.clone() * Expression::Constant(default_x), *input_col, )]); log::trace!("---------------- col {:?} ------------------", col_idx,); log::trace!("expr: {:?}", col_expr,); log::trace!("multiplier: {:?}", multiplier); log::trace!("not_expr: {:?}", not_expr); log::trace!("default x: {:?}", default_x); res }); } self.range_checks .selectors .insert((range, x, y), multi_col_selector); } } if let VarTensor::Empty = self.range_checks.input { debug!("assigning range check input"); self.range_checks.input = input.clone(); } if let VarTensor::Empty = self.range_checks.index { debug!("assigning range check index"); self.range_checks.index = index.clone(); } Ok(()) } pub fn layout_tables(&mut self, layouter: &mut impl Layouter<F>) -> Result<(), Box<dyn
Error>> {
        for (i, table) in self.static_lookups.tables.values_mut().enumerate() {
            if !table.is_assigned {
                debug!(
                    "laying out table for {}",
                    crate::circuit::ops::Op::<F>::as_string(&table.nonlinearity)
                );
                if i == 0 {
                    table.layout(layouter, false)?;
                } else {
                    table.layout(layouter, true)?;
                }
            }
        }
        Ok(())
    }

    pub fn layout_range_checks(
        &mut self,
        layouter: &mut impl Layouter<F>,
    ) -> Result<(), Box<dyn Error>> {
        for range_check in self.range_checks.ranges.values_mut() {
            if !range_check.is_assigned {
                debug!("laying out range check for {:?}", range_check.range);
                range_check.layout(layouter)?;
            }
        }
        Ok(())
    }

    pub fn layout(
        &mut self,
        region: &mut RegionCtx<F>,
        values: &[ValTensor<F>],
        op: Box<dyn Op<F>>,
    ) -> Result<Option<ValTensor<F>>, Box<dyn Error>> {
        op.layout(self, region, values)
    }
}
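// Hedged usage sketch, not part of the original source: one plausible call order for
// the configuration/layout entry points above, assuming `config: BaseConfig<F>` was
// built during `Circuit::configure` and that `Range` is the signed integer pair used
// by the range checks further below. The names `cs`, `layouter`, `input_var`,
// `index_var` and `logrows` are placeholders.
//
//     // at configure time: register a range check over the advice columns
//     config.configure_range_check(&mut cs, &input_var, &index_var, (-128, 128), logrows)?;
//
//     // at synthesis time: materialise the static lookup tables and range-check tables
//     config.layout_tables(&mut layouter)?;
//     config.layout_range_checks(&mut layouter)?;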
use super::*;
use crate::{
    circuit::{layouts, utils, Tolerance},
    fieldutils::i128_to_felt,
    graph::multiplier_to_scale,
    tensor::{self, Tensor, TensorType, ValTensor},
};
use halo2curves::ff::PrimeField;
use serde::{Deserialize, Serialize};

// Derive restored so that `clone_dyn` (which calls `self.clone()`) and the serde
// imports above are usable.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum HybridOp {
    Recip {
        input_scale: utils::F32,
        output_scale: utils::F32,
        use_range_check_for_int: bool,
    },
    Div {
        denom: utils::F32,
        use_range_check_for_int: bool,
    },
    ReduceMax {
        axes: Vec<usize>,
    },
    ReduceArgMax {
        dim: usize,
    },
    SumPool {
        padding: Vec<(usize, usize)>,
        stride: Vec<usize>,
        kernel_shape: Vec<usize>,
        normalized: bool,
    },
    MaxPool {
        padding: Vec<(usize, usize)>,
        stride: Vec<usize>,
        pool_dims: Vec<usize>,
    },
    ReduceMin {
        axes: Vec<usize>,
    },
    ReduceArgMin {
        dim: usize,
    },
    Softmax {
        input_scale: utils::F32,
        output_scale: utils::F32,
        axes: Vec<usize>,
    },
    RangeCheck(Tolerance),
    Greater,
    GreaterEqual,
    Less,
    LessEqual,
    Equals,
    Gather {
        dim: usize,
        constant_idx: Option<Tensor<usize>>,
    },
    TopK {
        dim: usize,
        k: usize,
        largest: bool,
    },
    OneHot {
        dim: usize,
        num_classes: usize,
    },
}

impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for HybridOp {
    fn requires_homogenous_input_scales(&self) -> Vec<usize> {
        match self {
            HybridOp::Greater | HybridOp::Less | HybridOp::Equals => vec![0, 1],
            HybridOp::GreaterEqual | HybridOp::LessEqual => vec![0, 1],
            _ => vec![],
        }
    }

    fn as_any(&self) -> &dyn Any {
        self
    }

    fn as_string(&self) -> String {
        match self {
            HybridOp::Recip {
                input_scale,
                output_scale,
                use_range_check_for_int,
            } => format!(
                "RECIP (input_scale={}, output_scale={}, use_range_check_for_int={})",
                input_scale, output_scale, use_range_check_for_int
            ),
            HybridOp::Div {
                denom,
                use_range_check_for_int,
            } => format!(
                "DIV (denom={}, use_range_check_for_int={})",
                denom, use_range_check_for_int
            ),
            HybridOp::SumPool {
                padding,
                stride,
                kernel_shape,
                normalized,
            } => format!(
                "SUMPOOL (padding={:?}, stride={:?}, kernel_shape={:?}, normalized={})",
                padding, stride, kernel_shape, normalized
            ),
            HybridOp::ReduceMax { axes } => format!("REDUCEMAX (axes={:?})", axes),
            HybridOp::ReduceArgMax { dim } => format!("REDUCEARGMAX (dim={})", dim),
            HybridOp::MaxPool {
                padding,
                stride,
                pool_dims,
            } => format!(
                "MaxPool (padding={:?}, stride={:?}, pool_dims={:?})",
                padding, stride, pool_dims
            ),
            HybridOp::ReduceMin { axes } => format!("REDUCEMIN (axes={:?})", axes),
            HybridOp::ReduceArgMin { dim } => format!("REDUCEARGMIN (dim={})", dim),
            HybridOp::Softmax {
                input_scale,
                output_scale,
                axes,
            } => format!(
                "SOFTMAX (input_scale={}, output_scale={}, axes={:?})",
                input_scale, output_scale, axes
            ),
            HybridOp::RangeCheck(p) => format!("RANGECHECK (tol={:?})", p),
            HybridOp::Greater => "GREATER".into(),
            HybridOp::GreaterEqual => "GREATEREQUAL".into(),
            HybridOp::Less => "LESS".into(),
            HybridOp::LessEqual => "LESSEQUAL".into(),
            HybridOp::Equals => "EQUALS".into(),
            HybridOp::Gather { dim, .. } => format!("GATHER (dim={})", dim),
            HybridOp::TopK { k, dim, largest } => {
                format!("TOPK (k={}, dim={}, largest={})", k, dim, largest)
            }
            HybridOp::OneHot { dim, num_classes } => {
                format!("ONEHOT (dim={}, num_classes={})", dim, num_classes)
            }
        }
    }

    fn layout(
        &self,
        config: &mut crate::circuit::BaseConfig<F>,
        region: &mut RegionCtx<F>,
        values: &[ValTensor<F>],
    ) -> Result<Option<ValTensor<F>>, Box<dyn std::error::Error>> {
        Ok(Some(match self {
            HybridOp::SumPool {
                padding,
                stride,
                kernel_shape,
                normalized,
            } => layouts::sumpool(
                config,
                region,
                values[..].try_into()?,
                padding,
                stride,
                kernel_shape,
                *normalized,
            )?,
            HybridOp::Recip {
                input_scale,
                output_scale,
                use_range_check_for_int,
            } => {
                if input_scale.0.fract() == 0.0
                    && output_scale.0.fract() == 0.0
                    && *use_range_check_for_int
                {
                    layouts::recip(
                        config,
                        region,
                        values[..].try_into()?,
                        i128_to_felt(input_scale.0 as i128),
                        i128_to_felt(output_scale.0 as i128),
                    )?
                } else {
                    layouts::nonlinearity(
                        config,
                        region,
                        values.try_into()?,
                        &LookupOp::Recip {
                            input_scale: *input_scale,
                            output_scale: *output_scale,
                        },
                    )?
                }
            }
            HybridOp::Div {
                denom,
                use_range_check_for_int,
                ..
            } => {
                if denom.0.fract() == 0.0 && *use_range_check_for_int {
                    layouts::loop_div(
                        config,
                        region,
                        values[..].try_into()?,
                        i128_to_felt(denom.0 as i128),
                    )?
                } else {
                    layouts::nonlinearity(
                        config,
                        region,
                        values.try_into()?,
                        &LookupOp::Div { denom: *denom },
                    )?
                }
            }
            HybridOp::Gather { dim, constant_idx } => {
                if let Some(idx) = constant_idx {
                    tensor::ops::gather(values[0].get_inner_tensor()?, idx, *dim)?.into()
                } else {
                    layouts::gather(config, region, values[..].try_into()?, *dim)?
                }
            }
            HybridOp::MaxPool {
                padding,
                stride,
                pool_dims,
            } => layouts::max_pool(
                config,
                region,
                values[..].try_into()?,
                padding,
                stride,
                pool_dims,
            )?,
            HybridOp::ReduceMax { axes } => {
                layouts::max_axes(config, region, values[..].try_into()?, axes)?
            }
            HybridOp::ReduceArgMax { dim } => {
                layouts::argmax_axes(config, region, values[..].try_into()?, *dim)?
            }
            HybridOp::ReduceMin { axes } => {
                layouts::min_axes(config, region, values[..].try_into()?, axes)?
            }
            HybridOp::ReduceArgMin { dim } => {
                layouts::argmin_axes(config, region, values[..].try_into()?, *dim)?
            }
            HybridOp::Softmax {
                input_scale,
                output_scale,
                axes,
            } => layouts::softmax_axes(
                config,
                region,
                values[..].try_into()?,
                *input_scale,
                *output_scale,
                axes,
            )?,
            HybridOp::RangeCheck(tol) => layouts::range_check_percent(
                config,
                region,
                values[..].try_into()?,
                tol.scale,
                tol.val,
            )?,
            HybridOp::Greater => layouts::greater(config, region, values[..].try_into()?)?,
            HybridOp::GreaterEqual => {
                layouts::greater_equal(config, region, values[..].try_into()?)?
            }
            HybridOp::Less => layouts::less(config, region, values[..].try_into()?)?,
            HybridOp::LessEqual => layouts::less_equal(config, region, values[..].try_into()?)?,
            HybridOp::Equals => layouts::equals(config, region, values[..].try_into()?)?,
            HybridOp::TopK { dim, k, largest } => {
                layouts::topk_axes(config, region, values[..].try_into()?, *k, *dim, *largest)?
            }
            HybridOp::OneHot { dim, num_classes } => {
                layouts::one_hot_axis(config, region, values[..].try_into()?, *num_classes, *dim)?
            }
        }))
    }

    fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> {
        let scale = match self {
            HybridOp::Greater { .. }
            | HybridOp::GreaterEqual { .. }
            | HybridOp::Less { .. }
            | HybridOp::LessEqual { .. }
            | HybridOp::ReduceArgMax { .. }
            | HybridOp::OneHot { .. }
            | HybridOp::ReduceArgMin { .. } => 0,
            HybridOp::Softmax { output_scale, .. } | HybridOp::Recip { output_scale, .. } => {
                multiplier_to_scale(output_scale.0 as f64)
            }
            _ => in_scales[0],
        };
        Ok(scale)
    }

    fn clone_dyn(&self) -> Box<dyn Op<F>> {
        Box::new(self.clone())
    }
}
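// Hedged usage sketch, not part of the original source: building a `HybridOp` and
// inspecting it through the `Op` trait methods defined above. The concrete field
// `halo2curves::bn256::Fr` and the scale values are illustrative assumptions only.
//
//     use halo2curves::bn256::Fr;
//
//     let op = HybridOp::Softmax {
//         input_scale: utils::F32(128.0),
//         output_scale: utils::F32(128.0),
//         axes: vec![1],
//     };
//     // human-readable description used when logging/visualising the graph
//     let descr = Op::<Fr>::as_string(&op);
//     // softmax rescales its output, so the out scale is derived from `output_scale`
//     let scale = Op::<Fr>::out_scale(&op, vec![7])?;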
use std::{ collections::{HashMap, HashSet}, error::Error, ops::Range, }; use halo2_proofs::circuit::Value; use halo2curves::ff::PrimeField; use itertools::Itertools; use log::{error, trace}; use maybe_rayon::{ iter::IntoParallelRefIterator, prelude::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator}, slice::ParallelSliceMut, }; use self::tensor::{create_constant_tensor, create_zero_tensor}; use super::{ chip::{BaseConfig, CircuitError}, region::RegionCtx, }; use crate::{ circuit::{ops::base::BaseOp, utils}, fieldutils::{felt_to_i128, i128_to_felt}, tensor::{ create_unit_tensor, get_broadcasted_shape, ops::{accumulated, add, mult, sub}, Tensor, TensorError, ValType, }, }; use super::*; use crate::circuit::ops::lookup::LookupOp; pub(crate) fn loop_div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, value: &[ValTensor<F>; 1], divisor: F, ) -> Result<ValTensor<F>, Box<dyn Error>> { if divisor == F::ONE { return Ok(value[0].clone()); } let mut divisor = divisor; let mut num_parts = 1; while felt_to_i128(divisor) % 2 == 0 && felt_to_i128(divisor) > (2_i128.pow(F::S - 4)) { divisor = i128_to_felt(felt_to_i128(divisor) / 2); num_parts += 1; } let output = div(config, region, value, divisor)?; if num_parts == 1 { return Ok(output); } let divisor_int = 2_i128.pow(num_parts - 1); let divisor_felt = i128_to_felt(divisor_int); if divisor_int <= 2_i128.pow(F::S - 3) { div(config, region, &[output], divisor_felt) } else { loop_div(config, region, &[output], divisor_felt) } } pub(crate) fn div<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, value: &[ValTensor<F>; 1], div: F, ) -> Result<ValTensor<F>, Box<dyn Error>> { if div == F::ONE { return Ok(va
lue[0].clone()); } let input = value[0].clone(); let input_dims = input.dims(); let range_check_bracket = felt_to_i128(div) / 2; let divisor = create_constant_tensor(div, 1); let divisor = region.assign(&config.custom_gates.inputs[1], &divisor)?; region.increment(divisor.len()); let is_assigned = !input.any_unknowns()? && !divisor.any_unknowns()?; let mut claimed_output: ValTensor<F> = if is_assigned { let input_evals = input.get_int_evals()?; tensor::ops::nonlinearities::const_div(&input_evals.clone(), felt_to_i128(div) as f64) .par_iter() .map(|x| Value::known(i128_to_felt(*x))) .collect::<Tensor<Value<F>>>() .into() } else { Tensor::new( Some(&vec![Value::<F>::unknown(); input.len()]), &[input.len()], )? .into() }; claimed_output.reshape(input_dims)?; region.assign(&config.custom_gates.output, &claimed_output)?; region.increment(claimed_output.len()); let product = pairwise( config, region, &[claimed_output.clone(), divisor.clone()], BaseOp::Mult, )?; let diff_with_input = pairwise( config, region, &[product.clone(), input.clone()], BaseOp::Sub, )?; range_check( config, region, &[diff_with_input], &(-range_check_bracket, range_check_bracket), )?; Ok(claimed_output) } pub(crate) fn recip<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, value: &[ValTensor<F>; 1], input_scale: F, output_scale: F, ) -> Result<ValTensor<F>, Box<dyn Error>> { let input = value[0].clone(); let input_dims = input.dims(); let integer_input_scale = felt_to_i128(input_scale); let integer_output_scale = felt_to_i128(output_scale); let range_check_len = std::cmp::min(integer_output_scale, 2_i128.pow(F::S - 4)); let input_scale_ra
tio = if range_check_len > 0 { i128_to_felt(integer_input_scale * integer_output_scale / range_check_len) } else { F::ONE }; let range_check_bracket = range_check_len / 2; let is_assigned = !input.any_unknowns()?; let mut claimed_output: ValTensor<F> = if is_assigned { let input_evals = input.get_int_evals()?; tensor::ops::nonlinearities::recip( &input_evals, felt_to_i128(input_scale) as f64, felt_to_i128(output_scale) as f64, ) .par_iter() .map(|x| Value::known(i128_to_felt(*x))) .collect::<Tensor<Value<F>>>() .into() } else { Tensor::new( Some(&vec![Value::<F>::unknown(); input.len()]), &[input.len()], )? .into() }; claimed_output.reshape(input_dims)?; let claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?; region.increment(claimed_output.len()); let product = pairwise( config, region, &[claimed_output.clone(), input.clone()], BaseOp::Mult, )?; let rebased_div = loop_div(config, region, &[product], input_scale_ratio)?; let zero_inverse_val = tensor::ops::nonlinearities::zero_recip(felt_to_i128(output_scale) as f64)[0]; let zero_inverse = create_constant_tensor(i128_to_felt(zero_inverse_val), 1); let equal_zero_mask = equals_zero(config, region, &[input.clone()])?; let equal_inverse_mask = equals(config, region, &[claimed_output.clone(), zero_inverse])?; enforce_equality( config, region, &[equal_zero_mask.clone(), equal_inverse_mask], )?; let unit_scale = create_constant_tensor(i128_to_felt(range_check_len), 1); let unit_mask = pairwise(config, region, &[equal_zero_mask, unit_scale], BaseOp::Mult)?; let rebased_offset_div = pairwise(config, region, &[rebased_div, unit_mask], BaseOp::Add)?; range_check( config, region,
&[rebased_offset_div], &(range_check_bracket, 3 * range_check_bracket), )?; Ok(claimed_output) } pub fn dot<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { region.flush()?; let global_start = instant::Instant::now(); let mut values = values.clone(); let mut removal_indices = values[0].get_const_zero_indices()?; let second_zero_indices = values[1].get_const_zero_indices()?; removal_indices.extend(second_zero_indices); removal_indices.par_sort_unstable(); removal_indices.dedup(); values[0].remove_indices(&mut removal_indices, true)?; values[1].remove_indices(&mut removal_indices, true)?; let elapsed = global_start.elapsed(); trace!("filtering const zero indices took: {:?}", elapsed); if values[0].len() != values[1].len() { return Err(Box::new(TensorError::DimMismatch("dot".to_string()))); } if values[0].is_empty() && values[1].is_empty() { return Ok(create_zero_tensor(1)); } let start = instant::Instant::now(); let mut inputs = vec![]; let block_width = config.custom_gates.output.num_inner_cols(); let mut assigned_len = 0; for (i, input) in values.iter_mut().enumerate() { input.pad_to_zero_rem(block_width, ValType::Constant(F::ZERO))?; let inp = { let (res, len) = region.assign_with_duplication( &config.custom_gates.inputs[i], input, &config.check_mode, false, )?; assigned_len = len; res.get_inner()? }; inputs.push(inp); } let elapsed = start.elapsed(); trace!("assigning inputs took: {:?}", elapsed); let start = instant::Instant::now(); let accumulated_dot = accumulated::dot(&[inputs[0].clone(), inputs[1].clone()], block_width)?;
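// `accumulated::dot` produces the running partial sums of the inner product, one per
// column block; the selector loop below gates the first block with `BaseOp::DotInit`
// and every later block with `BaseOp::Dot`, so only the final accumulator cell
// (`last_elem`) holds the complete dot product that gets returned.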
let elapsed = start.elapsed(); trace!("calculating accumulated dot took: {:?}", elapsed); let start = instant::Instant::now(); let (output, output_assigned_len) = region.assign_with_duplication( &config.custom_gates.output, &accumulated_dot.into(), &config.check_mode, true, )?; let elapsed = start.elapsed(); trace!("assigning output took: {:?}", elapsed); if !region.is_dummy() { (0..output_assigned_len) .map(|i| { let (x, _, z) = config .custom_gates .output .cartesian_coord(region.linear_coord() + i * block_width); if z == 0 && i > 0 { return Ok(()); } let selector = if i == 0 { config.custom_gates.selectors.get(&(BaseOp::DotInit, x, 0)) } else { config.custom_gates.selectors.get(&(BaseOp::Dot, x, 0)) }; region.enable(selector, z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } let last_elem = output.get_slice(&[output.len() - 1..output.len()])?; region.increment(assigned_len); let elapsed = global_start.elapsed(); trace!("dot layout took: {:?}, row {}", elapsed, region.row()); trace!("----------------------------"); Ok(last_elem) } pub fn einsum<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, inputs: &[ValTensor<F>], equation: &str, ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut equation = equation.split("->"); let inputs_eq = equation.next().ok_or(CircuitError::InvalidEinsum)?; let output_eq = equation.next().ok_or(Circuit
Error::InvalidEinsum)?; let inputs_eq = inputs_eq.split(',').collect::<Vec<_>>(); if inputs.len() != inputs_eq.len() { return Err(Box::new(TensorError::DimMismatch("einsum".to_string()))); } let mut indices_to_size = HashMap::new(); for (i, input) in inputs.iter().enumerate() { for j in 0..inputs_eq[i].len() { let c = inputs_eq[i] .chars() .nth(j) .ok_or(CircuitError::InvalidEinsum)?; if let std::collections::hash_map::Entry::Vacant(e) = indices_to_size.entry(c) { e.insert(input.dims()[j]); } else if indices_to_size[&c] != input.dims()[j] { return Err(Box::new(TensorError::DimMismatch("einsum".to_string()))); } } } for c in output_eq.chars() { indices_to_size.entry(c).or_insert(1); } let mut output_shape: Vec<usize> = output_eq .chars() .map(|c| { indices_to_size .get(&c) .ok_or(CircuitError::InvalidEinsum) .copied() }) .collect::<Result<Vec<_>, _>>()?; if output_shape.is_empty() { output_shape.push(1); } let mut output: Tensor<ValType<F>> = Tensor::new(None, &output_shape)?; let mut seen = HashSet::new(); let mut common_indices_to_inputs = vec![]; for input in inputs_eq.iter().take(inputs.len()) { for c in input.chars() { if !seen.contains(&c) { seen.insert(c); } else { common_indices_to_inputs.push(c); } } } let non_common_indices = indices_to_size .keys() .filter(|&x| !common_indices_to_inputs.contains(x)) .collect::<Vec<_>>(); let non_common_coord_size = non_common_indices .iter() .map(|d| { if output_eq.contains(**d) { Ok(1) } else { indices_to_size
.get(d) .ok_or(CircuitError::InvalidEinsum) .copied() } }) .collect::<Result<Vec<_>, _>>()? .iter() .product::<usize>(); let cartesian_coord = output_shape .iter() .map(|d| 0..*d) .multi_cartesian_product() .collect::<Vec<_>>(); let mut common_coord = common_indices_to_inputs .iter() .map(|d| { if output_eq.contains(*d) { Ok(0..1) } else { Ok(0..*indices_to_size.get(d).ok_or(CircuitError::InvalidEinsum)?) } }) .collect::<Result<Vec<Range<_>>, Box<dyn Error>>>()? .into_iter() .multi_cartesian_product() .collect::<Vec<_>>(); if common_coord.is_empty() { common_coord.push(vec![]); } let inner_loop_function = |i: usize, region: &mut RegionCtx<'_, F>| { let coord = cartesian_coord[i].clone(); let inputs = (0..inputs.len()) .map(|idx| { let mut slice = vec![]; for (i, c) in inputs_eq[idx].chars().enumerate() { if let Some(idx) = output_eq.find(c) { slice.push(coord[idx]..coord[idx] + 1); } else { slice.push(0..inputs[idx].dims()[i]); } } inputs[idx].get_slice(&slice) }) .collect::<Result<Vec<_>, _>>()?; if non_common_coord_size == 1 && inputs.len() == 2 { Ok(dot( config, region, inputs[..].try_into().map_err(|e| { error!("{}", e); halo2_proofs::plonk::Error::Synthesis })?, )? .get_inner_tensor()?[0] .clone()) } else { let
mut prod_res = None; for common_dim in &common_coord { let inputs = (0..inputs.len()) .map(|idx| { let mut slice = vec![]; for (i, c) in inputs_eq[idx].chars().enumerate() { if let Some(j) = common_indices_to_inputs.iter().position(|&r| r == c) { slice.push(common_dim[j]..common_dim[j] + 1); } else { slice.push(0..inputs[idx].dims()[i]); } } inputs[idx].get_slice(&slice).map_err(|e| { error!("{}", e); halo2_proofs::plonk::Error::Synthesis }) }) .collect::<Result<Vec<_>, _>>()?; let mut input_pairs = vec![]; for input in inputs { input_pairs.push(input.get_inner_tensor()?.clone().into_iter()); } let input_pairs = input_pairs .into_iter() .multi_cartesian_product() .collect::<Vec<_>>(); for pair in input_pairs { let product_across_pair = prod(config, region, &[pair.into()])?; if let Some(product) = prod_res { prod_res = Some( pairwise(config, region, &[product, product_across_pair], BaseOp::Add) .map_err(|e| { error!("{}", e); halo2_proofs::plonk::Error::Synthesis })?, ); } else { prod_res = Some(product_across_pair); }
} } Ok::<_, region::RegionError>( prod_res .ok_or(Into::<region::RegionError>::into("missing prod"))? .get_inner_tensor()?[0] .clone(), ) } }; region.flush()?; region.apply_in_loop(&mut output, inner_loop_function)?; let output: ValTensor<F> = output.into(); Ok(output) } fn _sort_ascending<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut input = values[0].clone(); input.flatten(); let is_assigned = !input.any_unknowns()?; let sorted = if is_assigned { let mut int_evals = input.get_int_evals()?; int_evals.par_sort_unstable_by(|a, b| a.cmp(b)); int_evals .par_iter() .map(|x| Value::known(i128_to_felt(*x))) .collect::<Tensor<Value<F>>>() } else { Tensor::new( Some(&vec![Value::<F>::unknown(); input.len()]), &[input.len()], )? }; let assigned_sort = region.assign(&config.custom_gates.inputs[0], &sorted.into())?; region.increment(assigned_sort.len()); let window_a = assigned_sort.get_slice(&[0..assigned_sort.len() - 1])?; let window_b = assigned_sort.get_slice(&[1..assigned_sort.len()])?; let is_greater = greater_equal(config, region, &[window_b.clone(), window_a.clone()])?; let unit = create_unit_tensor(is_greater.len()); enforce_equality(config, region, &[unit, is_greater])?; shuffles(config, region, &[assigned_sort.clone()], &[input.clone()])?; Ok(assigned_sort) } fn _select_topk<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], k: usize, largest: bool, ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut sorted = _sort_ascending(conf
ig, region, values)?; if largest { sorted.reverse()?; } sorted.get_slice(&[0..k]) } pub fn topk_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], k: usize, dim: usize, largest: bool, ) -> Result<ValTensor<F>, Box<dyn Error>> { let topk_at_k = move |config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1]| -> Result<ValTensor<F>, Box<dyn Error>> { _select_topk(config, region, values, k, largest) }; let output: ValTensor<F> = multi_dim_axes_op(config, region, values, &[dim], topk_at_k)?; Ok(output) } fn select<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { let start = instant::Instant::now(); let (mut input, index) = (values[0].clone(), values[1].clone()); input.flatten(); let dim_indices: ValTensor<F> = Tensor::from((0..input.len() as u64).map(|x| ValType::Constant(F::from(x)))).into(); let is_assigned = !input.any_unknowns()? && !index.any_unknowns()?; let output: ValTensor<F> = if is_assigned && region.witness_gen() { let felt_evals = input.get_felt_evals()?; index .get_int_evals()? .par_iter() .map(|x| Value::known(felt_evals.get(&[*x as usize]))) .collect::<Tensor<Value<F>>>() } else { Tensor::new( Some(&vec![Value::<F>::unknown(); index.len()]), &[index.len()], )? } .into(); let (_, assigned_output) = dynamic_lookup(config, region, &[index, output], &[dim_indices, input])?; let end = start.elapsed(); trace!("select took: {:?}", end); Ok(assigned_output) } fn one_hot<F: PrimeField + TensorType + PartialOrd + std::has
h::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], num_classes: usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { assert_eq!(values[0].dims().len(), 1); assert_eq!(values[0].len(), 1); let input = values[0].clone(); let is_assigned = !input.any_unknowns()?; let output: ValTensor<F> = if is_assigned { let int_evals = input.get_int_evals()?; let res = tensor::ops::one_hot(&int_evals, num_classes, 1)?; res.par_iter() .map(|x| Value::known(i128_to_felt(*x))) .collect::<Tensor<_>>() } else { Tensor::new( Some(&vec![Value::<F>::unknown(); num_classes]), &[num_classes], )? } .into(); let assigned_input = region.assign(&config.custom_gates.inputs[0], &input)?; let assigned_output = boolean_identity(config, region, &[output.clone()], true)?; region.increment(std::cmp::max(assigned_output.len(), assigned_input.len())); let sum = sum(config, region, &[assigned_output.clone()])?; let unit = create_unit_tensor(1); enforce_equality(config, region, &[unit.clone(), sum])?; let gathered = gather( config, region, &[assigned_output.clone(), assigned_input.clone()], 0, )?; enforce_equality(config, region, &[unit, gathered])?; Ok(assigned_output) } pub(crate) fn dynamic_lookup<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, lookups: &[ValTensor<F>; 2], tables: &[ValTensor<F>; 2], ) -> Result<(ValTensor<F>, ValTensor<F>), Box<dyn Error>> { let start = instant::Instant::now(); if lookups[0].len() != lookups[1].len() { return Err("lookups must be same length".into()); } if tables[0].len() != tables[1].len() { return Err("tables must be same length".into()); } let dynamic_lookup_index = region.dynamic_lookup_index(); let (lookup_0,
lookup_1) = (lookups[0].clone(), lookups[1].clone()); let (table_0, table_1) = (tables[0].clone(), tables[1].clone()); let table_0 = region.assign_dynamic_lookup(&config.dynamic_lookups.tables[0], &table_0)?; let _table_1 = region.assign_dynamic_lookup(&config.dynamic_lookups.tables[1], &table_1)?; let table_len = table_0.len(); trace!("assigning tables took: {:?}", start.elapsed()); let table_index = create_constant_tensor(F::from(dynamic_lookup_index as u64), table_len); let _table_index = region.assign_dynamic_lookup(&config.dynamic_lookups.tables[2], &table_index)?; trace!("assigning table index took: {:?}", start.elapsed()); let lookup_0 = region.assign(&config.dynamic_lookups.inputs[0], &lookup_0)?; let lookup_1 = region.assign(&config.dynamic_lookups.inputs[1], &lookup_1)?; let lookup_len = lookup_0.len(); trace!("assigning lookups took: {:?}", start.elapsed()); let lookup_index = create_constant_tensor(F::from(dynamic_lookup_index as u64), lookup_len); let _lookup_index = region.assign(&config.dynamic_lookups.inputs[2], &lookup_index)?; trace!("assigning lookup index took: {:?}", start.elapsed()); if !region.is_dummy() { (0..table_len) .map(|i| { let table_selector = config.dynamic_lookups.table_selectors[0]; let (_, _, z) = config.dynamic_lookups.tables[0] .cartesian_coord(region.combined_dynamic_shuffle_coord() + i); region.enable(Some(&table_selector), z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } if !region.is_dummy() { (0..lookup_len) .map(|i| { let (x, y, z) = config.dynamic_lookups.inputs[0].cartesian_coord(region.linear_coord() + i); let lookup_selector = config .dynamic_lookups .lookup_selectors .get(&(x, y))
.ok_or("missing selectors")?; region.enable(Some(lookup_selector), z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } region.increment_dynamic_lookup_col_coord(table_len); region.increment_dynamic_lookup_index(1); region.increment(lookup_len); let end = start.elapsed(); trace!("dynamic lookup took: {:?}", end); Ok((lookup_0, lookup_1)) } pub(crate) fn shuffles<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, input: &[ValTensor<F>; 1], reference: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let shuffle_index = region.shuffle_index(); let (input, reference) = (input[0].clone(), reference[0].clone()); if input.len() != reference.len() { return Err("input and reference must be same length".into()); } let reference = region.assign_shuffle(&config.shuffles.references[0], &reference)?; let reference_len = reference.len(); let index = create_constant_tensor(F::from(shuffle_index as u64), reference_len); let index = region.assign_shuffle(&config.shuffles.references[1], &index)?; let input = region.assign(&config.shuffles.inputs[0], &input)?; region.assign(&config.shuffles.inputs[1], &index)?; if !region.is_dummy() { (0..reference_len) .map(|i| { let ref_selector = config.shuffles.reference_selectors[0]; let (_, _, z) = config.shuffles.references[0] .cartesian_coord(region.combined_dynamic_shuffle_coord() + i); region.enable(Some(&ref_selector), z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } if !region.is_dummy() { (0..reference_len) .map(|i| { let (x, y, z) = config.custom_gates.inputs[0].cartesian_coord(region.linear_coord() + i)
; let input_selector = config .shuffles .input_selectors .get(&(x, y)) .ok_or("missing selectors")?; region.enable(Some(input_selector), z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } region.increment_shuffle_col_coord(reference_len); region.increment_shuffle_index(1); region.increment(reference_len); Ok(input) } pub(crate) fn one_hot_axis<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], num_classes: usize, dim: usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let input = values[0].clone(); let input_inner = input.get_inner_tensor()?; let mut output_dims = values[0].dims().to_vec(); output_dims.insert(dim, num_classes); let mut op_tensors: Tensor<ValTensor<F>> = Tensor::new(None, input_inner.dims())?; let inner_loop_function = |i: usize, region: &mut RegionCtx<'_, F>| -> Result<ValTensor<F>, _> { let inp = input_inner[i].clone(); let tensor = Tensor::new(Some(&[inp.clone()]), &[1])?; Ok(one_hot(config, region, &[tensor.into()], num_classes)?) }; region.apply_in_loop(&mut op_tensors, inner_loop_function)?; let cartesian_coord = output_dims .iter() .map(|x| 0..*x) .multi_cartesian_product() .collect::<Vec<_>>(); let mut output = Tensor::<ValType<F>>::new(None, &output_dims)?; output = output.par_enum_map(|i, _| { let coord = cartesian_coord[i].clone(); let mut op_idx = coord.clone(); let coord_at_dims = vec![coord[dim]]; op_idx.remove(dim); let op_tensor = op_tensors.get(&op_idx); let op_tensor = op_tensor.get_inner_tensor()?; let one_hot_val = op_tensor.get(&coord_at_dims).clone(); Ok::<_, region::RegionError>(o
ne_hot_val) })?; Ok(output.into()) } pub(crate) fn gather<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], dim: usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let (input, mut index_clone) = (values[0].clone(), values[1].clone()); index_clone.flatten(); if index_clone.is_singleton() { index_clone.reshape(&[1])?; } let input_dims = input.dims(); let mut output_size = input_dims.to_vec(); output_size[dim] = index_clone.dims()[0]; let linear_index = linearize_element_index(config, region, &[index_clone], input_dims, dim, true)?; let mut output = select(config, region, &[input, linear_index])?; output.reshape(&output_size)?; Ok(output) } pub(crate) fn gather_elements<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], dim: usize, ) -> Result<(ValTensor<F>, ValTensor<F>), Box<dyn Error>> { let (input, index) = (values[0].clone(), values[1].clone()); assert_eq!(input.dims().len(), index.dims().len()); let output_size = index.dims().to_vec(); let linear_index = linearize_element_index(config, region, &[index], input.dims(), dim, false)?; let mut output = select(config, region, &[input, linear_index.clone()])?; output.reshape(&output_size)?; Ok((output, linear_index)) } pub(crate) fn gather_nd<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], batch_dims: usize, ) -> Result<(ValTensor<F>, ValTensor<F>), Box<dyn Error>> { let (input, index) = (values[0].clone(), values[1].clone()); let index_dims = index.dims().to_vec(); let input_dims = input.dims().to_vec(); let last_value = index_dims .last() .ok_or(TensorError::DimMismatch("gather_nd".to_string()))?;
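// Follows the ONNX GatherND shape rule: with input rank r, index rank q and b batch
// dims, the trailing index dimension may be at most r - b, and the output rank is
// q + r - index_dims[q - 1] - 1 - b, exactly as computed for `output_size` below.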
if index_dims.last() > Some(&(input_dims.len() - batch_dims)) { return Err(TensorError::DimMismatch("gather_nd".to_string()).into()); } let output_size = { let output_rank = input_dims.len() + index_dims.len() - 1 - batch_dims - last_value; let mut dims = index_dims[..index_dims.len() - 1].to_vec(); let input_offset = batch_dims + last_value; dims.extend(input_dims[input_offset..input_dims.len()].to_vec()); assert_eq!(output_rank, dims.len()); dims }; let linear_index = linearize_nd_index(config, region, &[index], input.dims(), batch_dims)?; let mut output = select(config, region, &[input, linear_index.clone()])?; output.reshape(&output_size)?; Ok((output, linear_index)) } pub(crate) fn linearize_element_index<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], dims: &[usize], dim: usize, is_flat_index: bool, ) -> Result<ValTensor<F>, Box<dyn Error>> { let start_time = instant::Instant::now(); let index = values[0].clone(); if !is_flat_index { assert_eq!(index.dims().len(), dims.len()); if index.dims().len() == 1 { return Ok(index); } } let dim_multiplier: Tensor<usize> = Tensor::new(None, &[dims.len()])?; let dim_multiplier: Tensor<F> = dim_multiplier.par_enum_map(|i, _| { let mut res = 1; for dim in dims.iter().skip(i + 1) { res *= dim; } Ok::<_, region::RegionError>(F::from(res as u64)) })?; let iteration_dims = if is_flat_index { let mut dims = dims.to_vec(); dims[dim] = index.len(); dims } else { index.dims().to_vec() }; let cartesian_coord = iteration_dims .iter() .map(|x| 0..*x) .multi_cartesian_product() .collect::<Vec<_>>(); let val_dim_multiplier:
ValTensor<F> = dim_multiplier .get_slice(&[dim..dim + 1])? .map(|x| ValType::Constant(x)) .into(); let mut output = Tensor::new(None, &[cartesian_coord.len()])?; let inner_loop_function = |i: usize, region: &mut RegionCtx<'_, F>| { let coord = cartesian_coord[i].clone(); let slice: Vec<Range<usize>> = if is_flat_index { coord[dim..dim + 1].iter().map(|x| *x..*x + 1).collect() } else { coord.iter().map(|x| *x..*x + 1).collect::<Vec<_>>() }; let index_val = index.get_slice(&slice)?; let mut const_offset = F::ZERO; for i in 0..dims.len() { if i != dim { const_offset += F::from(coord[i] as u64) * dim_multiplier[i]; } } let const_offset = create_constant_tensor(const_offset, 1); let res = pairwise( config, region, &[index_val, val_dim_multiplier.clone()], BaseOp::Mult, )?; let res = pairwise(config, region, &[res, const_offset], BaseOp::Add)?; Ok(res.get_inner_tensor()?[0].clone()) }; region.apply_in_loop(&mut output, inner_loop_function)?; let elapsed = start_time.elapsed(); trace!("linearize_element_index took: {:?}", elapsed); Ok(output.into()) } pub(crate) fn linearize_nd_index<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], dims: &[usize], batch_dims: usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let index = values[0].clone(); let index_dims = index.dims().to_vec(); let last_dim = index.dims().last().unwrap(); let input_rank = dims[batch_dims..].len(); let dim_multiplier: Tensor<usize> = Tensor::new(None, &[dims.len()])?; let dim_multiplier: Tensor<F> = dim_multiplier.par_enum_map(|i, _| { let mut res = 1; for dim in dims.iter().skip(i + 1) { res *= dim;
} Ok::<_, region::RegionError>(F::from(res as u64)) })?; let iteration_dims = index.dims()[0..batch_dims].to_vec(); let mut batch_cartesian_coord = iteration_dims .iter() .map(|x| 0..*x) .multi_cartesian_product() .collect::<Vec<_>>(); if batch_cartesian_coord.is_empty() { batch_cartesian_coord.push(vec![]); } let index_dim_multiplier: ValTensor<F> = dim_multiplier .get_slice(&[batch_dims..dims.len()])? .map(|x| ValType::Constant(x)) .into(); let mut outer_results = vec![]; for coord in batch_cartesian_coord { let slice: Vec<Range<usize>> = coord.iter().map(|x| *x..*x + 1).collect::<Vec<_>>(); let mut index_slice = index.get_slice(&slice)?; index_slice.reshape(&index_dims[batch_dims..])?; let mut inner_cartesian_coord = index_slice.dims()[0..index_slice.dims().len() - 1] .iter() .map(|x| 0..*x) .multi_cartesian_product() .collect::<Vec<_>>(); if inner_cartesian_coord.is_empty() { inner_cartesian_coord.push(vec![]); } let indices = if last_dim < &input_rank { inner_cartesian_coord .iter() .map(|x| { let slice = x.iter().map(|x| *x..*x + 1).collect::<Vec<_>>(); let index = index_slice.get_slice(&slice)?; let grid = (*last_dim..input_rank) .map(|x| 0..dims[x]) .multi_cartesian_product(); Ok(grid .map(|x| { let index = index.clone(); let constant_valtensor: ValTensor<F> = Tensor::from( x.into_iter().map(|x| ValType::Constant(F::from(x as u64))), ) .into();
index.concat(constant_valtensor) }) .collect::<Result<Vec<_>, TensorError>>()?) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()? .into_iter() .flatten() .collect::<Vec<_>>() } else { inner_cartesian_coord .iter() .map(|x| { let slice = x.iter().map(|x| *x..*x + 1).collect::<Vec<_>>(); index_slice.get_slice(&slice) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()? }; let mut const_offset = F::ZERO; for i in 0..batch_dims { const_offset += F::from(coord[i] as u64) * dim_multiplier[i]; } let const_offset = create_constant_tensor(const_offset, 1); let mut results = vec![]; for index_val in indices { let mut index_val = index_val.clone(); index_val.flatten(); let res = pairwise( config, region, &[index_val.clone(), index_dim_multiplier.clone()], BaseOp::Mult, )?; let res = res.concat(const_offset.clone())?; let res = sum(config, region, &[res])?; results.push(res.get_inner_tensor()?.clone()); if region.witness_gen() { assert!( res.get_int_evals()? .iter() .all(|x| *x < dims.iter().product::<usize>() as i128), "res is greater than the product of the dims {} (coord={}, index_dim_multiplier={}, res={})", dims.iter().product::<usize>(), index_val.show(), index_dim_multiplier.show(), res.show() ); } } let result_tensor = Tensor::from(results.into_iter()); outer_results.push(result_tensor.combine()?); } let out
put = Tensor::from(outer_results.into_iter()); let output = output.combine()?; Ok(output.into()) } pub(crate) fn get_missing_set_elements< F: PrimeField + TensorType + PartialOrd + std::hash::Hash, >( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ordered: bool, ) -> Result<ValTensor<F>, Box<dyn Error>> { let (mut input, fullset) = (values[0].clone(), values[1].clone()); let set_len = fullset.len(); input.flatten(); let is_assigned = !input.any_unknowns()? && !fullset.any_unknowns()?; let mut claimed_output: ValTensor<F> = if is_assigned { let input_evals = input.get_int_evals()?; let mut fullset_evals = fullset.get_int_evals()?.into_iter().collect::<Vec<_>>(); for eval in input_evals.iter() { if let Some(pos) = fullset_evals.iter().position(|x| x == eval) { fullset_evals.remove(pos); } } if fullset_evals.len() != set_len - input.len() { fullset_evals.truncate(set_len - input.len()); } fullset_evals .par_iter() .map(|x| Value::known(i128_to_felt(*x))) .collect::<Tensor<Value<F>>>() .into() } else { let dim = fullset.len() - input.len(); Tensor::new(Some(&vec![Value::<F>::unknown(); dim]), &[dim])?.into() }; claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?; let input_and_claimed_output = input.concat(claimed_output.clone())?; shuffles( config, region, &[input_and_claimed_output.clone()], &[fullset.clone()], )?; if ordered { claimed_output = _sort_ascending(config, region, &[claimed_output])?; } Ok(claimed_output) } pub(crate) fn scatter_elements<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTen
sor<F>; 3], dim: usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let (input, mut index, src) = (values[0].clone(), values[1].clone(), values[2].clone()); assert_eq!(input.dims().len(), index.dims().len()); if !index.all_prev_assigned() { index = region.assign(&config.custom_gates.inputs[1], &index)?; region.increment(index.len()); } let is_assigned = !input.any_unknowns()? && !index.any_unknowns()? && !src.any_unknowns()?; let claimed_output: ValTensor<F> = if is_assigned && region.witness_gen() { let input_inner = input.get_int_evals()?; let index_inner = index.get_int_evals()?.map(|x| x as usize); let src_inner = src.get_int_evals()?; let res = tensor::ops::scatter(&input_inner, &index_inner, &src_inner, dim)?; res.par_iter() .map(|x| Value::known(i128_to_felt(*x))) .collect::<Tensor<Value<F>>>() .into() } else { Tensor::new( Some(&vec![Value::<F>::unknown(); input.len()]), &[input.len()], )? .into() }; let mut claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?; region.increment(claimed_output.len()); claimed_output.reshape(input.dims())?; let (gather_src, linear_index) = gather_elements( config, region, &[claimed_output.clone(), index.clone()], dim, )?; enforce_equality(config, region, &[gather_src, src])?; let full_index_set: ValTensor<F> = Tensor::from((0..input.len() as u64).map(|x| ValType::Constant(F::from(x)))).into(); let input_indices = get_missing_set_elements( config, region, &[linear_index, full_index_set.clone()], true, )?; claimed_output.flatten(); let (gather_input, _) = gather_elements( config, region, &[claimed_output.clone(), input_indices.clone()], 0, )?; dynamic_lookup( config, region,
&[input_indices, gather_input], &[full_index_set, input.clone()], )?; claimed_output.reshape(input.dims())?; Ok(claimed_output) } pub(crate) fn scatter_nd<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 3], ) -> Result<ValTensor<F>, Box<dyn Error>> { let (input, mut index, src) = (values[0].clone(), values[1].clone(), values[2].clone()); if !index.all_prev_assigned() { index = region.assign(&config.custom_gates.inputs[1], &index)?; region.increment(index.len()); } let is_assigned = !input.any_unknowns()? && !index.any_unknowns()? && !src.any_unknowns()?; let claimed_output: ValTensor<F> = if is_assigned && region.witness_gen() { let input_inner = input.get_int_evals()?; let index_inner = index.get_int_evals()?.map(|x| x as usize); let src_inner = src.get_int_evals()?; let res = tensor::ops::scatter_nd(&input_inner, &index_inner, &src_inner)?; res.par_iter() .map(|x| Value::known(i128_to_felt(*x))) .collect::<Tensor<Value<F>>>() .into() } else { Tensor::new( Some(&vec![Value::<F>::unknown(); input.len()]), &[input.len()], )? .into() }; let mut claimed_output = region.assign(&config.custom_gates.output, &claimed_output)?; region.increment(claimed_output.len()); claimed_output.reshape(input.dims())?; let (gather_src, linear_index) = gather_nd(config, region, &[claimed_output.clone(), index.clone()], 0)?; enforce_equality(config, region, &[gather_src, src])?; let full_index_set: ValTensor<F> = Tensor::from((0..input.len() as u64).map(|x| ValType::Constant(F::from(x)))).into(); let input_indices = get_missing_set_elements( config, region, &[linear_index, full_index_set.clone()], true, )?; claimed_output.flatten();
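// Second half of the scatter consistency argument: indices that were *not* written
// (the missing set computed above) must still hold the original input values; this is
// enforced by gathering them from the claimed output and dynamically looking the
// resulting (index, value) pairs up against the full input table below.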
let (gather_input, _) = gather_elements( config, region, &[claimed_output.clone(), input_indices.clone()], 0, )?; dynamic_lookup( config, region, &[input_indices, gather_input], &[full_index_set, input.clone()], )?; claimed_output.reshape(input.dims())?; Ok(claimed_output) } pub fn sum<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { region.flush()?; let global_start = instant::Instant::now(); let mut values = values.clone(); let mut removal_indices = values[0].get_const_zero_indices()?; removal_indices.par_sort_unstable(); removal_indices.dedup(); values[0].remove_indices(&mut removal_indices, true)?; let elapsed = global_start.elapsed(); trace!("filtering const zero indices took: {:?}", elapsed); if values[0].is_empty() { return Ok(create_zero_tensor(1)); } let block_width = config.custom_gates.output.num_inner_cols(); let assigned_len: usize; let input = { let mut input = values[0].clone(); input.pad_to_zero_rem(block_width, ValType::Constant(F::ZERO))?; let (res, len) = region.assign_with_duplication( &config.custom_gates.inputs[1], &input, &config.check_mode, false, )?; assigned_len = len; res.get_inner()? }; let accumulated_sum = accumulated::sum(&input, block_width)?; let (output, output_assigned_len) = region.assign_with_duplication( &config.custom_gates.output, &accumulated_sum.into(), &config.check_mode, true, )?; if !region.is_dummy() { for i in 0..output_assigned_len { let (x, _, z) = config .custom_gates .output .cartesian_coord
(region.linear_coord() + i * block_width); if z == 0 && i > 0 { continue; } let selector = if i == 0 { config.custom_gates.selectors.get(&(BaseOp::SumInit, x, 0)) } else { config.custom_gates.selectors.get(&(BaseOp::Sum, x, 0)) }; region.enable(selector, z)?; } } let last_elem = output.get_slice(&[output.len() - 1..output.len()])?; region.increment(assigned_len); Ok(last_elem) } pub fn prod<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { region.flush()?; let global_start = instant::Instant::now(); let removal_indices = values[0].get_const_zero_indices()?; let elapsed = global_start.elapsed(); trace!("finding const zero indices took: {:?}", elapsed); if !removal_indices.is_empty() { return Ok(create_zero_tensor(1)); } let block_width = config.custom_gates.output.num_inner_cols(); let assigned_len: usize; let input = { let mut input = values[0].clone(); input.pad_to_zero_rem(block_width, ValType::Constant(F::ONE))?; let (res, len) = region.assign_with_duplication( &config.custom_gates.inputs[1], &input, &config.check_mode, false, )?; assigned_len = len; res.get_inner()? }; let accumulated_prod = accumulated::prod(&input, block_width)?; let (output, output_assigned_len) = region.assign_with_duplication( &config.custom_gates.output, &accumulated_prod.into(), &config.check_mode, true, )?; if !region.is_dummy() { (0..output_assigned_len) .map(|i| { let (x, _, z) = config .custom_gates .out
put .cartesian_coord(region.linear_coord() + i * block_width); if z == 0 && i > 0 { return Ok(()); } let selector = if i == 0 { config .custom_gates .selectors .get(&(BaseOp::CumProdInit, x, 0)) } else { config.custom_gates.selectors.get(&(BaseOp::CumProd, x, 0)) }; region.enable(selector, z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } let last_elem = output.get_slice(&[output.len() - 1..output.len()])?; region.increment(assigned_len); Ok(last_elem) } fn axes_wise_op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axes: &[usize], op: impl Fn( &BaseConfig<F>, &mut RegionCtx<F>, &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> + Send + Sync, ) -> Result<ValTensor<F>, Box<dyn Error>> { let a = &values[0]; if axes.is_empty() { return Ok(a.clone()); } let mut new_dims = vec![]; for i in 0..a.dims().len() { if !axes.contains(&i) { new_dims.push(a.dims()[i]); } else { new_dims.push(1); } } let mut res = Tensor::new(None, &new_dims)?; let cartesian_coord = new_dims .iter() .map(|x| 0..*x) .multi_cartesian_product() .collect::<Vec<_>>(); let inner_loop_function = |i: usize, region: &mut RegionCtx<'_, F>| { let coord = cartesian_coord[i].clone(); let mut prod_dims = vec![]; for (i, c) in coord.iter().enumerate() { if axes.contains(&i) { prod_dims.push(0..a.dims()[i]); } else { prod_di
ms.push(*c..*c + 1); } } let values = a.get_slice(&prod_dims)?; let op = op(config, region, &[values])?; Ok(op.get_inner_tensor()?[0].clone()) }; region.apply_in_loop(&mut res, inner_loop_function)?; Ok(res.into()) } pub fn prod_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axes: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { axes_wise_op(config, region, values, axes, prod) } pub fn sum_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axes: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { axes_wise_op(config, region, values, axes, sum) } pub fn argmax_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], dim: usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let argmax = move |config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1]| -> Result<ValTensor<F>, Box<dyn Error>> { argmax(config, region, values) }; axes_wise_op(config, region, values, &[dim], argmax) } pub fn max_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axes: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { axes_wise_op(config, region, values, axes, max) } pub fn argmin_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], dim: usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let arg
min = move |config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1]| -> Result<ValTensor<F>, Box<dyn Error>> { argmin(config, region, values) }; axes_wise_op(config, region, values, &[dim], argmin) } pub fn min_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axes: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { axes_wise_op(config, region, values, axes, min) } pub(crate) fn pairwise<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], op: BaseOp, ) -> Result<ValTensor<F>, Box<dyn Error>> { let global_start = instant::Instant::now(); let (mut lhs, mut rhs) = (values[0].clone(), values[1].clone()); let broadcasted_shape = get_broadcasted_shape(lhs.dims(), rhs.dims())?; lhs.expand(&broadcasted_shape)?; rhs.expand(&broadcasted_shape)?; let orig_lhs = lhs.clone(); let orig_rhs = rhs.clone(); let first_zero_indices = lhs.get_const_zero_indices()?; let second_zero_indices = rhs.get_const_zero_indices()?; let mut removal_indices = match op { BaseOp::Add | BaseOp::Mult => { let mut removal_indices = first_zero_indices.clone(); removal_indices.extend(second_zero_indices.clone()); removal_indices } BaseOp::Sub => second_zero_indices.clone(), _ => return Err(Box::new(CircuitError::UnsupportedOp)), }; removal_indices.dedup(); let removal_indices: HashSet<&usize> = HashSet::from_iter(removal_indices.iter()); let removal_indices_ptr = &removal_indices; if lhs.len() != rhs.len() { return Err(Box::new(CircuitError::DimMismatch(format!( "pairwise {} layout", op.as_str() )))); } let mut inputs = vec![];
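// Cells whose operands are constant zeros are omitted from assignment
// (`assign_with_omissions`): for Mult the result is the constant zero, for Add it is
// simply the other operand, and for Sub (zero rhs) it is the lhs, so those outputs
// are reconstructed without constraints once the region has been assigned.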
for (i, input) in [lhs.clone(), rhs.clone()].iter().enumerate() { let inp = { let res = region.assign_with_omissions( &config.custom_gates.inputs[i], input, removal_indices_ptr, )?; res.get_inner()? }; inputs.push(inp); } let start = instant::Instant::now(); let op_result = match op { BaseOp::Add => add(&inputs), BaseOp::Sub => sub(&inputs), BaseOp::Mult => mult(&inputs), _ => return Err(Box::new(CircuitError::UnsupportedOp)), } .map_err(|e| { error!("{}", e); halo2_proofs::plonk::Error::Synthesis })?; let elapsed = start.elapsed(); let assigned_len = inputs[0].len() - removal_indices.len(); let mut output = region.assign_with_omissions( &config.custom_gates.output, &op_result.into(), removal_indices_ptr, )?; trace!("pairwise {} calc took {:?}", op.as_str(), elapsed); if !region.is_dummy() { (0..assigned_len) .map(|i| { let (x, y, z) = config.custom_gates.inputs[0].cartesian_coord(region.linear_coord() + i); let selector = config.custom_gates.selectors.get(&(op.clone(), x, y)); region.enable(selector, z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } region.increment(assigned_len); let a_tensor = orig_lhs.get_inner_tensor()?; let b_tensor = orig_rhs.get_inner_tensor()?; let first_zero_indices: HashSet<&usize> = HashSet::from_iter(first_zero_indices.iter()); let second_zero_indices: HashSet<&usize> = HashSet::from_iter(second_zero_indices.iter()); trace!("setting up indices took {:?}", start.elapsed()); if !removal_indices_ptr.is_empty() { output .get_inner_tensor_mut()? .par_enum_map_mut_filtered(removal_indices_ptr, |i| { let val = match
op { BaseOp::Add => { let a_is_null = first_zero_indices.contains(&i); let b_is_null = second_zero_indices.contains(&i); if a_is_null && b_is_null { ValType::Constant(F::ZERO) } else if a_is_null { b_tensor[i].clone() } else { a_tensor[i].clone() } } BaseOp::Sub => { let a_is_null = first_zero_indices.contains(&i); if a_is_null { ValType::Constant(F::ZERO) } else { a_tensor[i].clone() } } BaseOp::Mult => ValType::Constant(F::ZERO), _ => unreachable!(), }; Ok::<_, TensorError>(val) })?; } output.reshape(&broadcasted_shape)?; let end = global_start.elapsed(); trace!( "pairwise {} layout took {:?}, row: {}", op.as_str(), end, region.row() ); Ok(output) } pub fn mean_of_squares_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axes: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { let squared = pow(config, region, values, 2)?; let sum_squared = sum_axes(config, region, &[squared], axes)?; let dividand: usize = values[0].len() / sum_squared.len(); let mean_squared = div(config, region, &[sum_squared], F::from(dividand as u64))?; Ok(mean_squared) } pub(crate) fn expand<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1],
shape: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut assigned_input = region.assign(&config.custom_gates.inputs[0], &values[0])?; assigned_input.expand(shape)?; region.increment(assigned_input.len()); Ok(assigned_input) } pub fn greater<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { let (mut lhs, mut rhs) = (values[0].clone(), values[1].clone()); let broadcasted_shape = get_broadcasted_shape(lhs.dims(), rhs.dims())?; lhs.expand(&broadcasted_shape)?; rhs.expand(&broadcasted_shape)?; let diff = pairwise(config, region, &[lhs, rhs], BaseOp::Sub)?; nonlinearity( config, region, &[diff], &LookupOp::GreaterThan { a: utils::F32(0.) }, ) } pub fn greater_equal<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { let (mut lhs, mut rhs) = (values[0].clone(), values[1].clone()); let broadcasted_shape = get_broadcasted_shape(lhs.dims(), rhs.dims())?; lhs.expand(&broadcasted_shape)?; rhs.expand(&broadcasted_shape)?; let diff = pairwise(config, region, &[lhs, rhs], BaseOp::Sub)?; nonlinearity( config, region, &[diff], &LookupOp::GreaterThanEqual { a: utils::F32(0.) }, ) } pub fn less<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { greater(config, region, &[values[1].clone(), values[0].clone()]) } pub fn less_equal<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region:
&mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { greater_equal(config, region, &[values[1].clone(), values[0].clone()]) } pub fn and<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { let a = boolean_identity(config, region, &[values[0].clone()], true)?; let b = boolean_identity(config, region, &[values[1].clone()], true)?; let res = pairwise(config, region, &[a, b], BaseOp::Mult)?; Ok(res) } pub fn or<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { let a = values[0].clone(); let b = values[1].clone(); let b = boolean_identity(config, region, &[b], true)?; let iff_values = &[a.clone(), a, b]; let res = iff(config, region, iff_values)?; Ok(res) } pub fn equals<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { let diff = pairwise(config, region, values, BaseOp::Sub)?; equals_zero(config, region, &[diff]) } pub(crate) fn equals_zero<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let values = values[0].clone(); let values_inverse = values.inverse()?; let product_values_and_invert = pairwise( config, region, &[values.clone(), values_inverse], BaseOp::Mult, )?; let ones = create_unit_tensor(1); let output = pairwise( config, region, &[ones, product_values_and
_invert], BaseOp::Sub, )?; let prod_check = pairwise(config, region, &[values, output.clone()], BaseOp::Mult)?; let zero_tensor = create_zero_tensor(prod_check.len()); enforce_equality(config, region, &[prod_check, zero_tensor])?; Ok(output) } pub fn xor<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { let lhs = values[0].clone(); let rhs = values[1].clone(); let lhs_not = not(config, region, &[lhs.clone()])?; let rhs_not = not(config, region, &[rhs.clone()])?; let lhs_and_rhs_not = and(config, region, &[lhs, rhs_not.clone()])?; let lhs_not_and_rhs = and(config, region, &[rhs, lhs_not])?; let res: ValTensor<F> = pairwise( config, region, &[lhs_and_rhs_not, lhs_not_and_rhs], BaseOp::Add, )?; Ok(res) } pub fn not<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let mask = values[0].clone(); let unit = create_unit_tensor(1); let nil = create_zero_tensor(1); let res = iff(config, region, &[mask, nil, unit])?; Ok(res) } pub fn iff<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 3], ) -> Result<ValTensor<F>, Box<dyn Error>> { let (mask, a, b) = (&values[0], &values[1], &values[2]); let unit = create_unit_tensor(1); let assigned_mask = boolean_identity(config, region, &[mask.clone()], true)?; let one_minus_mask = pairwise(config, region, &[unit, assigned_mask.clone()], BaseOp::Sub)?; let masked_a = pairwise(config, region, &[a.clone(), assigned_mask], BaseOp::Mult)?; let maske
d_b = pairwise(config, region, &[b.clone(), one_minus_mask], BaseOp::Mult)?; let res = pairwise(config, region, &[masked_a, masked_b], BaseOp::Add)?; Ok(res) } pub fn neg<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let nil = create_zero_tensor(1); pairwise(config, region, &[nil, values[0].clone()], BaseOp::Sub) } pub fn sumpool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>], padding: &[(usize, usize)], stride: &[usize], kernel_shape: &[usize], normalized: bool, ) -> Result<ValTensor<F>, Box<dyn Error>> { let batch_size = values[0].dims()[0]; let image_channels = values[0].dims()[1]; let kernel_len = kernel_shape.iter().product(); let mut kernel = create_unit_tensor(kernel_len); let mut kernel_dims = vec![1, 1]; kernel_dims.extend(kernel_shape); kernel.reshape(&kernel_dims)?; let kernel = region.assign(&config.custom_gates.inputs[1], &kernel)?; region.increment(kernel.len()); let cartesian_coord = [(0..batch_size), (0..image_channels)] .iter() .cloned() .multi_cartesian_product() .collect::<Vec<_>>(); let mut res = vec![]; cartesian_coord .iter() .map(|coord| { let (b, i) = (coord[0], coord[1]); let input = values[0].get_slice(&[b..b + 1, i..i + 1])?; let output = conv(config, region, &[input, kernel.clone()], padding, stride)?; res.push(output); Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; let shape = &res[0].dims()[2..]; let mut last_elem = res[1..] .iter() .try_fold(res[0].clone(), |acc, elem| acc.concat(elem.clone()))?; last_elem.reshape(&[&[batch_size, image_cha
nnels], shape].concat())?; if normalized { last_elem = loop_div(config, region, &[last_elem], F::from(kernel_len as u64))?; } Ok(last_elem) } pub fn max_pool<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], padding: &[(usize, usize)], stride: &[usize], pool_dims: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { let image = values[0].clone(); let image_dims = image.dims(); let (batch, input_channels) = (image_dims[0], image_dims[1]); let mut padded_image = image.clone(); padded_image.pad(padding.to_vec(), 2)?; let slides = image_dims[2..] .iter() .enumerate() .map(|(i, d)| { let d = padding[i].0 + d + padding[i].1; d.checked_sub(pool_dims[i]) .ok_or_else(|| TensorError::Overflow("conv".to_string()))? .checked_div(stride[i]) .ok_or_else(|| TensorError::Overflow("conv".to_string()))? .checked_add(1) .ok_or_else(|| TensorError::Overflow("conv".to_string())) }) .collect::<Result<Vec<_>, TensorError>>()?; let mut output_dims = vec![batch, input_channels]; output_dims.extend(slides); let mut output: Tensor<ValType<F>> = Tensor::new(None, &output_dims)?; let cartesian_coord = output_dims .iter() .map(|x| 0..*x) .multi_cartesian_product() .collect::<Vec<_>>(); output .iter_mut() .enumerate() .map(|(flat_index, o)| { let coord = &cartesian_coord[flat_index]; let (b, i) = (coord[0], coord[1]); let mut slice = vec![b..b + 1, i..i + 1]; slice.extend( coord[2..] .iter() .zip(stride.iter()) .zip(pool_dims.iter()) .map(|((c, s), k)| { let start = c * s;
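// Output coordinate `c` along this spatial axis reads the input window
// [c * stride, c * stride + pool_dim).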
let end = start + k; start..end }), ); let slice = padded_image.get_slice(&slice)?; let max_w = max(config, region, &[slice])?; *o = max_w.get_inner_tensor()?[0].clone(); Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; let res: ValTensor<F> = output.into(); Ok(res) } pub fn deconv< F: PrimeField + TensorType + PartialOrd + std::hash::Hash + std::marker::Send + std::marker::Sync, >( config: &BaseConfig<F>, region: &mut RegionCtx<F>, inputs: &[ValTensor<F>], padding: &[(usize, usize)], output_padding: &[usize], stride: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { let has_bias = inputs.len() == 3; let (image, kernel) = (&inputs[0], &inputs[1]); if stride.iter().any(|&s| s == 0) { return Err(Box::new(TensorError::DimMismatch( "non-positive stride is not supported for deconv".to_string(), ))); } let null_val = ValType::Constant(F::ZERO); let mut expanded_image = image.clone(); for (i, s) in stride.iter().enumerate() { expanded_image.intercalate_values(null_val.clone(), *s, 2 + i)?; } expanded_image.pad( kernel.dims()[2..] .iter() .map(|d| (d - 1, d - 1)) .collect::<Vec<_>>(), 2, )?; let channel_coord = (0..kernel.dims()[0]) .cartesian_product(0..kernel.dims()[1]) .collect::<Vec<_>>(); let slice_coord = expanded_image .dims() .iter() .enumerate() .map(|(i, d)| { if i >= 2 { padding[i - 2].0..d - padding[i - 2].1 + output_padding[i - 2] } else { 0..*d } }) .collect::<Vec<_>>(); let sliced_expanded_image = expanded_imag
e.get_slice(&slice_coord)?; let mut inverted_kernels = vec![]; for (i, j) in channel_coord { let channel = kernel.get_slice(&[i..i + 1, j..j + 1])?; let mut channel = Tensor::from(channel.get_inner_tensor()?.clone().into_iter().rev()); channel.reshape(&kernel.dims()[2..])?; inverted_kernels.push(channel); } let mut deconv_kernel = Tensor::new(Some(&inverted_kernels), &[inverted_kernels.len()])?.combine()?; deconv_kernel.reshape(kernel.dims())?; if kernel.dims()[0] == sliced_expanded_image.dims()[1] { let mut dims = deconv_kernel.dims().to_vec(); dims.swap(0, 1); deconv_kernel.reshape(&dims)?; } let conv_input = if has_bias { vec![ sliced_expanded_image, deconv_kernel.clone().into(), inputs[2].clone(), ] } else { vec![sliced_expanded_image, deconv_kernel.clone().into()] }; let conv_dim = kernel.dims()[2..].len(); let output = conv( config, region, &conv_input, &vec![(0, 0); conv_dim], &vec![1; conv_dim], )?; Ok(output) } pub fn conv< F: PrimeField + TensorType + PartialOrd + std::hash::Hash + std::marker::Send + std::marker::Sync, >( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>], padding: &[(usize, usize)], stride: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { let has_bias = values.len() == 3; let (mut image, mut kernel) = (values[0].clone(), values[1].clone()); if stride.iter().any(|&s| s == 0) { return Err(Box::new(TensorError::DimMismatch( "non-positive stride is not supported for conv".to_string(), ))); } let mut assigned_len = vec![]; if !kernel.all_prev_assigned() { kernel = region.assign(&config.custom_gates.inputs[0], &kernel)?; assigned_len.push(kernel.len()); }
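// Kernel and image are assigned to different advice blocks, so they can share rows;
// the cursor below advances by the larger of the two assignment lengths.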
if !image.all_prev_assigned() { image = region.assign(&config.custom_gates.inputs[1], &image)?; assigned_len.push(image.len()); } if !assigned_len.is_empty() { region.increment(*assigned_len.iter().max().unwrap()); } let image_dims = image.dims(); let kernel_dims = kernel.dims(); let mut padded_image = image.clone(); padded_image.pad(padding.to_vec(), 2)?; let batch_size = image_dims[0]; let input_channels = image_dims[1]; let output_channels = kernel_dims[0]; log::debug!( "batch_size: {}, output_channels: {}, input_channels: {}", batch_size, output_channels, input_channels ); let slides = image_dims[2..] .iter() .enumerate() .map(|(i, d)| { let d = padding[i].0 + d + padding[i].1; d.checked_sub(kernel_dims[i + 2]) .ok_or_else(|| TensorError::Overflow("conv".to_string()))? .checked_div(stride[i]) .ok_or_else(|| TensorError::Overflow("conv".to_string()))? .checked_add(1) .ok_or_else(|| TensorError::Overflow("conv".to_string())) }) .collect::<Result<Vec<_>, TensorError>>()?; log::debug!("slides: {:?}", slides); let num_groups = input_channels / kernel_dims[1]; let input_channels_per_group = input_channels / num_groups; let output_channels_per_group = output_channels / num_groups; log::debug!( "num_groups: {}, input_channels_per_group: {}, output_channels_per_group: {}", num_groups, input_channels_per_group, output_channels_per_group ); if output_channels_per_group == 0 { return Err(Box::new(TensorError::DimMismatch(format!( "Given groups={}, expected kernel to be at least {} at dimension 0 but got {} instead", num_groups, num_groups, output_channels_per_group )))); } let num_outputs = batch_size * num_groups * output_channels_per
_group * slides.iter().product::<usize>(); log::debug!("num_outputs: {}", num_outputs); let mut output: Tensor<ValType<F>> = Tensor::new(None, &[num_outputs])?; let mut iterations = vec![0..batch_size, 0..num_groups, 0..output_channels_per_group]; for slide in slides.iter() { iterations.push(0..*slide); } let cartesian_coord = iterations .iter() .cloned() .multi_cartesian_product() .collect::<Vec<_>>(); let inner_loop_function = |idx: usize, region: &mut RegionCtx<F>| { let cartesian_coord_per_group = &cartesian_coord[idx]; let (batch, group, i) = ( cartesian_coord_per_group[0], cartesian_coord_per_group[1], cartesian_coord_per_group[2], ); let start_channel = group * input_channels_per_group; let end_channel = start_channel + input_channels_per_group; let mut slices = vec![batch..batch + 1, start_channel..end_channel]; for (i, stride) in stride.iter().enumerate() { let coord = cartesian_coord_per_group[3 + i] * stride; let kernel_dim = kernel_dims[2 + i]; slices.push(coord..(coord + kernel_dim)); } let mut local_image = padded_image.get_slice(&slices)?; local_image.flatten(); let start_kernel_index = group * output_channels_per_group + i; let end_kernel_index = start_kernel_index + 1; let mut local_kernel = kernel.get_slice(&[start_kernel_index..end_kernel_index])?; local_kernel.flatten(); let mut res = einsum(config, region, &[local_image, local_kernel], "i,i->")?; if has_bias { let bias_index = if values[2].len() > 1 { start_kernel_index } else { 0 }; let bias = values[2].get_single_elem(bias_index)?; res = pairwise(config, region, &[res, bias], BaseOp::Add)?; } region.flush()?; Ok(res.get_inner_tensor()?[0].c
lone()) }; region.flush()?; region.apply_in_loop(&mut output, inner_loop_function)?; let reshape_output = |output: &mut Tensor<ValType<F>>| -> Result<(), TensorError> { let mut dims = vec![batch_size, output_channels]; dims.extend(slides.iter().cloned()); output.reshape(&dims)?; Ok(()) }; reshape_output(&mut output)?; let output: ValTensor<_> = output.into(); Ok(output) } pub(crate) fn pow<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], exponent: u32, ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut t = values[0].clone(); for _ in 1..exponent { t = pairwise(config, region, &[t, values[0].clone()], BaseOp::Mult)?; } Ok(t) } pub(crate) fn rescale<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>], scales: &[(usize, u128)], ) -> Result<Vec<ValTensor<F>>, Box<dyn Error>> { let mut rescaled_inputs = vec![]; for (i, ri) in values.iter().enumerate() { if scales[i].1 == 1 { rescaled_inputs.push(ri.clone()); continue; } let multiplier = create_constant_tensor(F::from(scales[i].1 as u64), 1); let scaled_input = pairwise(config, region, &[ri.clone(), multiplier], BaseOp::Mult)?; rescaled_inputs.push(scaled_input); } Ok(rescaled_inputs) } pub(crate) fn reshape<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( values: &[ValTensor<F>; 1], new_dims: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut t = values[0].clone(); t.reshape(new_dims)?; Ok(t) } pub(crate) fn move_axis<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( values: &[ValTensor<F>; 1], source: usize, destination: usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut t = values[0].clone();
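// Moving an axis only permutes the tensor's index map; no new cells are assigned
// and no selectors are enabled, so the region cursor is not advanced.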
t.move_axis(source, destination)?; Ok(t) } pub(crate) fn resize<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], scales: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut output = region.assign(&config.custom_gates.output, &values[0])?; region.increment(output.len()); output.resize(scales)?; Ok(output) } pub(crate) fn slice<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axis: &usize, start: &usize, end: &usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut output = values[0].clone(); let is_assigned = output.all_prev_assigned(); if !is_assigned { output = region.assign(&config.custom_gates.output, &values[0])?; region.increment(output.len()); } output.slice(axis, start, end)?; Ok(output) } pub(crate) fn trilu<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], k: &i32, upper: &bool, ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut output = values[0].clone(); let is_assigned = output.all_prev_assigned(); if !is_assigned { output = region.assign(&config.custom_gates.inputs[0], &values[0])?; } let res = tensor::ops::trilu(output.get_inner_tensor()?, *k, *upper)?; let output = region.assign(&config.custom_gates.output, &res.into())?; region.increment(output.len()); Ok(output) } pub(crate) fn concat<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( values: &[ValTensor<F>], axis: &usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let collected_inner: Result<Vec<&Tensor<_>>, _> = values.iter().map(|e| e.get_inner_tensor()).collect(); let collected_inner = collected_inner?; Ok(tensor::ops::concat(&collected_inn
er, *axis)?.into()) } pub(crate) fn identity<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut output = values[0].clone(); if !output.all_prev_assigned() { output = region.assign(&config.custom_gates.output, &values[0])?; region.increment(output.len()); } Ok(output) } pub(crate) fn boolean_identity<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], assign: bool, ) -> Result<ValTensor<F>, Box<dyn Error>> { let output = if assign || !values[0].get_const_indices()?.is_empty() { let output = region.assign(&config.custom_gates.output, &values[0])?; region.increment(output.len()); output } else { values[0].clone() }; if !region.is_dummy() { (0..output.len()) .map(|j| { let index = region.linear_coord() - j - 1; let (x, y, z) = config.custom_gates.output.cartesian_coord(index); let selector = config .custom_gates .selectors .get(&(BaseOp::IsBoolean, x, y)); region.enable(selector, z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } Ok(output) } pub(crate) fn downsample<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axis: &usize, stride: &usize, modulo: &usize, ) -> Result<ValTensor<F>, Box<dyn Error>> { let input = region.assign(&config.custom_gates.inputs[0], &values[0])?; let processed_output = tensor::ops::downsample(input.get_inner_tensor()?, *axis, *stride, *modulo)?; let output = region.assign(&config.custom_gates.output, &pro
cessed_output.into())?; region.increment(std::cmp::max(input.len(), output.len())); Ok(output) } pub(crate) fn enforce_equality<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 2], ) -> Result<ValTensor<F>, Box<dyn Error>> { if values[0].len() != values[1].len() { return Err(Box::new(TensorError::DimMismatch( "enforce_equality".to_string(), ))); } let input = region.assign(&config.custom_gates.inputs[1], &values[0])?; let output = region.assign(&config.custom_gates.output, &values[1])?; if !region.is_dummy() { region.constrain_equal(&input, &output)?; } region.increment(output.len()); Ok(output) } pub(crate) fn range_check<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], range: &crate::circuit::table::Range, ) -> Result<ValTensor<F>, Box<dyn Error>> { region.add_used_range_check(*range)?; let timer = instant::Instant::now(); let x = values[0].clone(); let w = region.assign(&config.range_checks.input, &x)?; let assigned_len = x.len(); let is_dummy = region.is_dummy(); let table_index: ValTensor<F> = w .get_inner_tensor()? .par_enum_map(|_, e| { Ok::<ValType<F>, TensorError>(if let Some(f) = e.get_felt_eval() { let col_idx = if !is_dummy { let table = config .range_checks .ranges .get(range) .ok_or(TensorError::TableLookupError)?; table.get_col_index(f) } else { F::ZERO }; Value::known(col_idx).into() } else { Value::<F>::unknown().into() }) })? .into(); region.assign(&config.range
_checks.index, &table_index)?; if !is_dummy { (0..assigned_len) .map(|i| { let (x, y, z) = config .range_checks .input .cartesian_coord(region.linear_coord() + i); let selector = config.range_checks.selectors.get(&(*range, x, y)); region.enable(selector, z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } let is_assigned = !w.any_unknowns()?; if is_assigned && region.witness_gen() { let int_values = w.get_int_evals()?; for v in int_values.iter() { if v < &range.0 || v > &range.1 { log::error!("Value ({:?}) out of range: {:?}", v, range); return Err(Box::new(TensorError::TableLookupError)); } } } region.increment(assigned_len); let elapsed = timer.elapsed(); trace!( "range check {:?} layout took {:?}, row: {:?}", range, elapsed, region.row() ); Ok(w) } pub(crate) fn nonlinearity<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], nl: &LookupOp, ) -> Result<ValTensor<F>, Box<dyn Error>> { region.add_used_lookup(nl.clone(), values)?; let timer = instant::Instant::now(); let x = values[0].clone(); let removal_indices = values[0].get_const_indices()?; let removal_indices: HashSet<&usize> = HashSet::from_iter(removal_indices.iter()); let removal_indices_ptr = &removal_indices; let w = region.assign_with_omissions(&config.static_lookups.input, &x, removal_indices_ptr)?; let output = w.get_inner_tensor()?.par_enum_map(|i, e| { Ok::<_, TensorError>(if let Some(f) = e.get_felt_eval() { if !removal_indices.contains(&i) { Value::known(nl.f(&[Tensor::from(vec![f].into_iter())])?.output[0]).into()
} else { ValType::Constant(nl.f(&[Tensor::from(vec![f].into_iter())])?.output[0]) } } else { Value::<F>::unknown().into() }) })?; let assigned_len = x.len() - removal_indices.len(); let mut output = region.assign_with_omissions( &config.static_lookups.output, &output.into(), removal_indices_ptr, )?; let is_dummy = region.is_dummy(); let table_index: ValTensor<F> = w .get_inner_tensor()? .par_enum_map(|i, e| { Ok::<_, TensorError>(if let Some(f) = e.get_felt_eval() { let col_idx = if !is_dummy { let table = config .static_lookups .tables .get(nl) .ok_or(TensorError::TableLookupError)?; table.get_col_index(f) } else { F::ZERO }; if !removal_indices.contains(&i) { Value::known(col_idx).into() } else { ValType::Constant(col_idx) } } else { Value::<F>::unknown().into() }) })? .into(); region.assign_with_omissions( &config.static_lookups.index, &table_index, removal_indices_ptr, )?; if !is_dummy { (0..assigned_len) .map(|i| { let (x, y, z) = config .static_lookups .input .cartesian_coord(region.linear_coord() + i); let selector = config.static_lookups.selectors.get(&(nl.clone(), x, y)); region.enable(selector, z)?; Ok(()) }) .collect::<Result<Vec<_>, Box<dyn Error>>>()?; } region.increment(assigned_len); output.reshape(x.dims())?; let elapsed = timer.elapsed(); trace!( "nonlinearity {} layout took {:?}, row: {:
?}", <LookupOp as Op<F>>::as_string(nl), elapsed, region.row() ); Ok(output) } pub(crate) fn argmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let argmax = values[0] .get_int_evals()? .into_par_iter() .enumerate() .max_by_key(|(idx, value)| (*value, -(*idx as i64))) .map(|(idx, _)| idx as i128); let argmax_val: ValTensor<F> = match argmax { None => Tensor::new(Some(&[Value::<F>::unknown()]), &[1])?.into(), Some(i) => Tensor::new(Some(&[Value::known(i128_to_felt::<F>(i))]), &[1])?.into(), }; let assigned_argmax: ValTensor<F> = region.assign(&config.custom_gates.inputs[1], &argmax_val)?; region.increment(assigned_argmax.len()); let claimed_val = select( config, region, &[values[0].clone(), assigned_argmax.clone()], )?; let max_val = max(config, region, &[values[0].clone()])?; enforce_equality(config, region, &[claimed_val, max_val])?; Ok(assigned_argmax) } pub(crate) fn argmin<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let argmin = values[0] .get_int_evals()? .into_par_iter() .enumerate() .min_by_key(|(idx, value)| (*value, (*idx as i64))) .map(|(idx, _)| idx as i128); let argmin_val: ValTensor<F> = match argmin { None => Tensor::new(Some(&[Value::<F>::unknown()]), &[1])?.into(), Some(i) => Tensor::new(Some(&[Value::known(i128_to_felt::<F>(i))]), &[1])?.into(), }; let assigned_argmin: ValTensor<F> = region.assign(&config.custom_gates.inputs[1], &argmin_val)?; region.increment(assigned_argmin.len()); let claimed_val = select(
config, region, &[values[0].clone(), assigned_argmin.clone()], )?; let min_val = min(config, region, &[values[0].clone()])?; enforce_equality(config, region, &[claimed_val, min_val])?; Ok(assigned_argmin) } pub(crate) fn max<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { let input_len = values[0].len(); _sort_ascending(config, region, values)?.get_slice(&[input_len - 1..input_len]) } pub(crate) fn min<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> { _sort_ascending(config, region, values)?.get_slice(&[0..1]) } fn multi_dim_axes_op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], axes: &[usize], op: impl Fn( &BaseConfig<F>, &mut RegionCtx<F>, &[ValTensor<F>; 1], ) -> Result<ValTensor<F>, Box<dyn Error>> + Send + Sync, ) -> Result<ValTensor<F>, Box<dyn Error>> { let mut input = values[0].clone(); if !input.all_prev_assigned() { input = region.assign(&config.custom_gates.inputs[0], &input)?; region.increment(input.len()); } if input.dims().len() == 1 { return op(config, region, &[input]); } let input_dims = input.dims(); let mut sorted_axes = axes.to_vec(); sorted_axes.sort_by(|x, y| y.cmp(x)); let mut output_size_without_dim = input_dims.to_vec(); for dim in &sorted_axes { output_size_without_dim.remove(*dim); } let mut op_tensors = Tensor::<ValTensor<F>>::new(None, &output_size_without_dim)?; let cartesian_coord = output_size_without_dim .iter() .map(|x| 0..*x) .multi_cart
esian_product() .collect::<Vec<_>>(); let inner_loop_function = |i: usize, region: &mut RegionCtx<F>| { let coord = cartesian_coord[i].clone(); let mut slice = coord.iter().map(|x| *x..*x + 1).collect::<Vec<_>>(); for dim in &sorted_axes { slice.insert(*dim, 0..input_dims[*dim]); } let mut sliced_input = input.get_slice(&slice)?; sliced_input.flatten(); Ok(op(config, region, &[sliced_input])?) }; region.apply_in_loop(&mut op_tensors, inner_loop_function)?; let sample_op_output_size = op_tensors[0].dims(); let mut output_size = input_dims.to_vec(); for dim in axes.iter().enumerate() { output_size[*dim.1] = sample_op_output_size[dim.0]; } let cartesian_coord = output_size .iter() .map(|x| 0..*x) .multi_cartesian_product() .collect::<Vec<_>>(); let mut output = Tensor::<ValType<F>>::new(None, &output_size)?; output = output.par_enum_map(|i, _| { let coord = cartesian_coord[i].clone(); let mut op_idx = coord.clone(); let mut coord_at_dims = vec![]; for dim in &sorted_axes { op_idx.remove(*dim); } for dim in axes { coord_at_dims.push(coord[*dim]); } let topk_elem = op_tensors .get(&op_idx) .get_inner_tensor()? .get(&coord_at_dims) .clone(); Ok::<_, region::RegionError>(topk_elem) })?; Ok(output.into()) } pub(crate) fn softmax_axes<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], input_scale: utils::F32, output_scale: utils::F32, axes: &[usize], ) -> Result<ValTensor<F>, Box<dyn Error>> { let soft_max_at_scale = move |config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1]|
-> Result<ValTensor<F>, Box<dyn Error>> { softmax(config, region, values, input_scale, output_scale) }; let output = multi_dim_axes_op(config, region, values, axes, soft_max_at_scale)?; Ok(output) } pub(crate) fn percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], input_scale: utils::F32, output_scale: utils::F32, ) -> Result<ValTensor<F>, Box<dyn Error>> { let is_assigned = values[0].all_prev_assigned(); let mut input = values[0].clone(); if !is_assigned { input = region.assign(&config.custom_gates.inputs[0], &values[0])?; region.increment(input.len()); }; let denom = sum(config, region, &[input.clone()])?; let input_felt_scale = F::from(input_scale.0 as u64); let output_felt_scale = F::from(output_scale.0 as u64); let inv_denom = recip( config, region, &[denom], input_felt_scale, output_felt_scale, )?; let percent = pairwise(config, region, &[input, inv_denom], BaseOp::Mult)?; loop_div(config, region, &[percent], input_felt_scale) } pub fn softmax<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>; 1], input_scale: utils::F32, output_scale: utils::F32, ) -> Result<ValTensor<F>, Box<dyn Error>> { let max_val = max(config, region, values)?; let sub = pairwise(config, region, &[values[0].clone(), max_val], BaseOp::Sub)?; let ex = nonlinearity( config, region, &[sub], &LookupOp::Exp { scale: input_scale }, )?; percent(config, region, &[ex.clone()], input_scale, output_scale) } pub fn range_check_percent<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( config: &BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>;
2], scale: utils::F32, tol: f32, ) -> Result<ValTensor<F>, Box<dyn Error>> { if tol == 0.0 { return enforce_equality(config, region, values); } let mut values = [values[0].clone(), values[1].clone()]; values[0] = region.assign(&config.custom_gates.inputs[0], &values[0])?; values[1] = region.assign(&config.custom_gates.inputs[1], &values[1])?; let total_assigned_0 = values[0].len(); let total_assigned_1 = values[1].len(); let total_assigned = std::cmp::max(total_assigned_0, total_assigned_1); region.increment(total_assigned); let diff = pairwise(config, region, &values, BaseOp::Sub)?; let int_scale = scale.0 as i128; let felt_scale = i128_to_felt(int_scale); let range_check_bracket = std::cmp::min( utils::F32(scale.0), utils::F32(2_f32.powf((F::S - 5) as f32)), ) .0; let range_check_bracket_int = range_check_bracket as i128; let input_scale_ratio = ((scale.0.powf(2.0) / range_check_bracket) * tol) as i128 / 2 * 2; let recip = recip( config, region, &[values[0].clone()], felt_scale, felt_scale * F::from(100), )?; log::debug!("recip: {}", recip.show()); let product = pairwise(config, region, &[diff, recip], BaseOp::Mult)?; log::debug!("product: {}", product.show()); let rebased_product = loop_div(config, region, &[product], i128_to_felt(input_scale_ratio))?; log::debug!("rebased_product: {}", rebased_product.show()); range_check( config, region, &[rebased_product], &(-range_check_bracket_int, range_check_bracket_int), ) }
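// Added illustration, not part of the original module: a standalone sketch of the
// window arithmetic shared by `conv`, `max_pool` and `sumpool` above, namely
// out = (pad_before + d + pad_after - kernel) / stride + 1 per spatial axis, with
// checked ops so an oversized kernel or a zero stride yields an error rather than a
// panic. Function and module names here are illustrative only.
#[cfg(test)]
mod window_arithmetic_sketch {
    fn output_dims(
        dims: &[usize],
        padding: &[(usize, usize)],
        kernel: &[usize],
        stride: &[usize],
    ) -> Option<Vec<usize>> {
        dims.iter()
            .enumerate()
            .map(|(i, d)| -> Option<usize> {
                let padded = padding[i].0 + d + padding[i].1;
                padded
                    .checked_sub(kernel[i])?
                    .checked_div(stride[i])?
                    .checked_add(1)
            })
            .collect()
    }

    #[test]
    fn window_arithmetic() {
        // A length-5 axis padded by 1 on each side, kernel 3, stride 2 -> 3 outputs.
        assert_eq!(output_dims(&[5], &[(1, 1)], &[3], &[2]), Some(vec![3]));
        // A kernel larger than the padded axis has no valid placement.
        assert_eq!(output_dims(&[2], &[(0, 0)], &[4], &[1]), None);
        // Zero stride is rejected, mirroring the explicit guard in `conv` / `deconv`.
        assert_eq!(output_dims(&[5], &[(0, 0)], &[3], &[0]), None);
    }
}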
use super::*; use serde::{Deserialize, Serialize}; use std::error::Error; use crate::{ circuit::{layouts, table::Range, utils}, fieldutils::{felt_to_i128, i128_to_felt}, graph::multiplier_to_scale, tensor::{self, Tensor, TensorError, TensorType}, }; use super::Op; use halo2curves::ff::PrimeField; pub enum LookupOp { Abs, Div { denom: utils::F32, }, Cast { scale: utils::F32, }, ReLU, Max { scale: utils::F32, a: utils::F32, }, Min { scale: utils::F32, a: utils::F32, }, Ceil { scale: utils::F32, }, Floor { scale: utils::F32, }, Round { scale: utils::F32, }, RoundHalfToEven { scale: utils::F32, }, Sqrt { scale: utils::F32, }, Rsqrt { scale: utils::F32, }, Recip { input_scale: utils::F32, output_scale: utils::F32, }, LeakyReLU { slope: utils::F32, }, Sigmoid { scale: utils::F32, }, Ln { scale: utils::F32, }, Exp { scale: utils::F32, }, Cos { scale: utils::F32, }, ACos { scale: utils::F32, }, Cosh { scale: utils::F32, }, ACosh { scale: utils::F32, }, Sin { scale: utils::F32, }, ASin { scale: utils::F32, }, Sinh { scale: utils::F32, }, ASinh { scale: utils::F32, }, Tan { scale: utils::F32, }, ATan { scale: utils::F32, }, Tanh { scale: utils::F32, }, ATanh { scale: utils::F32, }, Erf { scale: utils::F32, }, GreaterThan { a: utils::F32, }, LessThan { a: utils::F32, }, GreaterThanEqual { a: utils::F32, }, LessThanEqual { a: utils::F32, }, Sign, KroneckerDelta, Pow { scale: utils::F32, a: utils::F32, }, HardSwish { scale: uti
ls::F32, }, } impl LookupOp { pub fn bit_range(max_len: usize) -> Range { let range = (max_len - 1) as f64 / 2_f64; let range = range as i128; (-range, range) } pub(crate) fn f<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>( &self, x: &[Tensor<F>], ) -> Result<ForwardResult<F>, TensorError> { let x = x[0].clone().map(|x| felt_to_i128(x)); let res = match &self { LookupOp::Abs => Ok(tensor::ops::abs(&x)?), LookupOp::Ceil { scale } => Ok(tensor::ops::nonlinearities::ceil(&x, scale.into())), LookupOp::Floor { scale } => Ok(tensor::ops::nonlinearities::floor(&x, scale.into())), LookupOp::Round { scale } => Ok(tensor::ops::nonlinearities::round(&x, scale.into())), LookupOp::RoundHalfToEven { scale } => Ok( tensor::ops::nonlinearities::round_half_to_even(&x, scale.into()), ), LookupOp::Pow { scale, a } => Ok(tensor::ops::nonlinearities::pow( &x, scale.0.into(), a.0.into(), )), LookupOp::KroneckerDelta => Ok(tensor::ops::nonlinearities::kronecker_delta(&x)), LookupOp::Max { scale, a } => Ok(tensor::ops::nonlinearities::max( &x, scale.0.into(), a.0.into(), )), LookupOp::Min { scale, a } => Ok(tensor::ops::nonlinearities::min( &x, scale.0.into(), a.0.into(), )), LookupOp::Sign => Ok(tensor::ops::nonlinearities::sign(&x)), LookupOp::LessThan { a } => Ok(tensor::ops::nonlinearities::less_than( &x, f32::from(*a).into(), )), LookupOp::LessThanEqual { a } => Ok(tensor::ops::nonlinearities::less_than_equal( &x, f32::from(*a).into(), )), LookupOp::GreaterThan { a } => Ok(tensor::ops::non
linearities::greater_than( &x, f32::from(*a).into(), )), LookupOp::GreaterThanEqual { a } => Ok( tensor::ops::nonlinearities::greater_than_equal(&x, f32::from(*a).into()), ), LookupOp::Div { denom } => Ok(tensor::ops::nonlinearities::const_div( &x, f32::from(*denom).into(), )), LookupOp::Cast { scale } => Ok(tensor::ops::nonlinearities::const_div( &x, f32::from(*scale).into(), )), LookupOp::Recip { input_scale, output_scale, } => Ok(tensor::ops::nonlinearities::recip( &x, input_scale.into(), output_scale.into(), )), LookupOp::ReLU => Ok(tensor::ops::nonlinearities::leakyrelu(&x, 0_f64)), LookupOp::LeakyReLU { slope: a } => { Ok(tensor::ops::nonlinearities::leakyrelu(&x, a.0.into())) } LookupOp::Sigmoid { scale } => { Ok(tensor::ops::nonlinearities::sigmoid(&x, scale.into())) } LookupOp::Sqrt { scale } => Ok(tensor::ops::nonlinearities::sqrt(&x, scale.into())), LookupOp::Rsqrt { scale } => Ok(tensor::ops::nonlinearities::rsqrt(&x, scale.into())), LookupOp::Erf { scale } => Ok(tensor::ops::nonlinearities::erffunc(&x, scale.into())), LookupOp::Exp { scale } => Ok(tensor::ops::nonlinearities::exp(&x, scale.into())), LookupOp::Ln { scale } => Ok(tensor::ops::nonlinearities::ln(&x, scale.into())), LookupOp::Cos { scale } => Ok(tensor::ops::nonlinearities::cos(&x, scale.into())), LookupOp::ACos { scale } => Ok(tensor::ops::nonlinearities::acos(&x, scale.into())), LookupOp::Cosh { scale } => Ok(tensor::ops::nonlinearities::cosh(&x, scale.into())), LookupOp::ACosh { scale } => Ok(tensor::ops::nonlinearities::acosh(&x,
scale.into())), LookupOp::Sin { scale } => Ok(tensor::ops::nonlinearities::sin(&x, scale.into())), LookupOp::ASin { scale } => Ok(tensor::ops::nonlinearities::asin(&x, scale.into())), LookupOp::Sinh { scale } => Ok(tensor::ops::nonlinearities::sinh(&x, scale.into())), LookupOp::ASinh { scale } => Ok(tensor::ops::nonlinearities::asinh(&x, scale.into())), LookupOp::Tan { scale } => Ok(tensor::ops::nonlinearities::tan(&x, scale.into())), LookupOp::ATan { scale } => Ok(tensor::ops::nonlinearities::atan(&x, scale.into())), LookupOp::ATanh { scale } => Ok(tensor::ops::nonlinearities::atanh(&x, scale.into())), LookupOp::Tanh { scale } => Ok(tensor::ops::nonlinearities::tanh(&x, scale.into())), LookupOp::HardSwish { scale } => { Ok(tensor::ops::nonlinearities::hardswish(&x, scale.into())) } }?; let output = res.map(|x| i128_to_felt(x)); Ok(ForwardResult { output }) } } impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for LookupOp { fn as_any(&self) -> &dyn Any { self } fn as_string(&self) -> String { match self { LookupOp::Abs => "ABS".into(), LookupOp::Ceil { scale } => format!("CEIL(scale={})", scale), LookupOp::Floor { scale } => format!("FLOOR(scale={})", scale), LookupOp::Round { scale } => format!("ROUND(scale={})", scale), LookupOp::RoundHalfToEven { scale } => format!("ROUND_HALF_TO_EVEN(scale={})", scale), LookupOp::Pow { a, scale } => format!("POW(scale={}, exponent={})", scale, a), LookupOp::KroneckerDelta => "K_DELTA".into(), LookupOp::Max { scale, a } => format!("MAX(scale={}, a={})", scale, a), LookupOp::Min { scale, a } => format!("MIN(scale={}, a={})", scale, a), LookupOp::Sign => "SIGN".into(), LookupOp::GreaterThan { a } => format!("GREATER_THAN(a={})
", a), LookupOp::GreaterThanEqual { a } => format!("GREATER_THAN_EQUAL(a={})", a), LookupOp::LessThan { a } => format!("LESS_THAN(a={})", a), LookupOp::LessThanEqual { a } => format!("LESS_THAN_EQUAL(a={})", a), LookupOp::Recip { input_scale, output_scale, } => format!( "RECIP(input_scale={}, output_scale={})", input_scale, output_scale ), LookupOp::Div { denom, .. } => format!("DIV(denom={})", denom), LookupOp::Cast { scale } => format!("CAST(scale={})", scale), LookupOp::Ln { scale } => format!("LN(scale={})", scale), LookupOp::ReLU => "RELU".to_string(), LookupOp::LeakyReLU { slope: a } => format!("L_RELU(slope={})", a), LookupOp::Sigmoid { scale } => format!("SIGMOID(scale={})", scale), LookupOp::Sqrt { scale } => format!("SQRT(scale={})", scale), LookupOp::Erf { scale } => format!("ERF(scale={})", scale), LookupOp::Rsqrt { scale } => format!("RSQRT(scale={})", scale), LookupOp::Exp { scale } => format!("EXP(scale={})", scale), LookupOp::Tan { scale } => format!("TAN(scale={})", scale), LookupOp::ATan { scale } => format!("ATAN(scale={})", scale), LookupOp::Tanh { scale } => format!("TANH(scale={})", scale), LookupOp::ATanh { scale } => format!("ATANH(scale={})", scale), LookupOp::Cos { scale } => format!("COS(scale={})", scale), LookupOp::ACos { scale } => format!("ACOS(scale={})", scale), LookupOp::Cosh { scale } => format!("COSH(scale={})", scale), LookupOp::ACosh { scale } => format!("ACOSH(scale={})", scale), LookupOp::Sin { scale } => format!("SIN(scale={})", scale), LookupOp::ASin { scale } => format!("ASIN(scale={})", scale), LookupOp::Sinh { scale } => format!("SINH(scale={})", scale), LookupOp::ASinh { scal
e } => format!("ASINH(scale={})", scale), LookupOp::HardSwish { scale } => format!("HARDSWISH(scale={})", scale), } } fn layout( &self, config: &mut crate::circuit::BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>], ) -> Result<Option<ValTensor<F>>, Box<dyn Error>> { Ok(Some(layouts::nonlinearity( config, region, values[..].try_into()?, self, )?)) } fn out_scale(&self, inputs_scale: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> { let scale = match self { LookupOp::Cast { scale } => { let in_scale = inputs_scale[0]; in_scale + multiplier_to_scale(1. / scale.0 as f64) } LookupOp::Recip { output_scale, .. } => multiplier_to_scale(output_scale.into()), LookupOp::Sign | LookupOp::GreaterThan { .. } | LookupOp::LessThan { .. } | LookupOp::GreaterThanEqual { .. } | LookupOp::LessThanEqual { .. } | LookupOp::KroneckerDelta => 0, _ => inputs_scale[0], }; Ok(scale) } fn clone_dyn(&self) -> Box<dyn Op<F>> { Box::new(self.clone()) } }
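// Added illustration, not in the original source: `bit_range` maps a lookup table of
// `max_len` rows to the symmetric value range it can represent, truncating toward zero
// on both ends. The test module name is illustrative.
#[cfg(test)]
mod lookup_op_range_tests {
    use super::LookupOp;

    #[test]
    fn bit_range_is_symmetric() {
        // (max_len - 1) / 2, truncated.
        assert_eq!(LookupOp::bit_range(3), (-1, 1));
        assert_eq!(LookupOp::bit_range(1 << 17), (-65535, 65535));
    }
}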
use std::{any::Any, error::Error}; use serde::{Deserialize, Serialize}; use crate::{ graph::quantize_tensor, tensor::{self, Tensor, TensorType, ValTensor}, }; use halo2curves::ff::PrimeField; use self::{lookup::LookupOp, region::RegionCtx}; pub mod base; pub mod chip; pub mod hybrid; pub mod layouts; pub mod lookup; pub mod poly; pub mod region; pub struct ForwardResult<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> { pub(crate) output: Tensor<F>, } pub trait Op<F: PrimeField + TensorType + PartialOrd + std::hash::Hash>: std::fmt::Debug + Send + Sync + Any { fn as_string(&self) -> String; fn layout( &self, config: &mut crate::circuit::BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>], ) -> Result<Option<ValTensor<F>>, Box<dyn Error>>; fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>>; fn requires_homogenous_input_scales(&self) -> Vec<usize> { vec![] } fn is_input(&self) -> bool { false } fn is_constant(&self) -> bool { false } fn clone_dyn(&self) -> Box<dyn Op<F>>; fn as_any(&self) -> &dyn Any; } impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Clone for Box<dyn Op<F>> { fn clone(&self) -> Self { self.clone_dyn() } } pub enum InputType { Bool, F16, F32, F64, Int, TDim, } impl InputType { pub fn is_integer(&self) -> bool { matches!(self, InputType::Int | InputType::TDim | InputType::Bool) } pub fn roundtrip<T: num::ToPrimitive + num::FromPrimitive + Clone>(&self, input: &mut T) { match self { InputType::Bool => { let boolean_input = input.clone().to_i64().unwrap(); assert!(boolean_input == 0 || boolean_input == 1); *input = T::from_i64(boolean_input).unwrap(); } Inpu
tType::F16 => { let f32_input = input.clone().to_f32().unwrap(); *input = T::from_f32(f32_input).unwrap(); } InputType::F32 => { let f32_input = input.clone().to_f32().unwrap(); *input = T::from_f32(f32_input).unwrap(); } InputType::F64 => { let f64_input = input.clone().to_f64().unwrap(); *input = T::from_f64(f64_input).unwrap(); } InputType::Int | InputType::TDim => { let int_input = input.clone().to_i128().unwrap(); *input = T::from_i128(int_input).unwrap(); } } } } pub
struct Input { pub scale: crate::Scale, pub datum_type: InputType, } impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Input { fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> { Ok(self.scale) } fn as_any(&self) -> &dyn Any { self } fn as_string(&self) -> String { "Input".into() } fn layout( &self, config: &mut crate::circuit::BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>], ) -> Result<Option<ValTensor<F>>, Box<dyn Error>> { let value = values[0].clone(); if !value.all_prev_assigned() { match self.datum_type { InputType::Bool => { log::debug!("constraining input to be boolean"); Ok(Some(super::layouts::boolean_identity( config, region, values[..].try_into()?, true, )?)) } _ => Ok(Some(super::layouts::identity( config, region, values[..].try_into()?, )?)), } } else { Ok(Some(value)) } } fn is_input(&self) -> bool { true } fn clone_dyn(&self) -> Box<dyn Op<F>> { Box::new(self.clone()) } } pub struct Unknown; impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Op<F> for Unknown { fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> { Ok(0) } fn as_any(&self) -> &dyn Any { self } fn as_string(&self) -> String { "Unknown".into() } fn layout( &self, _: &mut crate::circuit::BaseConfig<F>, _: &mut RegionCtx<F>, _: &[ValTensor<F>], ) -> Result<Option<ValTensor<F>>, Box<dyn Error>> { Err(Box::new(super::CircuitError::Unsu
pportedOp)) } fn clone_dyn(&self) -> Box<dyn Op<F>> { Box::new(self.clone()) } } pub struct Constant<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> { pub quantized_values: Tensor<F>, pub raw_values: Tensor<f32>, pub pre_assigned_val: Option<ValTensor<F>>, } impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> Constant<F> { pub fn new(quantized_values: Tensor<F>, raw_values: Tensor<f32>) -> Self { Self { quantized_values, raw_values, pre_assigned_val: None, } } pub fn rebase_scale(&mut self, new_scale: crate::Scale) -> Result<(), Box<dyn Error>> { let visibility = self.quantized_values.visibility().unwrap(); self.quantized_values = quantize_tensor(self.raw_values.clone(), new_scale, &visibility)?; Ok(()) } pub
fn empty_raw_value(&mut self) { self.raw_values = Tensor::new(None, &[0]).unwrap(); } pub
fn pre_assign(&mut self, val: ValTensor<F>) { self.pre_assigned_val = Some(val) } } impl< F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Serialize + for<'de> Deserialize<'de>, > Op<F> for Constant<F> { fn as_any(&self) -> &dyn Any { self } fn as_string(&self) -> String { format!("CONST (scale={})", self.quantized_values.scale().unwrap()) } fn layout( &self, config: &mut crate::circuit::BaseConfig<F>, region: &mut RegionCtx<F>, _: &[ValTensor<F>], ) -> Result<Option<ValTensor<F>>, Box<dyn Error>> { let value = if let Some(value) = &self.pre_assigned_val { value.clone() } else { self.quantized_values.clone().try_into()? }; Ok(Some(layouts::identity(config, region, &[value])?)) } fn clone_dyn(&self) -> Box<dyn Op<F>> { Box::new(self.clone()) } fn out_scale(&self, _: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> { Ok(self.quantized_values.scale().unwrap()) } fn is_constant(&self) -> bool { true } }
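// Added illustration, not in the original source: `InputType::roundtrip` squeezes a
// value through the declared input width, so extra precision is dropped before
// quantization. Module and test names are illustrative.
#[cfg(test)]
mod input_type_roundtrip_tests {
    use super::InputType;

    #[test]
    fn f32_roundtrip_drops_f64_precision() {
        let mut x = 1.0f64 + f64::EPSILON;
        InputType::F32.roundtrip(&mut x);
        assert_eq!(x, 1.0);
    }

    #[test]
    fn int_roundtrip_is_identity() {
        let mut v = 42i64;
        InputType::Int.roundtrip(&mut v);
        assert_eq!(v, 42);
    }
}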
use crate::{ circuit::layouts, tensor::{self, Tensor, TensorError}, }; use super::{base::BaseOp, *}; pub enum PolyOp { GatherElements { dim: usize, constant_idx: Option<Tensor<usize>>, }, GatherND { batch_dims: usize, indices: Option<Tensor<usize>>, }, ScatterElements { dim: usize, constant_idx: Option<Tensor<usize>>, }, ScatterND { constant_idx: Option<Tensor<usize>>, }, MultiBroadcastTo { shape: Vec<usize>, }, Einsum { equation: String, }, Conv { padding: Vec<(usize, usize)>, stride: Vec<usize>, }, Downsample { axis: usize, stride: usize, modulo: usize, }, DeConv { padding: Vec<(usize, usize)>, output_padding: Vec<usize>, stride: Vec<usize>, }, Add, Sub, Neg, Mult, Identity { out_scale: Option<crate::Scale>, }, Reshape(Vec<usize>), MoveAxis { source: usize, destination: usize, }, Flatten(Vec<usize>), Pad(Vec<(usize, usize)>), Sum { axes: Vec<usize>, }, MeanOfSquares { axes: Vec<usize>, }, Prod { axes: Vec<usize>, len_prod: usize, }, Pow(u32), Concat { axis: usize, }, Slice { axis: usize, start: usize, end: usize, }, Iff, Resize { scale_factor: Vec<usize>, }, Not, And, Or, Xor, Trilu { upper: bool, k: i32, }, } impl< F: PrimeField + TensorType + PartialOrd + std::hash::Hash + Serialize + for<'de> Deserialize<'de>, > Op<F> for PolyOp { fn as_any(&self) -> &dyn Any { self } fn as_string(&self) -> String { match &self { PolyOp::GatherElements { dim, constant_idx } => format!( "GATHERELEMENTS (dim={}, constant_idx{})",
dim, constant_idx.is_some() ), PolyOp::GatherND { batch_dims, indices, } => format!( "GATHERND (batch_dims={}, constant_idx{})", batch_dims, indices.is_some() ), PolyOp::MeanOfSquares { axes } => format!("MEANOFSQUARES (axes={:?})", axes), PolyOp::ScatterElements { dim, constant_idx } => format!( "SCATTERELEMENTS (dim={}, constant_idx{})", dim, constant_idx.is_some() ), PolyOp::ScatterND { constant_idx } => { format!("SCATTERND (constant_idx={})", constant_idx.is_some()) } PolyOp::MultiBroadcastTo { shape } => format!("MULTIBROADCASTTO (shape={:?})", shape), PolyOp::MoveAxis { .. } => "MOVEAXIS".into(), PolyOp::Downsample { .. } => "DOWNSAMPLE".into(), PolyOp::Resize { .. } => "RESIZE".into(), PolyOp::Iff => "IFF".into(), PolyOp::Einsum { equation, .. } => format!("EINSUM {}", equation), PolyOp::Identity { out_scale } => { format!("IDENTITY (out_scale={:?})", out_scale) } PolyOp::Reshape(shape) => format!("RESHAPE (shape={:?})", shape), PolyOp::Flatten(_) => "FLATTEN".into(), PolyOp::Pad(pads) => format!("PAD (pads={:?})", pads), PolyOp::Add => "ADD".into(), PolyOp::Mult => "MULT".into(), PolyOp::Sub => "SUB".into(), PolyOp::Sum { axes } => format!("SUM (axes={:?})", axes), PolyOp::Prod { .. } => "PROD".into(), PolyOp::Pow(_) => "POW".into(), PolyOp::Conv { stride, padding } => { format!("CONV (stride={:?}, padding={:?})", stride, padding) } PolyOp::DeConv { stride, padding, output_padding, } => { format!(
"DECONV (stride={:?}, padding={:?}, output_padding={:?})", stride, padding, output_padding ) } PolyOp::Concat { axis } => format!("CONCAT (axis={})", axis), PolyOp::Slice { axis, start, end } => { format!("SLICE (axis={}, start={}, end={})", axis, start, end) } PolyOp::Neg => "NEG".into(), PolyOp::Not => "NOT".into(), PolyOp::And => "AND".into(), PolyOp::Or => "OR".into(), PolyOp::Xor => "XOR".into(), PolyOp::Trilu { upper, k } => format!("TRILU (upper={}, k={})", upper, k), } } fn layout( &self, config: &mut crate::circuit::BaseConfig<F>, region: &mut RegionCtx<F>, values: &[ValTensor<F>], ) -> Result<Option<ValTensor<F>>, Box<dyn Error>> { Ok(Some(match self { PolyOp::MultiBroadcastTo { shape } => { layouts::expand(config, region, values[..].try_into()?, shape)? } PolyOp::MeanOfSquares { axes } => { layouts::mean_of_squares_axes(config, region, values[..].try_into()?, axes)? } PolyOp::Xor => layouts::xor(config, region, values[..].try_into()?)?, PolyOp::Or => layouts::or(config, region, values[..].try_into()?)?, PolyOp::And => layouts::and(config, region, values[..].try_into()?)?, PolyOp::Not => layouts::not(config, region, values[..].try_into()?)?, PolyOp::MoveAxis { source, destination, } => layouts::move_axis(values[..].try_into()?, *source, *destination)?, PolyOp::Downsample { axis, stride, modulo, } => layouts::downsample(config, region, values[..].try_into()?, axis, stride, modulo)?, PolyOp::Resize { scale_factor } => { layouts::resize(config, region, values[..].try_into()?, scale
_factor)? } PolyOp::Neg => layouts::neg(config, region, values[..].try_into()?)?, PolyOp::Iff => layouts::iff(config, region, values[..].try_into()?)?, PolyOp::Einsum { equation } => layouts::einsum(config, region, values, equation)?, PolyOp::Sum { axes } => { layouts::sum_axes(config, region, values[..].try_into()?, axes)? } PolyOp::Prod { axes, .. } => { layouts::prod_axes(config, region, values[..].try_into()?, axes)? } PolyOp::Conv { padding, stride } => { layouts::conv(config, region, values[..].try_into()?, padding, stride)? } PolyOp::GatherElements { dim, constant_idx } => { if let Some(idx) = constant_idx { tensor::ops::gather_elements(values[0].get_inner_tensor()?, idx, *dim)?.into() } else { layouts::gather_elements(config, region, values[..].try_into()?, *dim)?.0 } } PolyOp::GatherND { batch_dims, indices, } => { if let Some(idx) = indices { tensor::ops::gather_nd(values[0].get_inner_tensor()?, idx, *batch_dims)?.into() } else { layouts::gather_nd(config, region, values[..].try_into()?, *batch_dims)?.0 } } PolyOp::ScatterElements { dim, constant_idx } => { if let Some(idx) = constant_idx { tensor::ops::scatter( values[0].get_inner_tensor()?, idx, values[1].get_inner_tensor()?, *dim, )? .into() } else { layouts::scatter_elements(config, region, values[..].try_into()?, *dim)? } } PolyOp::ScatterND { constant_idx } => {
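// A constant index tensor lets the scatter be evaluated directly on witness values;
// otherwise the in-circuit `layouts::scatter_nd` lays out the constrained version.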
if let Some(idx) = constant_idx { tensor::ops::scatter_nd( values[0].get_inner_tensor()?, idx, values[1].get_inner_tensor()?, )? .into() } else { layouts::scatter_nd(config, region, values[..].try_into()?)? } } PolyOp::DeConv { padding, output_padding, stride, } => layouts::deconv( config, region, values[..].try_into()?, padding, output_padding, stride, )?, PolyOp::Add => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Add)?, PolyOp::Sub => layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Sub)?, PolyOp::Mult => { layouts::pairwise(config, region, values[..].try_into()?, BaseOp::Mult)? } PolyOp::Identity { .. } => layouts::identity(config, region, values[..].try_into()?)?, PolyOp::Reshape(d) | PolyOp::Flatten(d) => layouts::reshape(values[..].try_into()?, d)?, PolyOp::Pad(p) => { if values.len() != 1 { return Err(Box::new(TensorError::DimError( "Pad operation requires a single input".to_string(), ))); } let mut input = values[0].clone(); input.pad(p.clone(), 0)?; input } PolyOp::Pow(exp) => layouts::pow(config, region, values[..].try_into()?, *exp)?, PolyOp::Concat { axis } => layouts::concat(values[..].try_into()?, axis)?, PolyOp::Slice { axis, start, end } => { layouts::slice(config, region, values[..].try_into()?, axis, start, end)? } PolyOp::Trilu { upper, k } => {
layouts::trilu(config, region, values[..].try_into()?, k, upper)? } })) } fn out_scale(&self, in_scales: Vec<crate::Scale>) -> Result<crate::Scale, Box<dyn Error>> { let scale = match self { PolyOp::MeanOfSquares { .. } => 2 * in_scales[0], PolyOp::Xor | PolyOp::Or | PolyOp::And | PolyOp::Not => 0, PolyOp::Iff => in_scales[1], PolyOp::Einsum { .. } => { let mut scale = in_scales[0]; for s in in_scales.iter().skip(1) { scale += *s; } scale } PolyOp::Prod { len_prod, .. } => in_scales[0] * (*len_prod as crate::Scale), PolyOp::Sum { .. } => in_scales[0], PolyOp::Conv { .. } => { let input_scale = in_scales[0]; let kernel_scale = in_scales[1]; let output_scale = input_scale + kernel_scale; if in_scales.len() == 3 { let bias_scale = in_scales[2]; assert_eq!(output_scale, bias_scale); } output_scale } PolyOp::DeConv { .. } => { let input_scale = in_scales[0]; let kernel_scale = in_scales[1]; let output_scale = input_scale + kernel_scale; if in_scales.len() == 3 { let bias_scale = in_scales[2]; assert_eq!(output_scale, bias_scale); } output_scale } PolyOp::Add => { let scale_a = in_scales[0]; let scale_b = in_scales[1]; assert_eq!(scale_a, scale_b); scale_a } PolyOp::Sub => in_scales[0], PolyOp::Mult => { let mut scale = in_scales[0]; scale += in_scales[1]; scale } PolyOp::Reshape(_) | PolyOp::Flatten(_) => in_
scales[0], PolyOp::Pow(pow) => in_scales[0] * (*pow as crate::Scale), PolyOp::Identity { out_scale } => out_scale.unwrap_or(in_scales[0]), _ => in_scales[0], }; Ok(scale) } fn requires_homogenous_input_scales(&self) -> Vec<usize> { if matches!(self, PolyOp::Add { .. } | PolyOp::Sub) { vec![0, 1] } else if matches!(self, PolyOp::Iff) { vec![1, 2] } else if matches!(self, PolyOp::Concat { .. }) { (0..100).collect() } else if matches!(self, PolyOp::ScatterElements { .. }) | matches!(self, PolyOp::ScatterND { .. }) { vec![0, 2] } else { vec![] } } fn clone_dyn(&self) -> Box<dyn Op<F>> { Box::new(self.clone()) } }
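// Added illustration, not the crate's API: a standalone sketch of the fixed-point
// convention behind `out_scale` above. Treating `scale` as a base-2 exponent (a value x
// stored as round(x * 2^scale)), the raw product of two representations carries the sum
// of their scales, which is why Mult / Conv / Einsum add input scales and Pow(k)
// multiplies the scale by k. Names below are illustrative.
#[cfg(test)]
mod fixed_point_scale_sketch {
    fn to_fixed(x: f64, scale: i32) -> i128 {
        (x * f64::powi(2.0, scale)).round() as i128
    }

    #[test]
    fn product_of_representations_adds_scales() {
        let (a, b, scale) = (1.5, 2.25, 7);
        assert_eq!(
            to_fixed(a, scale) * to_fixed(b, scale),
            to_fixed(a * b, 2 * scale)
        );
    }
}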
use crate::{
    circuit::table::Range,
    tensor::{Tensor, TensorError, TensorType, ValTensor, ValType, VarTensor},
};
use colored::Colorize;
use halo2_proofs::{
    circuit::Region,
    plonk::{Error, Selector},
};
use halo2curves::ff::PrimeField;
use portable_atomic::AtomicI128 as AtomicInt;
use std::{
    cell::RefCell,
    collections::{HashMap, HashSet},
    sync::{
        atomic::{AtomicUsize, Ordering},
        Arc, Mutex,
    },
};

use super::lookup::LookupOp;

/// Map from a constant field element to the value assigned for it in the circuit.
pub type ConstantsMap<F> = HashMap<F, ValType<F>>;

/// Tracks the offset of dynamic lookups laid out so far: how many dynamic lookups have been
/// configured (`index`) and how far along their dedicated columns we are (`col_coord`).
// `Clone`, `Debug` and `Default` are required by the code below (`::default()` in the
// constructors, `.clone()` and `{:?}` formatting in `dummy_loop`).
#[derive(Clone, Debug, Default)]
pub
struct DynamicLookupIndex { index: usize, col_coord: usize, } impl DynamicLookupIndex { pub fn new(index: usize, col_coord: usize) -> DynamicLookupIndex { DynamicLookupIndex { index, col_coord } } pub fn index(&self) -> usize { self.index } pub fn col_coord(&self) -> usize { self.col_coord } pub
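    // Accumulate offsets from another chunk: dynamic-lookup regions are laid out back to back,
    // so both the lookup count and the column coordinate simply add.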
    fn update(&mut self, other: &DynamicLookupIndex) {
        self.index += other.index;
        self.col_coord += other.col_coord;
    }
}

/// Tracks the offset of shuffle regions laid out so far, analogous to `DynamicLookupIndex`.
// As above, `Clone`, `Debug` and `Default` are required by `::default()`, `.clone()` and `{:?}`
// in the region context below.
#[derive(Clone, Debug, Default)]
pub
struct ShuffleIndex { index: usize, col_coord: usize, } impl ShuffleIndex { pub fn new(index: usize, col_coord: usize) -> ShuffleIndex { ShuffleIndex { index, col_coord } } pub fn index(&self) -> usize { self.index } pub fn col_coord(&self) -> usize { self.col_coord } pub
fn update(&mut self, other: &ShuffleIndex) { self.index += other.index; self.col_coord += other.col_coord; } } pub enum RegionError { Wrapped(String), } impl From<String> for RegionError { fn from(e: String) -> Self { Self::Wrapped(e) } } impl From<&str> for RegionError { fn from(e: &str) -> Self { Self::Wrapped(e.to_string()) } } impl From<TensorError> for RegionError { fn from(e: TensorError) -> Self { Self::Wrapped(format!("{:?}", e)) } } impl From<Error> for RegionError { fn from(e: Error) -> Self { Self::Wrapped(format!("{:?}", e)) } } impl From<Box<dyn std::error::Error>> for RegionError { fn from(e: Box<dyn std::error::Error>) -> Self { Self::Wrapped(format!("{:?}", e)) } } pub struct RegionCtx<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> { region: Option<RefCell<Region<'a, F>>>, row: usize, linear_coord: usize, num_inner_cols: usize, dynamic_lookup_index: DynamicLookupIndex, shuffle_index: ShuffleIndex, used_lookups: HashSet<LookupOp>, used_range_checks: HashSet<Range>, max_lookup_inputs: i128, min_lookup_inputs: i128, max_range_size: i128, witness_gen: bool, assigned_constants: ConstantsMap<F>, } impl<'a, F: PrimeField + TensorType + PartialOrd + std::hash::Hash> RegionCtx<'a, F> { pub
fn debug_report(&self) { log::debug!( "(rows={}, coord={}, constants={}, max_lookup_inputs={}, min_lookup_inputs={}, max_range_size={}, dynamic_lookup_col_coord={}, shuffle_col_coord={})", self.row().to_string().blue(), self.linear_coord().to_string().yellow(), self.total_constants().to_string().red(), self.max_lookup_inputs().to_string().green(), self.min_lookup_inputs().to_string().green(), self.max_range_size().to_string().green(), self.dynamic_lookup_col_coord().to_string().green(), self.shuffle_col_coord().to_string().green()); } pub fn assigned_constants(&self) -> &ConstantsMap<F> { &self.assigned_constants } pub
fn update_constants(&mut self, constants: ConstantsMap<F>) { self.assigned_constants.extend(constants); } pub
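    // The next four helpers advance the dynamic-lookup and shuffle cursors as new tables and
    // shuffled regions are assigned into their dedicated columns.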
fn increment_dynamic_lookup_index(&mut self, n: usize) { self.dynamic_lookup_index.index += n; } pub
fn increment_dynamic_lookup_col_coord(&mut self, n: usize) { self.dynamic_lookup_index.col_coord += n; } pub
fn increment_shuffle_index(&mut self, n: usize) { self.shuffle_index.index += n; } pub
fn increment_shuffle_col_coord(&mut self, n: usize) { self.shuffle_index.col_coord += n; } pub fn witness_gen(&self) -> bool { self.witness_gen } pub fn new(region: Region<'a, F>, row: usize, num_inner_cols: usize) -> RegionCtx<'a, F> { let region = Some(RefCell::new(region)); let linear_coord = row * num_inner_cols; RegionCtx { region, num_inner_cols, row, linear_coord, dynamic_lookup_index: DynamicLookupIndex::default(), shuffle_index: ShuffleIndex::default(), used_lookups: HashSet::new(), used_range_checks: HashSet::new(), max_lookup_inputs: 0, min_lookup_inputs: 0, max_range_size: 0, witness_gen: true, assigned_constants: HashMap::new(), } } pub fn new_with_constants( region: Region<'a, F>, row: usize, num_inner_cols: usize, constants: ConstantsMap<F>, ) -> RegionCtx<'a, F> { let mut new_self = Self::new(region, row, num_inner_cols); new_self.assigned_constants = constants; new_self } pub fn from_wrapped_region( region: Option<RefCell<Region<'a, F>>>, row: usize, num_inner_cols: usize, dynamic_lookup_index: DynamicLookupIndex, shuffle_index: ShuffleIndex, ) -> RegionCtx<'a, F> { let linear_coord = row * num_inner_cols; RegionCtx { region, num_inner_cols, linear_coord, row, dynamic_lookup_index, shuffle_index, used_lookups: HashSet::new(), used_range_checks: HashSet::new(), max_lookup_inputs: 0, min_lookup_inputs: 0, max_range_size: 0, witness_gen: false, assigned_constants: HashMap::new(), } } pub fn new_dummy(row: usize, num_inner_cols: usize, witness_gen: bool
) -> RegionCtx<'a, F> { let region = None; let linear_coord = row * num_inner_cols; RegionCtx { region, num_inner_cols, linear_coord, row, dynamic_lookup_index: DynamicLookupIndex::default(), shuffle_index: ShuffleIndex::default(), used_lookups: HashSet::new(), used_range_checks: HashSet::new(), max_lookup_inputs: 0, min_lookup_inputs: 0, max_range_size: 0, witness_gen, assigned_constants: HashMap::new(), } } pub fn new_dummy_with_linear_coord( row: usize, linear_coord: usize, num_inner_cols: usize, witness_gen: bool, ) -> RegionCtx<'a, F> { let region = None; RegionCtx { region, num_inner_cols, linear_coord, row, dynamic_lookup_index: DynamicLookupIndex::default(), shuffle_index: ShuffleIndex::default(), used_lookups: HashSet::new(), used_range_checks: HashSet::new(), max_lookup_inputs: 0, min_lookup_inputs: 0, max_range_size: 0, witness_gen, assigned_constants: HashMap::new(), } } pub fn apply_in_loop<T: TensorType + Send + Sync>( &mut self, output: &mut Tensor<T>, inner_loop_function: impl Fn(usize, &mut RegionCtx<'a, F>) -> Result<T, RegionError> + Send + Sync, ) -> Result<(), RegionError> { if self.is_dummy() { self.dummy_loop(output, inner_loop_function)?; } else { self.real_loop(output, inner_loop_function)?; } Ok(()) } pub fn real_loop<T: TensorType + Send + Sync>( &mut self, output: &mut Tensor<T>, inner_loop_function: impl Fn(usize, &mut RegionCtx<'a, F>) -> Result<T, RegionError>, ) -> Result<(), RegionError> { output
.iter_mut() .enumerate() .map(|(i, o)| { *o = inner_loop_function(i, self)?; Ok(()) }) .collect::<Result<Vec<_>, RegionError>>()?; Ok(()) } pub fn dummy_loop<T: TensorType + Send + Sync>( &mut self, output: &mut Tensor<T>, inner_loop_function: impl Fn(usize, &mut RegionCtx<'a, F>) -> Result<T, RegionError> + Send + Sync, ) -> Result<(), RegionError> { let row = AtomicUsize::new(self.row()); let linear_coord = AtomicUsize::new(self.linear_coord()); let max_lookup_inputs = AtomicInt::new(self.max_lookup_inputs()); let min_lookup_inputs = AtomicInt::new(self.min_lookup_inputs()); let lookups = Arc::new(Mutex::new(self.used_lookups.clone())); let range_checks = Arc::new(Mutex::new(self.used_range_checks.clone())); let dynamic_lookup_index = Arc::new(Mutex::new(self.dynamic_lookup_index.clone())); let shuffle_index = Arc::new(Mutex::new(self.shuffle_index.clone())); let constants = Arc::new(Mutex::new(self.assigned_constants.clone())); *output = output .par_enum_map(|idx, _| { let starting_offset = row.load(Ordering::SeqCst); let starting_linear_coord = linear_coord.load(Ordering::SeqCst); let mut local_reg = Self::new_dummy_with_linear_coord( starting_offset, starting_linear_coord, self.num_inner_cols, self.witness_gen, ); let res = inner_loop_function(idx, &mut local_reg); row.fetch_add(local_reg.row() - starting_offset, Ordering::SeqCst); linear_coord.fetch_add( local_reg.linear_coord() - starting_linear_coord, Ordering::SeqCst, );
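                // Fold this chunk's statistics back into the shared accumulators so the parent
                // context sees the union of lookups, range checks, extrema and constants from
                // all parallel iterations.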
max_lookup_inputs.fetch_max(local_reg.max_lookup_inputs(), Ordering::SeqCst); min_lookup_inputs.fetch_min(local_reg.min_lookup_inputs(), Ordering::SeqCst); let mut lookups = lookups.lock().unwrap(); lookups.extend(local_reg.used_lookups()); let mut range_checks = range_checks.lock().unwrap(); range_checks.extend(local_reg.used_range_checks()); let mut dynamic_lookup_index = dynamic_lookup_index.lock().unwrap(); dynamic_lookup_index.update(&local_reg.dynamic_lookup_index); let mut shuffle_index = shuffle_index.lock().unwrap(); shuffle_index.update(&local_reg.shuffle_index); let mut constants = constants.lock().unwrap(); constants.extend(local_reg.assigned_constants); res }) .map_err(|e| RegionError::from(format!("dummy_loop: {:?}", e)))?; self.linear_coord = linear_coord.into_inner(); { self.max_lookup_inputs = max_lookup_inputs.into_inner(); self.min_lookup_inputs = min_lookup_inputs.into_inner(); } self.row = row.into_inner(); self.used_lookups = Arc::try_unwrap(lookups) .map_err(|e| RegionError::from(format!("dummy_loop: failed to get lookups: {:?}", e)))? .into_inner() .map_err(|e| { RegionError::from(format!("dummy_loop: failed to get lookups: {:?}", e)) })?; self.used_range_checks = Arc::try_unwrap(range_checks) .map_err(|e| { RegionError::from(format!("dummy_loop: failed to get range checks: {:?}", e)) })? .into_inner() .map_err(|e| { RegionError::from(format!("dummy_loop: failed to get range checks: {:?}", e)) })?; self.dynamic_lookup_index = Arc::try_unwrap(dynamic_l
ookup_index) .map_err(|e| { RegionError::from(format!( "dummy_loop: failed to get dynamic lookup index: {:?}", e )) })? .into_inner() .map_err(|e| { RegionError::from(format!( "dummy_loop: failed to get dynamic lookup index: {:?}", e )) })?; self.shuffle_index = Arc::try_unwrap(shuffle_index) .map_err(|e| { RegionError::from(format!("dummy_loop: failed to get shuffle index: {:?}", e)) })? .into_inner() .map_err(|e| { RegionError::from(format!("dummy_loop: failed to get shuffle index: {:?}", e)) })?; self.assigned_constants = Arc::try_unwrap(constants) .map_err(|e| { RegionError::from(format!("dummy_loop: failed to get constants: {:?}", e)) })? .into_inner() .map_err(|e| { RegionError::from(format!("dummy_loop: failed to get constants: {:?}", e)) })?; Ok(()) } pub fn update_max_min_lookup_inputs( &mut self, inputs: &[ValTensor<F>], ) -> Result<(), Box<dyn std::error::Error>> { let (mut min, mut max) = (0, 0); for i in inputs { max = max.max(i.get_int_evals()?.into_iter().max().unwrap_or_default()); min = min.min(i.get_int_evals()?.into_iter().min().unwrap_or_default()); } self.max_lookup_inputs = self.max_lookup_inputs.max(max); self.min_lookup_inputs = self.min_lookup_inputs.min(min); Ok(()) } pub fn update_max_min_lookup_range( &mut self, range: Range, ) -> Result<(), Box<dyn std::error::Error>> { if range.0 > range.1 { return Err(format!("update_max_min_lookup_range: invalid range {:?}", range).into()); } let range_size = (range.1 -
range.0).abs(); self.max_range_size = self.max_range_size.max(range_size); Ok(()) } pub fn is_dummy(&self) -> bool { self.region.is_none() } pub fn add_used_lookup( &mut self, lookup: LookupOp, inputs: &[ValTensor<F>], ) -> Result<(), Box<dyn std::error::Error>> { self.used_lookups.insert(lookup); self.update_max_min_lookup_inputs(inputs) } pub fn add_used_range_check(&mut self, range: Range) -> Result<(), Box<dyn std::error::Error>> { self.used_range_checks.insert(range); self.update_max_min_lookup_range(range) } pub fn row(&self) -> usize { self.row } pub fn linear_coord(&self) -> usize { self.linear_coord } pub fn total_constants(&self) -> usize { self.assigned_constants.len() } pub fn dynamic_lookup_index(&self) -> usize { self.dynamic_lookup_index.index } pub fn dynamic_lookup_col_coord(&self) -> usize { self.dynamic_lookup_index.col_coord } pub fn shuffle_index(&self) -> usize { self.shuffle_index.index } pub fn shuffle_col_coord(&self) -> usize { self.shuffle_index.col_coord } pub fn used_lookups(&self) -> HashSet<LookupOp> { self.used_lookups.clone() } pub fn used_range_checks(&self) -> HashSet<Range> { self.used_range_checks.clone() } pub fn max_lookup_inputs(&self) -> i128 { self.max_lookup_inputs } pub fn min_lookup_inputs(&self) -> i128 { self.min_lookup_inputs } pub fn max_range_size(&self) -> i128 { self.max_range_size } pub fn assign( &mut self, var: &VarTensor, values: &ValTensor<F>, ) -> Result<ValTensor<F>, Error> { if let Some(region) = &self.region { var.assign( &mut region.borrow_mut(), self.linear_coord,
values, &mut self.assigned_constants, ) } else { if !values.is_instance() { let values_map = values.create_constants_map_iterator(); self.assigned_constants.extend(values_map); } Ok(values.clone()) } } pub fn combined_dynamic_shuffle_coord(&self) -> usize { self.dynamic_lookup_col_coord() + self.shuffle_col_coord() } pub fn assign_dynamic_lookup( &mut self, var: &VarTensor, values: &ValTensor<F>, ) -> Result<ValTensor<F>, Error> { if let Some(region) = &self.region { var.assign( &mut region.borrow_mut(), self.combined_dynamic_shuffle_coord(), values, &mut self.assigned_constants, ) } else { if !values.is_instance() { let values_map = values.create_constants_map_iterator(); self.assigned_constants.extend(values_map); } Ok(values.clone()) } } pub fn assign_shuffle( &mut self, var: &VarTensor, values: &ValTensor<F>, ) -> Result<ValTensor<F>, Error> { self.assign_dynamic_lookup(var, values) } pub fn assign_with_omissions( &mut self, var: &VarTensor, values: &ValTensor<F>, ommissions: &HashSet<&usize>, ) -> Result<ValTensor<F>, Error> { if let Some(region) = &self.region { var.assign_with_omissions( &mut region.borrow_mut(), self.linear_coord, values, ommissions, &mut self.assigned_constants, ) } else { let inner_tensor = values.get_inner_tensor().unwrap(); let mut values_map = values.create_constants_map(); for o in ommissions { if let ValType::Constant(value) = inner_tensor.get_flat_index(**o)
{ values_map.remove(&value); } } self.assigned_constants.extend(values_map); Ok(values.clone()) } } pub fn assign_with_duplication( &mut self, var: &VarTensor, values: &ValTensor<F>, check_mode: &crate::circuit::CheckMode, single_inner_col: bool, ) -> Result<(ValTensor<F>, usize), Error> { if let Some(region) = &self.region { let (res, len) = var.assign_with_duplication( &mut region.borrow_mut(), self.row, self.linear_coord, values, check_mode, single_inner_col, &mut self.assigned_constants, )?; Ok((res, len)) } else { let (_, len) = var.dummy_assign_with_duplication( self.row, self.linear_coord, values, single_inner_col, &mut self.assigned_constants, )?; Ok((values.clone(), len)) } } pub fn enable(&mut self, selector: Option<&Selector>, offset: usize) -> Result<(), Error> { match &self.region { Some(region) => selector.unwrap().enable(&mut region.borrow_mut(), offset), None => Ok(()), } } pub fn constrain_equal(&mut self, a: &ValTensor<F>, b: &ValTensor<F>) -> Result<(), Error> { if let Some(region) = &self.region { let a = a.get_inner_tensor().unwrap(); let b = b.get_inner_tensor().unwrap(); assert_eq!(a.len(), b.len()); a.iter().zip(b.iter()).try_for_each(|(a, b)| { let a = a.get_prev_assigned(); let b = b.get_prev_assigned(); if let (Some(a), Some(b)) = (&a, &b) { region.borrow_mut().constrain_equal(a.cell(), b.cell()) } else if a.is_some() || b.is_some()
{ log::error!( "constrain_equal: one of the tensors is assigned and the other is not" ); return Err(Error::Synthesis); } else { Ok(()) } }) } else { Ok(()) } } pub
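    // Advance by one cell: bump the flattened coordinate and start a new row once every inner
    // column of the current row has been used.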
fn next(&mut self) { self.linear_coord += 1; if self.linear_coord % self.num_inner_cols == 0 { self.row += 1; } } pub
fn increment(&mut self, n: usize) { for _ in 0..n { self.next() } } pub fn flush(&mut self) -> Result<(), Box<dyn std::error::Error>> { let remainder = self.linear_coord % self.num_inner_cols; if remainder != 0 { let diff = self.num_inner_cols - remainder; self.increment(diff); } if self.linear_coord % self.num_inner_cols != 0 { return Err("flush: linear coord is not aligned with the next row".into()); } Ok(()) } }
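// `RegionCtx` tracks both a row index and a flattened `linear_coord` over `num_inner_cols`
// columns per row; `flush` pads the flattened coordinate up to the next full row. Below is a
// hypothetical, self-contained test sketch of that alignment arithmetic, for illustration only;
// the module and helper names are not part of the surrounding code.
#[cfg(test)]
mod region_coord_sketch {
    // Pad `linear_coord` up to the next multiple of `num_inner_cols`, mirroring `flush`.
    fn flush(linear_coord: usize, num_inner_cols: usize) -> usize {
        let remainder = linear_coord % num_inner_cols;
        if remainder == 0 {
            linear_coord
        } else {
            linear_coord + (num_inner_cols - remainder)
        }
    }

    #[test]
    fn flush_aligns_to_row_boundary() {
        let num_inner_cols = 4;
        assert_eq!(flush(9, num_inner_cols), 12);
        assert_eq!(flush(12, num_inner_cols), 12);
        // After flushing, the coordinate always sits at the start of a row.
        assert_eq!(flush(9, num_inner_cols) % num_inner_cols, 0);
    }
}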
use std::{error::Error, marker::PhantomData}; use halo2curves::ff::PrimeField; use halo2_proofs::{ circuit::{Layouter, Value}, plonk::{ConstraintSystem, Expression, TableColumn}, }; use log::{debug, warn}; use maybe_rayon::prelude::{IntoParallelIterator, ParallelIterator}; use crate::{ circuit::CircuitError, fieldutils::i128_to_felt, tensor::{Tensor, TensorType}, }; use crate::circuit::lookup::LookupOp; pub type Range = (i128, i128); pub const RANGE_MULTIPLIER: i128 = 2; pub const RESERVED_BLINDING_ROWS_PAD: usize = 3; pub struct SelectorConstructor<F: PrimeField> { pub degree: usize, _marker: PhantomData<F>, } impl<F: PrimeField> SelectorConstructor<F> { pub fn new(degree: usize) -> Self { Self { degree, _marker: PhantomData, } } pub fn get_expr_at_idx(&self, i: usize, expr: Expression<F>) -> Expression<F> { let indices = 0..self.degree; indices .into_par_iter() .filter(|x| *x != i) .map(|i| { if i == 0 { expr.clone() } else { (Expression::Constant(F::from(i as u64))) - expr.clone() } }) .reduce(|| Expression::Constant(F::from(1_u64)), |acc, x| acc * x) } pub fn get_selector_val_at_idx(&self, i: usize) -> F { let indices = 0..self.degree; indices .into_par_iter() .filter(|x| *x != i) .map(|x| { if x == 0 { F::from(i as u64) } else { F::from(x as u64) - F::from(i as u64) } }) .product() } } pub struct Table<F: PrimeField> { pub nonlinearity: LookupOp, pub table_inputs: Vec<TableColumn>, pub col_size: usize, pub table_outputs: Vec<TableColumn>, pub selector_constructor: SelectorConstructor<F>, pub is_assign