import json
import math

import torch
import torch.nn as nn
from torch.nn import functional as F


class CausalSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        assert config.n_embd % config.n_head == 0
        # key, query, value projections for all heads, in a batch
        self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd, bias=config.bias)
        # output projection
        self.c_proj = nn.Linear(config.n_embd, config.n_embd, bias=config.bias)
        self.attn_dropout = nn.Dropout(config.dropout)
        self.resid_dropout = nn.Dropout(config.dropout)
        self.n_head = config.n_head
        self.n_embd = config.n_embd
        self.dropout = config.dropout
        # causal mask so that attention only looks left in the sequence
        self.register_buffer(
            "bias",
            torch.tril(torch.ones(config.block_size, config.block_size))
                 .view(1, 1, config.block_size, config.block_size),
        )

    def forward(self, x):
        B, T, C = x.size()  # batch size, sequence length, embedding dim (n_embd)
        q, k, v = self.c_attn(x).split(self.n_embd, dim=2)
        k = k.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        q = q.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = v.view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        # a finite fill value (-10) rather than -inf, presumably chosen here
        # because it behaves better once the model is quantized into a circuit
        att = att.masked_fill(self.bias[:, :, :T, :T] == 0, float(-10))
        att = F.softmax(att, dim=-1)
        att = self.attn_dropout(att)
        y = att @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        y = y.transpose(1, 2).contiguous().view(B, T, C)
        y = self.resid_dropout(self.c_proj(y))
        return y
class MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.c_fc = nn.Linear(config.n_embd, 4 * config.n_embd)
        self.c_proj = nn.Linear(4 * config.n_embd, config.n_embd)
        self.dropout = nn.Dropout(config.dropout)

    def forward(self, x):
        x = self.c_fc(x)
        # new_gelu is assumed to be defined elsewhere in this file
        # (the tanh GELU approximation used by nanoGPT-style models)
        x = new_gelu(x)
        x = self.c_proj(x)
        x = self.dropout(x)
        return x
class Block(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.ln_1 = nn.LayerNorm(config.n_embd)
        self.attention = CausalSelfAttention(config)
        self.ln_2 = nn.LayerNorm(config.n_embd)
        self.mlp = MLP(config)

    def forward(self, x):
        x = x + self.attention(self.ln_1(x))
        x = x + self.mlp(self.ln_2(x))
        return x


# GPTConfig is assumed to be the usual nanoGPT-style hyperparameter container
shape = [1, 64, 64]
x = torch.ones(1, 64, 64)
config = GPTConfig(block_size=64, vocab_size=65, n_layer=4, n_head=4,
                   n_embd=64, dropout=0.0, bias=False)
model = Block(config)
torch_out = model(x)

torch.onnx.export(model, x, "network.onnx",
                  export_params=True,
                  opset_version=10,
                  do_constant_folding=True,
                  input_names=['input'],
                  output_names=['output'],
                  dynamic_axes={'input': {0: 'batch_size'},
                                'output': {0: 'batch_size'}})

d = x.detach().numpy().reshape([-1]).tolist()
data = dict(input_shapes=[shape],
            input_data=[d],
            output_data=[o.detach().numpy().reshape([-1]).tolist() for o in torch_out])
json.dump(data, open("input.json", 'w'))
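# The export rows in this dump all stop once network.onnx and input.json are on
# disk; none show the proving step itself. The sketch below is a hedged outline
# of the usual ezkl Python flow for those artifacts. The function names exist in
# the ezkl bindings, but argument orders differ between releases and several of
# these calls are async (must be awaited) in newer versions, so treat this as an
# assumption-laden sketch, not the project's canonical pipeline.
import ezkl

ezkl.gen_settings("network.onnx", "settings.json")
ezkl.calibrate_settings("input.json", "network.onnx", "settings.json", "resources")
ezkl.compile_circuit("network.onnx", "network.compiled", "settings.json")
ezkl.get_srs("settings.json")                       # fetch a structured reference string
ezkl.setup("network.compiled", "vk.key", "pk.key")  # generate proving/verifying keys
ezkl.gen_witness("input.json", "network.compiled", "witness.json")
ezkl.prove("witness.json", "network.compiled", "pk.key", "proof.json", "single")
assert ezkl.verify("proof.json", "settings.json", "vk.key")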
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = nn.SELU()(x)
        return m


circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = x.detach().numpy().reshape([-1]).tolist()
data = dict(input_data=[d1])

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = nn.Softplus()(x)
        return m


circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = x.detach().numpy().reshape([-1]).tolist()
data = dict(input_data=[d1])

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = nn.Softsign()(x)
        return m


circuit = MyModel()
x = torch.empty(1, 8).uniform_(0, 1)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = x.detach().numpy().reshape([-1]).tolist()
data = dict(input_data=[d1])

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
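# Before any of these exports are handed to a prover, it is cheap to check that
# the ONNX graph matches PyTorch numerically. This is a small sketch that is not
# part of the original rows; it assumes onnxruntime is installed, and you would
# swap in whichever activation the export actually used.
import numpy as np
import onnxruntime as ort
import torch

sess = ort.InferenceSession("network.onnx")
x = torch.empty(1, 8).uniform_(0, 1)
(onnx_out,) = sess.run(None, {"input": x.numpy()})
# Compare against the corresponding PyTorch op (Softsign for the last export).
torch_out = torch.nn.Softsign()(x).numpy()
assert np.allclose(onnx_out, torch_out, atol=1e-6)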
import io
import json

import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.onnx


class Circuit(nn.Module):
    def __init__(self):
        super(Circuit, self).__init__()
        self.softplus = nn.Softplus()

    def forward(self, x):
        x = self.softplus(x)
        x = torch.cos(x)
        x = torch.sin(x)
        x = torch.tan(x)
        x = torch.acos(x)
        x = torch.asin(x)
        x = torch.atan(x)
        x = torch.tanh(x)
        return (-x).abs().sign()


def main():
    torch_model = Circuit()
    # Input to the model
    shape = [3, 2, 3]
    x = 0.1 * torch.rand(1, *shape, requires_grad=True)
    torch_out = torch_model(x)
    # Export the model
    torch.onnx.export(torch_model, x, "network.onnx",
                      export_params=True,
                      opset_version=10,
                      do_constant_folding=True,
                      input_names=['input'],
                      output_names=['output'],
                      dynamic_axes={'input': {0: 'batch_size'},
                                    'output': {0: 'batch_size'}})

    d = x.detach().numpy().reshape([-1]).tolist()
    # There is a single input, so input_shapes lists one shape
    data = dict(input_shapes=[shape],
                input_data=[d],
                output_data=[o.detach().numpy().reshape([-1]).tolist() for o in torch_out])
    json.dump(data, open("input.json", 'w'))


if __name__ == "__main__":
    main()
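# The op chain above is domain-sensitive: acos and asin are only defined on
# [-1, 1], which is why the input is scaled by 0.1. A quick hypothetical check
# (not in the original row) that the value reaching acos stays in range:
import torch

# With x in [0, 0.1]: softplus gives ~0.69..0.74, cos/sin shrink it further,
# and tan of that stays below 1, so acos/asin downstream are well-defined.
x = 0.1 * torch.rand(1, 3, 2, 3)
v = torch.tan(torch.sin(torch.cos(torch.nn.Softplus()(x))))
assert v.abs().max() <= 1.0, "values entering acos must lie in [-1, 1]"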
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.triu(x)  # upper-triangular part of the input
        return m


circuit = MyModel()
x = torch.empty(1, 3, 3).uniform_(0, 5)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = x.detach().numpy().reshape([-1]).tolist()
data = dict(input_data=[d1])

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
from torch import nn
import torch
import json
import numpy as np


class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()

    def forward(self, x):
        m = torch.tril(x)  # lower-triangular part of the input
        return m


circuit = MyModel()
x = torch.empty(1, 3, 3).uniform_(0, 5)
out = circuit(x)
print(out)

torch.onnx.export(circuit, x, "network.onnx",
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=17,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

d1 = x.detach().numpy().reshape([-1]).tolist()
data = dict(input_data=[d1])

# Serialize data into file:
json.dump(data, open("input.json", 'w'))
import io
import json

import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.onnx


class Circuit(nn.Module):
    def __init__(self, inplace=False):
        super(Circuit, self).__init__()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()
        self.conv = nn.Conv2d(3, 3, (2, 2), 1, 2)
        self._initialize_weights()

    def forward(self, x, y, z):
        x = self.sigmoid(self.conv(y @ x ** 2 + (x) - (self.relu(z)))) + 2
        return (x, self.relu(z) / 3)

    def _initialize_weights(self):
        init.orthogonal_(self.conv.weight)


def main():
    torch_model = Circuit()
    # Inputs to the model
    shape = [3, 2, 2]
    x = 0.1 * torch.rand(1, *shape, requires_grad=True)
    y = 0.1 * torch.rand(1, *shape, requires_grad=True)
    z = 0.1 * torch.rand(1, *shape, requires_grad=True)
    torch_out = torch_model(x, y, z)
    # Export the model
    torch.onnx.export(torch_model, (x, y, z), "network.onnx",
                      export_params=True,
                      opset_version=10,
                      do_constant_folding=True,
                      input_names=['input'],
                      output_names=['output'],
                      dynamic_axes={'input': {0: 'batch_size'},
                                    'output': {0: 'batch_size'}})

    d = x.detach().numpy().reshape([-1]).tolist()
    dy = y.detach().numpy().reshape([-1]).tolist()
    dz = z.detach().numpy().reshape([-1]).tolist()
    data = dict(input_shapes=[shape, shape, shape],
                input_data=[d, dy, dz],
                output_data=[o.detach().numpy().reshape([-1]).tolist() for o in torch_out])
    json.dump(data, open("input.json", 'w'))


if __name__ == "__main__":
    main()
import json
import os

import ezkl
import numpy as np
import torch
import xgboost as xgb
from hummingbird.ml import convert
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from torch import nn
from xgboost import XGBClassifier as Gbc

NUM_CLASSES = 3

iris = load_iris()
X, y = iris.data, iris.target
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = Gbc(n_estimators=12)
clr.fit(X_train, y_train)

# convert to torch
torch_gbt = convert(clr, 'torch')
print(torch_gbt)

# assert that predictions from torch match sklearn's
diffs = []
for i in range(len(X_test)):
    torch_pred = torch_gbt.predict(torch.tensor(X_test[i].reshape(1, -1)))
    sk_pred = clr.predict(X_test[i].reshape(1, -1))
    diffs.append(torch_pred != sk_pred[0])
print("num diff: ", sum(diffs))

# Input to the model
shape = X_train.shape[1:]
x = torch.rand(1, *shape, requires_grad=False)
torch_out = torch_gbt.predict(x)

# Export the model
torch.onnx.export(torch_gbt.model, x, "network.onnx",
                  export_params=True,
                  opset_version=11,
                  do_constant_folding=True,
                  input_names=['input'],
                  output_names=['output'],
                  dynamic_axes={'input': {0: 'batch_size'},
                                'output': {0: 'batch_size'}})

d = x.detach().numpy().reshape([-1]).tolist()
data = dict(input_shapes=[shape],
            input_data=[d],
            output_data=[o.reshape([-1]).tolist() for o in torch_out])
json.dump(data, open("input.json", 'w'))
# make sure you have the dependencies required here already installed
import json
import os

import ezkl
import numpy as np
import torch
import xgboost as xgb
from hummingbird.ml import convert
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from torch import nn
from xgboost import XGBRegressor as Gbc

NUM_CLASSES = 3

X, y = np.random.rand(1000, 4), np.random.rand(1000, 1)
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y)
clr = Gbc(n_estimators=12)
clr.fit(X_train, y_train)

# convert to torch
torch_gbt = convert(clr, 'torch')
print(torch_gbt)

# assert predictions from torch are = to sklearn
diffs = []

# Input to the model
shape = X_train.shape[1:]
x = torch.rand(1, *shape, requires_grad=False)
torch_out = torch_gbt.predict(x)

# Export the model
torch.onnx.export(torch_gbt.model,           # model being run
                  x,                         # model input (or a tuple for multiple inputs)
                  "network.onnx",            # where to save the model (can be a file or file-like object)
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=11,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names=['input'],     # the model's input names
                  output_names=['output'],   # the model's output names
                  dynamic_axes={'input': {0: 'batch_size'},    # variable length axes
                                'output': {0: 'batch_size'}})

# The tail of this row appears truncated in the source; the lines below are
# reconstructed from the pattern of the classifier example above.
d = x.detach().numpy().reshape([-1]).tolist()
data = dict(input_shapes=[shape],
            input_data=[d],
            output_data=[o.reshape([-1]).tolist() for o in torch_out])
json.dump(data, open("input.json", 'w'))
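# The classifier example above checks hummingbird's torch model against xgboost
# prediction-by-prediction; this regressor row drops that check. A hedged
# equivalent, reusing clr, torch_gbt, X_test and np from the snippet above:
import numpy as np
import torch

preds_torch = np.asarray(torch_gbt.predict(torch.tensor(X_test))).reshape(-1)
preds_xgb = clr.predict(X_test).reshape(-1)
print("max abs diff:", np.abs(preds_torch - preds_xgb).max())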
module.exports = {
  preset: 'ts-jest',
  testEnvironment: 'node',
};
use clap::Parser;
use colored_json::ToColoredJson;
use ezkl::commands::Cli;
use ezkl::execute::run;
use ezkl::logger::init_logger;
use log::{debug, error, info};
use rand::prelude::SliceRandom;
use std::env;
use std::error::Error;

// NOTE: the attribute selecting the async runtime (and any cfg gates) on this
// entry point was stripped during extraction.
pub async fn main() -> Result<(), Box<dyn Error>> {
    let args = Cli::parse();
    init_logger();
    banner();
    if env::var("ENABLE_ICICLE_GPU").is_ok() {
        info!("Running with ICICLE GPU");
    } else {
        info!("Running with CPU");
    }
    debug!("command: \n {}", &args.as_json()?.to_colored_json_auto()?);
    let res = run(args.command).await;
    match &res {
        Ok(_) => info!("succeeded"),
        Err(e) => error!("failed: {}", e),
    };
    res.map(|_| ())
}

// Stub entry point for build configurations where the async main is compiled out.
pub fn main() {}
fn banner() {
    let ell: Vec<&str> = vec![
        "for Neural Networks",
        "Linear Algebra",
        "for Layers",
        "for the Laconic",
        "Learning",
        "for Liberty",
        "for the Lyrical",
    ];
    info!(
        "{}",
        format!(
            "

        β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•—  β–ˆβ–ˆβ•—β–ˆβ–ˆβ•—
        β–ˆβ–ˆβ•”β•β•β•β•β•β•šβ•β•β–ˆβ–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘ β–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•‘
        β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—    β–ˆβ–ˆβ–ˆβ•”β• β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•”β• β–ˆβ–ˆβ•‘
        β–ˆβ–ˆβ•”β•β•β•   β–ˆβ–ˆβ–ˆβ•”β•  β–ˆβ–ˆβ•”β•β–ˆβ–ˆβ•— β–ˆβ–ˆβ•‘
        β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—β–ˆβ–ˆβ•‘  β–ˆβ–ˆβ•—β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ•—
        β•šβ•β•β•β•β•β•β•β•šβ•β•β•β•β•β•β•β•šβ•β•  β•šβ•β•β•šβ•β•β•β•β•β•β•

        -----------------------------------------------------------
        Easy Zero Knowledge {}.
        -----------------------------------------------------------
        ",
            ell.choose(&mut rand::thread_rng()).unwrap()
        )
    );
}
///
pub mod modules;
///
pub mod table;
///
pub mod utils;
///
pub mod ops;

pub use ops::chip::*;
pub use ops::*;

/// Tests
#[cfg(test)]
mod tests;
///
pub mod poseidon;
///
pub mod polycommit;
///
pub mod planner;

use halo2_proofs::{
    circuit::Layouter,
    plonk::{ConstraintSystem, Error},
};
use halo2curves::ff::PrimeField;
pub use planner::*;

use crate::tensor::{TensorType, ValTensor};

use super::region::ConstantsMap;

/// Module trait used to extend ezkl functionality
pub trait Module<F: PrimeField + TensorType + PartialOrd> {
    /// Config
    type Config;
    /// The return type after an input assignment
    type InputAssignments;
    /// The inputs used in the run function
    type RunInputs;
    /// The params used in configure
    type Params;

    /// construct new module from config
    fn new(config: Self::Config) -> Self;
    /// Configure
    fn configure(meta: &mut ConstraintSystem<F>, params: Self::Params) -> Self::Config;
    /// Name
    fn name(&self) -> &'static str;
    /// Run the operation the module represents
    fn run(input: Self::RunInputs) -> Result<Vec<Vec<F>>, Box<dyn std::error::Error>>;
    /// Layout inputs
    fn layout_inputs(
        &self,
        layouter: &mut impl Layouter<F>,
        input: &[ValTensor<F>],
        constants: &mut ConstantsMap<F>,
    ) -> Result<Self::InputAssignments, Error>;
    /// Layout
    fn layout(
        &self,
        layouter: &mut impl Layouter<F>,
        input: &[ValTensor<F>],
        row_offset: usize,
        constants: &mut ConstantsMap<F>,
    ) -> Result<ValTensor<F>, Error>;
    /// Number of instance values the module uses every time it is applied
    fn instance_increment_input(&self) -> Vec<usize>;
    /// Number of rows used by the module
    fn num_rows(input_len: usize) -> usize;
}
use std::cmp;
use std::collections::HashMap;
use std::fmt;
use std::marker::PhantomData;

use halo2curves::ff::Field;

use halo2_proofs::{
    circuit::{
        layouter::{RegionColumn, RegionLayouter, RegionShape, SyncDeps, TableLayouter},
        Cell, Layouter, Region, RegionIndex, RegionStart, Table, Value,
    },
    plonk::{
        Advice, Any, Assigned, Assignment, Challenge, Circuit, Column, Error, Fixed, FloorPlanner,
        Instance, Selector, TableColumn,
    },
};
use log::{debug, trace};

pub struct ModulePlanner;

impl FloorPlanner for ModulePlanner {
    fn synthesize<F: Field, CS: Assignment<F> + SyncDeps, C: Circuit<F>>(
        cs: &mut CS,
        circuit: &C,
        config: C::Config,
        constants: Vec<Column<Fixed>>,
    ) -> Result<(), Error> {
        let layouter = ModuleLayouter::new(cs, constants)?;
        circuit.synthesize(config, layouter)
    }
}

pub type ModuleIdx = usize;
pub type RegionIdx = usize;

pub struct ModuleLayouter<'a, F: Field, CS: Assignment<F> + 'a> {
    cs: &'a mut CS,
    constants: Vec<Column<Fixed>>,
    /// Starting row for each region, keyed by module then region.
    regions: HashMap<ModuleIdx, HashMap<RegionIdx, RegionStart>>,
    /// Maps each region to the module it belongs to.
    region_idx: HashMap<RegionIdx, ModuleIdx>,
    /// First empty row for each column, per module.
    columns: HashMap<(ModuleIdx, RegionColumn), usize>,
    table_columns: Vec<TableColumn>,
    _marker: PhantomData<F>,
    current_module: usize,
    total_constants: usize,
}

impl<'a, F: Field, CS: Assignment<F> + 'a> fmt::Debug for ModuleLayouter<'a, F, CS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ModuleLayouter")
            .field("regions", &self.regions)
            .field("columns", &self.columns)
            .field("total_constants", &self.total_constants)
            .finish()
    }
}

impl<'a, F: Field, CS: Assignment<F>> ModuleLayouter<'a, F, CS> {
    pub fn new(cs: &'a mut CS, constants: Vec<Column<Fixed>>) -> Result<Self, Error> {
        let ret = ModuleLayouter {
            cs,
            constants,
            regions: HashMap::default(),
            columns: HashMap::default(),
            region_idx: HashMap::default(),
            table_columns: vec![],
            current_module: 0,
            total_constants: 0,
            _marker: PhantomData,
        };
        Ok(ret)
    }

    /// Convert a linear constant index into a (column, row) coordinate.
    fn get_constant_col_cartesian_coord(
        &self,
        linear_coord: usize,
        col_size: usize,
    ) -> (usize, usize) {
        let x = linear_coord / col_size;
        let y = linear_coord % col_size;
        (x, y)
    }
}

impl<'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> Layouter<F> for ModuleLayouter<'a, F, CS> {
    type Root = Self;

    fn assign_region<A, AR, N, NR>(&mut self, name: N, mut assignment: A) -> Result<AR, Error>
    where
        A: FnMut(Region<'_, F>) -> Result<AR, Error>,
        N: Fn() -> NR,
        NR: Into<String>,
    {
        // region names containing "_enter_module_" switch the current module
        if Into::<String>::into(name()).contains("_enter_module_") {
            let name = Into::<String>::into(name());
            let index = match name.split("_enter_module_").last() {
                Some(v) => v,
                None => {
                    log::error!("Invalid module name");
                    return Err(Error::Synthesis);
                }
            };
            let index = index.parse::<usize>().map_err(|_| {
                log::error!("Invalid module name");
                Error::Synthesis
            })?;
            if !self.regions.contains_key(&index) {
                debug!("spawning module {}", index)
            };
            self.current_module = index;
        }

        let region_index = self.region_idx.len();
        self.region_idx.insert(region_index, self.current_module);

        // Get the shape of the region.
        let mut shape = RegionShape::new(region_index.into());
        {
            let region: &mut dyn RegionLayouter<F> = &mut shape;
            assignment(region.into())?;
        }

        // Lay out from the latest used row in any of the region's columns.
        let region_start = match self.regions.get_mut(&self.current_module) {
            Some(v) => {
                let mut region_start = 0;
                for column in shape.columns().iter() {
                    region_start = cmp::max(
                        region_start,
                        self.columns
                            .get(&(self.current_module, *column))
                            .cloned()
                            .unwrap_or(0),
                    );
                }
                v.insert(region_index, region_start.into());
                region_start
            }
            None => {
                let map = HashMap::from([(region_index, 0.into())]);
                self.regions.insert(self.current_module, map);
                0
            }
        };

        for column in shape.columns() {
            self.columns.insert(
                (self.current_module, *column),
                region_start + shape.row_count(),
            );
        }

        self.cs.enter_region(name);
        let mut region = ModuleLayouterRegion::new(self, region_index.into());
        let result = {
            let region: &mut dyn RegionLayouter<F> = &mut region;
            assignment(region.into())
        }?;
        let constants_to_assign = region.constants;
        self.cs.exit_region();

        if self.constants.is_empty() {
            if !constants_to_assign.is_empty() {
                return Err(Error::NotEnoughColumnsForConstants);
            }
        } else {
            for (constant, advice) in constants_to_assign {
                let (constant_column, y) = crate::graph::GLOBAL_SETTINGS.with(|settings| {
                    match settings.borrow().as_ref() {
                        Some(settings) => {
                            let col_size = settings.available_col_size();
                            let (x, y) = self
                                .get_constant_col_cartesian_coord(self.total_constants, col_size);
                            (self.constants[x], y)
                        }
                        None => (self.constants[0], self.total_constants),
                    }
                });
                self.cs.assign_fixed(
                    || format!("Constant({:?})", constant.evaluate()),
                    constant_column,
                    y,
                    || Value::known(constant),
                )?;
                let region_module = self.region_idx[&advice.region_index];

                self.cs.copy(
                    constant_column.into(),
                    y,
                    advice.column,
                    *self.regions[&region_module][&advice.region_index] + advice.row_offset,
                )?;
                self.total_constants += 1;
            }
        }

        trace!("region {} assigned", region_index);
        trace!("total_constants: {:?}", self.total_constants);

        let max_row_index = self
            .columns
            .iter()
            .filter(|((module, _), _)| *module == self.current_module)
            .map(|(_, row)| *row)
            .max()
            .unwrap_or(0);
        trace!("max_row_index: {:?}", max_row_index);

        Ok(result)
    }

    fn assign_table<A, N, NR>(&mut self, name: N, mut assignment: A) -> Result<(), Error>
    where
        A: FnMut(Table<'_, F>) -> Result<(), Error>,
        N: Fn() -> NR,
        NR: Into<String>,
    {
        self.cs.enter_region(name);
        let mut table =
            halo2_proofs::circuit::SimpleTableLayouter::new(self.cs, &self.table_columns);
        {
            let table: &mut dyn TableLayouter<F> = &mut table;
            assignment(table.into())
        }?;
        let default_and_assigned = table.default_and_assigned;
        self.cs.exit_region();

        let first_unused = {
            match default_and_assigned
                .values()
                .map(|(_, assigned)| {
                    if assigned.iter().all(|b| *b) {
                        Some(assigned.len())
                    } else {
                        None
                    }
                })
                .reduce(|acc, item| match (acc, item) {
                    (Some(a), Some(b)) if a == b => Some(a),
                    _ => None,
                }) {
                Some(Some(len)) => len,
                _ => return Err(Error::Synthesis),
            }
        };

        for column in default_and_assigned.keys() {
            self.table_columns.push(*column);
        }

        for (col, (default_val, _)) in default_and_assigned {
            let default_val = default_val.ok_or(Error::Synthesis)?;
            self.cs
                .fill_from_row(col.inner(), first_unused, default_val)?;
        }

        Ok(())
    }

    fn constrain_instance(
        &mut self,
        cell: Cell,
        instance: Column<Instance>,
        row: usize,
    ) -> Result<(), Error> {
        let module_idx = self.region_idx[&cell.region_index];

        self.cs.copy(
            cell.column,
            *self.regions[&module_idx][&cell.region_index] + cell.row_offset,
            instance.into(),
            row,
        )
    }

    fn get_challenge(&self, challenge: Challenge) -> Value<F> {
        self.cs.get_challenge(challenge)
    }

    fn get_root(&mut self) -> &mut Self::Root {
        self
    }

    fn push_namespace<NR, N>(&mut self, name_fn: N)
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs.push_namespace(name_fn)
    }
    fn pop_namespace(&mut self, gadget_name: Option<String>) {
        self.cs.pop_namespace(gadget_name)
    }
}

struct ModuleLayouterRegion<'r, 'a, F: Field, CS: Assignment<F> + 'a> {
    layouter: &'r mut ModuleLayouter<'a, F, CS>,
    region_index: RegionIndex,
    constants: Vec<(Assigned<F>, Cell)>,
}

impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> fmt::Debug for ModuleLayouterRegion<'r, 'a, F, CS> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ModuleLayouterRegion")
            .field("layouter", &self.layouter)
            .field("region_index", &self.region_index)
            .finish()
    }
}

impl<'r, 'a, F: Field, CS: Assignment<F> + 'a> ModuleLayouterRegion<'r, 'a, F, CS> {
    fn new(layouter: &'r mut ModuleLayouter<'a, F, CS>, region_index: RegionIndex) -> Self {
        ModuleLayouterRegion {
            layouter,
            region_index,
            constants: vec![],
        }
    }
}

impl<'r, 'a, F: Field, CS: Assignment<F> + 'a + SyncDeps> RegionLayouter<F>
    for ModuleLayouterRegion<'r, 'a, F, CS>
{
    fn instance_value(
        &mut self,
        instance: Column<Instance>,
        row: usize,
    ) -> Result<Value<F>, Error> {
        self.layouter.cs.query_instance(instance, row)
    }

    fn enable_selector<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        selector: &Selector,
        offset: usize,
    ) -> Result<(), Error> {
        let module_idx = self.layouter.region_idx[&self.region_index];
        self.layouter.cs.enable_selector(
            annotation,
            selector,
            *self.layouter.regions[&module_idx][&self.region_index] + offset,
        )
    }

    fn name_column<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        column: Column<Any>,
    ) {
        self.layouter.cs.annotate_column(annotation, column);
    }

    fn assign_advice<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        column: Column<Advice>,
        offset: usize,
        to: &'v mut (dyn FnMut() -> Value<Assigned<F>> + 'v),
    ) -> Result<Cell, Error> {
        let module_idx = self.layouter.region_idx[&self.region_index];
        self.layouter.cs.assign_advice(
            annotation,
            column,
            *self.layouter.regions[&module_idx][&self.region_index] + offset,
            to,
        )?;

        Ok(Cell {
            region_index: self.region_index,
            row_offset: offset,
            column: column.into(),
        })
    }

    fn assign_advice_from_constant<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        column: Column<Advice>,
        offset: usize,
        constant: Assigned<F>,
    ) -> Result<Cell, Error> {
        let advice =
            self.assign_advice(annotation, column, offset, &mut || Value::known(constant))?;
        self.constrain_constant(advice, constant)?;

        Ok(advice)
    }

    fn assign_advice_from_instance<'v>(
        &mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        instance: Column<Instance>,
        row: usize,
        advice: Column<Advice>,
        offset: usize,
    ) -> Result<(Cell, Value<F>), Error> {
        let value = self.layouter.cs.query_instance(instance, row)?;

        let cell = self.assign_advice(annotation, advice, offset, &mut || value.to_field())?;

        let module_idx = self.layouter.region_idx[&cell.region_index];

        self.layouter.cs.copy(
            cell.column,
            *self.layouter.regions[&module_idx][&cell.region_index] + cell.row_offset,
            instance.into(),
            row,
        )?;

        Ok((cell, value))
    }

    fn assign_fixed<'v>(
        &'v mut self,
        annotation: &'v (dyn Fn() -> String + 'v),
        column: Column<Fixed>,
        offset: usize,
        to: &'v mut (dyn FnMut() -> Value<Assigned<F>> + 'v),
    ) -> Result<Cell, Error> {
        let module_idx = self.layouter.region_idx[&self.region_index];
        self.layouter.cs.assign_fixed(
            annotation,
            column,
            *self.layouter.regions[&module_idx][&self.region_index] + offset,
            to,
        )?;

        Ok(Cell {
            region_index: self.region_index,
            row_offset: offset,
            column: column.into(),
        })
    }

    fn constrain_constant(&mut self, cell: Cell, constant: Assigned<F>) -> Result<(), Error> {
        self.constants.push((constant, cell));
        Ok(())
    }

    fn constrain_equal(&mut self, left: Cell, right: Cell) -> Result<(), Error> {
        let left_module = self.layouter.region_idx[&left.region_index];
        let right_module = self.layouter.region_idx[&right.region_index];

        self.layouter.cs.copy(
            left.column,
            *self.layouter.regions[&left_module][&left.region_index] + left.row_offset,
            right.column,
            *self.layouter.regions[&right_module][&right.region_index] + right.row_offset,
        )?;

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use halo2curves::pasta::vesta;

    use super::ModulePlanner;
    use halo2_proofs::{
        dev::MockProver,
        plonk::{Advice, Circuit, Column, Error},
    };
    #[test]
    fn not_enough_columns_for_constants() {
        struct MyCircuit {}
        impl Circuit<vesta::Scalar> for MyCircuit {
            type Config = Column<Advice>;
            type FloorPlanner = ModulePlanner;
            type Params = ();

            fn without_witnesses(&self) -> Self {
                MyCircuit {}
            }

            fn configure(
                meta: &mut halo2_proofs::plonk::ConstraintSystem<vesta::Scalar>,
            ) -> Self::Config {
                meta.advice_column()
            }

            fn synthesize(
                &self,
                config: Self::Config,
                mut layouter: impl halo2_proofs::circuit::Layouter<vesta::Scalar>,
            ) -> Result<(), halo2_proofs::plonk::Error> {
                layouter.assign_region(
                    || "assign constant",
                    |mut region| {
                        region.assign_advice_from_constant(
                            || "one",
                            config,
                            0,
                            vesta::Scalar::one(),
                        )
                    },
                )?;

                Ok(())
            }
        }

        let circuit = MyCircuit {};
        assert!(matches!(
            MockProver::run(3, &circuit, vec![]).unwrap_err(),
            Error::NotEnoughColumnsForConstants,
        ));
    }
}
/*
An easy-to-use implementation of the Poseidon Hash in the form of a Halo2 Chip. While the Poseidon Hash function
is already implemented in halo2_gadgets, there is no wrapper chip that makes it easy to use in other circuits.
Thanks to https:
*/

use std::collections::HashMap;

use halo2_proofs::halo2curves::bn256::Fr as Fp;
use halo2_proofs::poly::commitment::{Blind, CommitmentScheme, Params};
use halo2_proofs::{circuit::*, plonk::*};
use halo2curves::bn256::G1Affine;
use halo2curves::group::prime::PrimeCurveAffine;
use halo2curves::group::Curve;
use halo2curves::CurveAffine;

use crate::circuit::region::ConstantsMap;
use crate::tensor::{Tensor, ValTensor, ValType, VarTensor};

use super::Module;

pub const NUM_INSTANCE_COLUMNS: usize = 0;
pub const NUM_INNER_COLS: usize = 1;

pub struct PolyCommitConfig {
    pub inputs: VarTensor,
}

type InputAssignments = ();

pub struct PolyCommitChip {
    config: PolyCommitConfig,
}

impl PolyCommitChip {
    /// Commit to the message by filling Lagrange polynomials and committing to
    /// them column by column.
    pub fn commit<Scheme: CommitmentScheme<Scalar = Fp, Curve = G1Affine>>(
        message: Vec<Scheme::Scalar>,
        num_unusable_rows: u32,
        params: &Scheme::ParamsProver,
    ) -> Vec<G1Affine> {
        let k = params.k();
        let domain = halo2_proofs::poly::EvaluationDomain::new(2, k);
        let n = 2_u64.pow(k) - num_unusable_rows as u64;
        let num_poly = (message.len() / n as usize) + 1;
        let mut poly = vec![domain.empty_lagrange(); num_poly];

        (0..num_unusable_rows).for_each(|i| {
            for p in &mut poly {
                p[(n + i as u64) as usize] = Blind::default().0;
            }
        });

        for (i, m) in message.iter().enumerate() {
            let x = i / (n as usize);
            let y = i % (n as usize);
            poly[x][y] = *m;
        }

        let mut advice_commitments_projective = vec![];
        for a in poly {
            advice_commitments_projective.push(params.commit_lagrange(&a, Blind::default()))
        }

        let mut advice_commitments =
            vec![G1Affine::identity(); advice_commitments_projective.len()];
        <G1Affine as CurveAffine>::CurveExt::batch_normalize(
            &advice_commitments_projective,
            &mut advice_commitments,
        );
        advice_commitments
    }
}

impl Module<Fp> for PolyCommitChip {
    type Config = PolyCommitConfig;
    type InputAssignments = InputAssignments;
    type RunInputs = Vec<Fp>;
    type Params = (usize, usize);

    fn name(&self) -> &'static str {
        "PolyCommit"
    }

    fn instance_increment_input(&self) -> Vec<usize> {
        vec![0]
    }

    fn new(config: Self::Config) -> Self {
        Self { config }
    }

    fn configure(meta: &mut ConstraintSystem<Fp>, params: Self::Params) -> Self::Config {
        let inputs = VarTensor::new_unblinded_advice(meta, params.0, NUM_INNER_COLS, params.1);
        Self::Config { inputs }
    }

    fn layout_inputs(
        &self,
        _: &mut impl Layouter<Fp>,
        _: &[ValTensor<Fp>],
        _: &mut ConstantsMap<Fp>,
    ) -> Result<Self::InputAssignments, Error> {
        Ok(())
    }

    fn layout(
        &self,
        layouter: &mut impl Layouter<Fp>,
        input: &[ValTensor<Fp>],
        _: usize,
        constants: &mut ConstantsMap<Fp>,
    ) -> Result<ValTensor<Fp>, Error> {
        assert_eq!(input.len(), 1);
        let local_constants = constants.clone();
        layouter.assign_region(
            || "PolyCommit",
            |mut region| {
                let mut local_inner_constants = local_constants.clone();
                let res = self.config.inputs.assign(
                    &mut region,
                    0,
                    &input[0],
                    &mut local_inner_constants,
                )?;
                *constants = local_inner_constants;
                Ok(res)
            },
        )
    }

    fn run(message: Vec<Fp>) -> Result<Vec<Vec<Fp>>, Box<dyn std::error::Error>> {
        Ok(vec![message])
    }

    fn num_rows(_: usize) -> usize {
        0
    }
}

#[cfg(test)]
mod tests {
    use crate::circuit::modules::ModulePlanner;

    use super::*;

    use std::marker::PhantomData;

    use halo2_proofs::{
        circuit::{Layouter, SimpleFloorPlanner, Value},
        plonk::{Circuit, ConstraintSystem},
    };
    use halo2curves::ff::Field;

    const K: usize = 8;
    const R: usize = 2048;
    struct HashCircuit {
        message: ValTensor<Fp>,
    }

    impl Circuit<Fp> for HashCircuit {
        type Config = PolyCommitConfig;
        type FloorPlanner = ModulePlanner;
        type Params = ();

        fn without_witnesses(&self) -> Self {
            let empty_val: Vec<ValType<Fp>> = vec![Value::<Fp>::unknown().into(); R];
            let message: Tensor<ValType<Fp>> = empty_val.into_iter().into();

            Self {
                message: message.into(),
            }
        }

        fn configure(meta: &mut ConstraintSystem<Fp>) -> Self::Config {
            let params = (K, R);
            PolyCommitChip::configure(meta, params)
        }

        fn synthesize(
            &self,
            config: Self::Config,
            mut layouter: impl Layouter<Fp>,
        ) -> Result<(), Error> {
            let polycommit_chip = PolyCommitChip::new(config);
            let _ = polycommit_chip.layout(
                &mut layouter,
                &[self.message.clone()],
                0,
                &mut HashMap::new(),
            );
            Ok(())
        }
    }
    #[test]
    fn polycommit_chip_for_a_range_of_input_sizes() {
        let rng = rand::rngs::OsRng;

        env_logger::init();

        {
            let i = 32;
            println!(
                "i is {} -------------------------------------------------",
                i
            );

            let message: Vec<Fp> = (0..i).map(|_| Fp::random(rng)).collect::<Vec<_>>();
            let message: Tensor<ValType<Fp>> =
                message.into_iter().map(|m| Value::known(m).into()).into();

            let circuit = HashCircuit {
                message: message.into(),
            };
            let prover = halo2_proofs::dev::MockProver::run(K as u32, &circuit, vec![]).unwrap();
            assert_eq!(prover.verify(), Ok(()))
        }
    }
    #[test]
    fn polycommit_chip_much_longer_input() {
        env_logger::init();

        let rng = rand::rngs::OsRng;

        let message: Vec<Fp> = (0..2048).map(|_| Fp::random(rng)).collect::<Vec<_>>();
        let message: Tensor<ValType<Fp>> =
            message.into_iter().map(|m| Value::known(m).into()).into();

        let circuit = HashCircuit {
            message: message.into(),
        };
        let prover = halo2_proofs::dev::MockProver::run(K as u32, &circuit, vec![]).unwrap();
        assert_eq!(prover.verify(), Ok(()))
    }
}
/*
An easy-to-use implementation of the Poseidon Hash in the form of a Halo2 Chip. While the Poseidon Hash function
is already implemented in halo2_gadgets, there is no wrapper chip that makes it easy to use in other circuits.
Thanks to https:
*/

pub mod poseidon_params;
pub mod spec;

use halo2_gadgets::poseidon::{primitives::*, Hash, Pow5Chip, Pow5Config};
use halo2_proofs::arithmetic::Field;
use halo2_proofs::halo2curves::bn256::Fr as Fp;
use halo2_proofs::{circuit::*, plonk::*};
use maybe_rayon::prelude::ParallelIterator;
use maybe_rayon::slice::ParallelSlice;

use std::marker::PhantomData;

use crate::circuit::region::ConstantsMap;
use crate::tensor::{Tensor, ValTensor, ValType};

use super::Module;

pub const NUM_INSTANCE_COLUMNS: usize = 1;

pub struct PoseidonConfig<const WIDTH: usize, const RATE: usize> {
    pub hash_inputs: Vec<Column<Advice>>,
    pub instance: Option<Column<Instance>>,
    pub pow5_config: Pow5Config<Fp, WIDTH, RATE>,
}

type InputAssignments = (Vec<AssignedCell<Fp, Fp>>, AssignedCell<Fp, Fp>);

pub struct PoseidonChip<
    S: Spec<Fp, WIDTH, RATE> + Sync,
    const WIDTH: usize,
    const RATE: usize,
    const L: usize,
> {
    config: PoseidonConfig<WIDTH, RATE>,
    _marker: PhantomData<S>,
}

impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
    PoseidonChip<S, WIDTH, RATE, L>
{
    pub fn configure_with_cols(
        meta: &mut ConstraintSystem<Fp>,
        partial_sbox: Column<Advice>,
        rc_a: [Column<Fixed>; WIDTH],
        rc_b: [Column<Fixed>; WIDTH],
        hash_inputs: Vec<Column<Advice>>,
        instance: Option<Column<Instance>>,
    ) -> PoseidonConfig<WIDTH, RATE> {
        let pow5_config = Pow5Chip::configure::<S>(
            meta,
            hash_inputs.clone().try_into().unwrap(),
            partial_sbox,
            rc_a,
            rc_b,
        );

        PoseidonConfig {
            pow5_config,
            instance,
            hash_inputs,
        }
    }
}

impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
    PoseidonChip<S, WIDTH, RATE, L>
{
    pub fn configure_with_optional_instance(
        meta: &mut ConstraintSystem<Fp>,
        instance: Option<Column<Instance>>,
    ) -> PoseidonConfig<WIDTH, RATE> {
        let hash_inputs = (0..WIDTH).map(|_| meta.advice_column()).collect::<Vec<_>>();
        for input in &hash_inputs {
            meta.enable_equality(*input);
        }

        let partial_sbox = meta.advice_column();
        let rc_a = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();
        let rc_b = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();

        for input in hash_inputs.iter().take(WIDTH) {
            meta.enable_equality(*input);
        }
        meta.enable_constant(rc_b[0]);

        Self::configure_with_cols(
            meta,
            partial_sbox,
            rc_a.try_into().unwrap(),
            rc_b.try_into().unwrap(),
            hash_inputs,
            instance,
        )
    }
}

impl<S: Spec<Fp, WIDTH, RATE> + Sync, const WIDTH: usize, const RATE: usize, const L: usize>
    Module<Fp> for PoseidonChip<S, WIDTH, RATE, L>
{
    type Config = PoseidonConfig<WIDTH, RATE>;
    type InputAssignments = InputAssignments;
    type RunInputs = Vec<Fp>;
    type Params = ();

    fn name(&self) -> &'static str {
        "Poseidon"
    }

    fn instance_increment_input(&self) -> Vec<usize> {
        vec![1]
    }

    fn new(config: Self::Config) -> Self {
        Self {
            config,
            _marker: PhantomData,
        }
    }

    fn configure(meta: &mut ConstraintSystem<Fp>, _: Self::Params) -> Self::Config {
        let hash_inputs = (0..WIDTH).map(|_| meta.advice_column()).collect::<Vec<_>>();
        for input in &hash_inputs {
            meta.enable_equality(*input);
        }

        let partial_sbox = meta.advice_column();
        let rc_a = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();
        let rc_b = (0..WIDTH).map(|_| meta.fixed_column()).collect::<Vec<_>>();

        for input in hash_inputs.iter().take(WIDTH) {
            meta.enable_equality(*input);
        }
        meta.enable_constant(rc_b[0]);

        let instance = meta.instance_column();
        meta.enable_equality(instance);

        Self::configure_with_cols(
            meta,
            partial_sbox,
            rc_a.try_into().unwrap(),
            rc_b.try_into().unwrap(),
            hash_inputs,
            Some(instance),
        )
    }

    fn layout_inputs(
        &self,
        layouter: &mut impl Layouter<Fp>,
        message: &[ValTensor<Fp>],
        constants: &mut ConstantsMap<Fp>,
    ) -> Result<Self::InputAssignments, Error> {
        assert_eq!(message.len(), 1);
        let message = message[0].clone();

        let start_time = instant::Instant::now();

        let local_constants = constants.clone();

        let res = layouter.assign_region(
            || "load message",
            |mut region| {
                let assigned_message: Result<Vec<AssignedCell<Fp, Fp>>, Error> = match &message {
                    ValTensor::Value { inner: v, .. } => v
                        .iter()
                        .enumerate()
                        .map(|(i, value)| {
                            let x = i % WIDTH;
                            let y = i / WIDTH;

                            match value {
                                ValType::Value(v) => region.assign_advice(
                                    || format!("load message_{}", i),
                                    self.config.hash_inputs[x],
                                    y,
                                    || *v,
                                ),
                                ValType::PrevAssigned(v) | ValType::AssignedConstant(v, ..) => {
                                    Ok(v.clone())
                                }
                                ValType::Constant(f) => {
                                    if local_constants.contains_key(f) {
                                        Ok(constants.get(f).unwrap().assigned_cell().ok_or({
                                            log::error!("constant not previously assigned");
                                            Error::Synthesis
                                        })?)
                                    } else {
                                        let res = region.assign_advice_from_constant(
                                            || format!("load message_{}", i),
                                            self.config.hash_inputs[x],
                                            y,
                                            *f,
                                        )?;
                                        constants
                                            .insert(*f, ValType::AssignedConstant(res.clone(), *f));
                                        Ok(res)
                                    }
                                }
                                e => {
                                    log::error!(
                                        "wrong input type {:?}, must be previously assigned",
                                        e
                                    );
                                    Err(Error::Synthesis)
                                }
                            }
                        })
                        .collect(),
                    ValTensor::Instance {
                        dims,
                        inner: col,
                        idx,
                        initial_offset,
                        ..
                    } => {
                        let num_elems = dims[*idx].iter().product::<usize>();
                        (0..num_elems)
                            .map(|i| {
                                let x = i % WIDTH;
                                let y = i / WIDTH;
                                region.assign_advice_from_instance(
                                    || "pub input anchor",
                                    *col,
                                    initial_offset + i,
                                    self.config.hash_inputs[x],
                                    y,
                                )
                            })
                            .collect()
                    }
                };

                let offset = message.len() / WIDTH + 1;

                let zero_val = region
                    .assign_advice_from_constant(
                        || "",
                        self.config.hash_inputs[0],
                        offset,
                        Fp::ZERO,
                    )
                    .unwrap();

                Ok((assigned_message?, zero_val))
            },
        );
        log::trace!(
            "input (N={:?}) layout took: {:?}",
            message.len(),
            start_time.elapsed()
        );
        res
    }

    fn layout(
        &self,
        layouter: &mut impl Layouter<Fp>,
        input: &[ValTensor<Fp>],
        row_offset: usize,
        constants: &mut ConstantsMap<Fp>,
    ) -> Result<ValTensor<Fp>, Error> {
        let (mut input_cells, zero_val) = self.layout_inputs(layouter, input, constants)?;
        let mut assigned_input: Tensor<ValType<Fp>> =
            input_cells.iter().map(|e| ValType::from(e.clone())).into();
        let len = assigned_input.len();

        let start_time = instant::Instant::now();

        let mut one_iter = false;
        // fold the inputs down to a single hash by repeatedly hashing L-sized blocks
        while input_cells.len() > 1 || !one_iter {
            let hashes: Result<Vec<AssignedCell<Fp, Fp>>, Error> = input_cells
                .chunks(L)
                .enumerate()
                .map(|(i, block)| {
                    let _start_time = instant::Instant::now();

                    let mut block = block.to_vec();
                    let remainder = block.len() % L;

                    if remainder != 0 {
                        block.extend(vec![zero_val.clone(); L - remainder]);
                    }

                    let pow5_chip = Pow5Chip::construct(self.config.pow5_config.clone());
                    let hasher = Hash::<_, _, S, ConstantLength<L>, WIDTH, RATE>::init(
                        pow5_chip,
                        layouter.namespace(|| "block_hasher"),
                    )?;

                    let hash = hasher.hash(
                        layouter.namespace(|| "hash"),
                        block.to_vec().try_into().map_err(|_| Error::Synthesis)?,
                    );

                    if i == 0 {
                        log::trace!("block (L={:?}) took: {:?}", L, _start_time.elapsed());
                    }

                    hash
                })
                .collect();
            log::trace!("hashes (N={:?}) took: {:?}", len, start_time.elapsed());
            one_iter = true;
            input_cells = hashes?;
        }

        let duration = start_time.elapsed();
        log::trace!("layout (N={:?}) took: {:?}", len, duration);

        let result = Tensor::from(input_cells.iter().map(|e| ValType::from(e.clone())));

        let output = match result[0].clone() {
            ValType::PrevAssigned(v) => v,
            _ => {
                log::error!("wrong input type, must be previously assigned");
                return Err(Error::Synthesis);
            }
        };

        if let Some(instance) = self.config.instance {
            layouter.assign_region(
                || "constrain output",
                |mut region| {
                    let expected_var = region.assign_advice_from_instance(
                        || "pub input anchor",
                        instance,
                        row_offset,
                        self.config.hash_inputs[0],
                        0,
                    )?;

                    region.constrain_equal(output.cell(), expected_var.cell())
                },
            )?;

            assigned_input.reshape(input[0].dims()).map_err(|e| {
                log::error!("reshape failed: {:?}", e);
                Error::Synthesis
            })?;

            Ok(assigned_input.into())
        } else {
            Ok(result.into())
        }
    }

    fn run(message: Vec<Fp>) -> Result<Vec<Vec<Fp>>, Box<dyn std::error::Error>> {
        let mut hash_inputs = message;

        let len = hash_inputs.len();

        let start_time = instant::Instant::now();

        let mut one_iter = false;
        // fold the inputs down to a single hash by repeatedly hashing L-sized blocks
        while hash_inputs.len() > 1 || !one_iter {
            let hashes: Vec<Fp> = hash_inputs
                .par_chunks(L)
                .map(|block| {
                    let mut block = block.to_vec();
                    let remainder = block.len() % L;

                    if remainder != 0 {
                        block.extend(vec![Fp::ZERO; L - remainder].iter());
                    }

                    let message = block.try_into().map_err(|_| Error::Synthesis)?;

                    Ok(halo2_gadgets::poseidon::primitives::Hash::<
                        _,
                        S,
                        ConstantLength<L>,
                        { WIDTH },
                        { RATE },
                    >::init()
                    .hash(message))
                })
                .collect::<Result<Vec<_>, Error>>()?;
            one_iter = true;
            hash_inputs = hashes;
        }

        let duration = start_time.elapsed();
        log::trace!("run (N={:?}) took: {:?}", len, duration);

        Ok(vec![hash_inputs])
    }

    fn num_rows(mut input_len: usize) -> usize {
        let fixed_cost: usize = 41 * L;

        let mut num_rows = 0;

        loop {
            // the number of chunks of size L that can be made from the input
            let num_chunks = input_len / L + 1;
            num_rows += num_chunks * fixed_cost;
            if num_chunks == 1 {
                break;
            }
            input_len = num_chunks;
        }

        num_rows
    }
}

#[cfg(test)]
mod tests {
    use crate::circuit::modules::ModulePlanner;

    use super::{
        spec::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH},
        *,
    };

    use std::{collections::HashMap, marker::PhantomData};

    use halo2_gadgets::poseidon::primitives::Spec;
    use halo2_proofs::{
        circuit::{Layouter, SimpleFloorPlanner, Value},
        plonk::{Circuit, ConstraintSystem},
    };
    use halo2curves::ff::Field;

    const WIDTH: usize = POSEIDON_WIDTH;
    const RATE: usize = POSEIDON_RATE;
    const R: usize = 240;

    struct HashCircuit<S: Spec<Fp, WIDTH, RATE>, const L: usize> {
        message: ValTensor<Fp>,
        _spec: PhantomData<S>,
    }

    impl<S: Spec<Fp, WIDTH, RATE>, const L: usize> Circuit<Fp> for HashCircuit<S, L> {
        type Config = PoseidonConfig<WIDTH, RATE>;
        type FloorPlanner = ModulePlanner;
        type Params = ();

        fn without_witnesses(&self) -> Self {
            let empty_val: Vec<ValType<Fp>> = vec![Value::<Fp>::unknown().into()];
            let message: Tensor<ValType<Fp>> = empty_val.into_iter().into();

            Self {
                message: message.into(),
                _spec: PhantomData,
            }
        }

        fn configure(meta: &mut ConstraintSystem<Fp>) -> PoseidonConfig<WIDTH, RATE> {
            PoseidonChip::<PoseidonSpec, WIDTH, RATE, L>::configure(meta, ())
        }

        fn synthesize(
            &self,
            config: PoseidonConfig<WIDTH, RATE>,
            mut layouter: impl Layouter<Fp>,
        ) -> Result<(), Error> {
            let chip: PoseidonChip<PoseidonSpec, WIDTH, RATE, L> = PoseidonChip::new(config);
            chip.layout(
                &mut layouter,
                &[self.message.clone()],
                0,
                &mut HashMap::new(),
            )?;

            Ok(())
        }
    }
    #[test]
    fn poseidon_hash() {
        let rng = rand::rngs::OsRng;

        let message = [Fp::random(rng), Fp::random(rng)];
        let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 2>::run(message.to_vec()).unwrap();

        let message: Tensor<ValType<Fp>> =
            message.into_iter().map(|m| Value::known(m).into()).into();

        let k = 9;
        let circuit = HashCircuit::<PoseidonSpec, 2> {
            message: message.into(),
            _spec: PhantomData,
        };
        let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
        assert_eq!(prover.verify(), Ok(()))
    }
    #[test]
    fn poseidon_hash_longer_input() {
        let rng = rand::rngs::OsRng;

        let message = [Fp::random(rng), Fp::random(rng), Fp::random(rng)];
        let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 3>::run(message.to_vec()).unwrap();

        let message: Tensor<ValType<Fp>> =
            message.into_iter().map(|m| Value::known(m).into()).into();

        let k = 9;
        let circuit = HashCircuit::<PoseidonSpec, 3> {
            message: message.into(),
            _spec: PhantomData,
        };
        let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
        assert_eq!(prover.verify(), Ok(()))
    }
    #[test]
    fn hash_for_a_range_of_input_sizes() {
        let rng = rand::rngs::OsRng;

        env_logger::init();

        {
            let i = 32;
            println!(
                "i is {} -------------------------------------------------",
                i
            );

            let message: Vec<Fp> = (0..i).map(|_| Fp::random(rng)).collect::<Vec<_>>();
            let output =
                PoseidonChip::<PoseidonSpec, WIDTH, RATE, 32>::run(message.clone()).unwrap();

            let message: Tensor<ValType<Fp>> =
                message.into_iter().map(|m| Value::known(m).into()).into();

            let k = 17;
            let circuit = HashCircuit::<PoseidonSpec, 32> {
                message: message.into(),
                _spec: PhantomData,
            };
            let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
            assert_eq!(prover.verify(), Ok(()))
        }
    }
    #[test]
    fn poseidon_hash_much_longer_input() {
        let rng = rand::rngs::OsRng;

        let message: Vec<Fp> = (0..2048).map(|_| Fp::random(rng)).collect::<Vec<_>>();
        let output = PoseidonChip::<PoseidonSpec, WIDTH, RATE, 25>::run(message.clone()).unwrap();

        let message: Tensor<ValType<Fp>> =
            message.into_iter().map(|m| Value::known(m).into()).into();

        let k = 17;
        let circuit = HashCircuit::<PoseidonSpec, 25> {
            message: message.into(),
            _spec: PhantomData,
        };
        let prover = halo2_proofs::dev::MockProver::run(k, &circuit, output).unwrap();
        assert_eq!(prover.verify(), Ok(()))
    }
}
use halo2_proofs::halo2curves::bn256::Fr as Fp;

pub(crate) const ROUND_CONSTANTS: [[Fp; 2]; 64] = [
    [
        Fp::from_raw([0x6c7d_c0db_d0ab_d7a7, 0xa71a_a177_534c_dd1b, 0xfe1f_aaba_294c_ba38, 0x09c4_6e9e_c68e_9bd4]),
        Fp::from_raw([0x3c1d_83ff_a604_cb81, 0xc514_2b3a_e405_b834, 0x2a97_ed93_7f31_35cf, 0x0c03_5653_0896_eec4]),
    ],
    [
        Fp::from_raw([0x317e_a977_cc15_4a30, 0xa00e_a5aa_bd62_68bd, 0x142e_5118_2bb5_4cf4, 0x1e28_a1d9_3569_8ad1]),
        Fp::from_raw([0x4cf9_e2b1_2b91_251f, 0x0e57_57c3_e008_db96, 0x0809_65db_30e2_98e4, 0x27af_2d83_1a9d_2748]),
    ],
    [
        Fp::from_raw([0x79aa_f435_45b7_4e03, 0x4129_1462_f214_cd08, 0x3a6a_3cfe_16ae_175a, 0x1e6f_11ce_60fc_8f51]),
        Fp::from_raw([0xf719_2062_68d1_42d3, 0x0446_2ed1_4c36_13d8, 0x8541_819c_b681_f0be, 0x2a67_384d_3bbd_5e43]),
    ],
    [
        Fp::from_raw([0x3640_8f5d_5c9f_45d0, 0xb985_e381_f025_1889, 0x1609_f8e1_2fbf_ecf0, 0x0b66_fdf3_5609_3a61]),
        Fp::from_raw([0xdaa6_852d_bdb0_9e21, 0x0b26_c83c_c5ce_beed, 0x830c_6109_3c2a_de37, 0x012e_e3ec_1e78_d470]),
    ],
    [
        Fp::from_raw([0x2d10_8e7b_445b_b1b9, 0x6cd1_c431_b099_b6bb, 0xfd88_f67f_8175_e3fd, 0x0252_ba5f_6760_bfbd]),
        Fp::from_raw([0xef5a_eaad_7ca9_32f1, 0x5439_1a89_35ff_71d6, 0x6c6b_ec3c_ef54_2963, 0x1794_74cc_eca5_ff67]),
    ],
    [
        Fp::from_raw([0x7e1a_2589_bbed_2b91, 0x9c1f_974a_2649_69b3, 0x9228_ff4a_503f_d4ed, 0x2c24_2613_79a5_1bfa]),
        Fp::from_raw([0x53e6_6c05_5180_1b05, 0xc2f6_3f50_01fc_0fc5, 0xac2f_288b_d069_5b43, 0x1cc1_d7b6_2692_e63e]),
    ],
    [
        Fp::from_raw([0x5d9e_ff5f_d9c9_1b56, 0x0078_4dbf_17fb_acd0, 0xb2ed_55f8_5297_9e96, 0x2550_5930_1aad_a98b]),
        Fp::from_raw([0xb11c_29ce_7e59_efd9, 0xaea2_4234_970a_8193, 0x79e1_f5c0_eccd_32b3, 0x2843_7be3_ac1c_b2e4]),
    ],
    [
        Fp::from_raw([0x3387_62c3_7f5f_2043, 0x1854_8da8_fb4f_78d4, 0x1ca4_fa6b_5376_6eb1, 0x2821_6a44_2f2e_1f71]),
        Fp::from_raw([0x131f_2377_3234_82c9, 0xeee1_efce_0309_4581, 0x1f39_f4e7_056d_d03f, 0x2c1f_47cd_17fa_5adf]),
    ],
    [
        Fp::from_raw([0x646b_8566_a621_afc9, 0xd9da_fca2_7663_8a63, 0x8632_bcc9_356c_eb7d, 0x07ab_ad02_b7a5_ebc4]),
        Fp::from_raw([0x37da_0c4d_15f9_6c3c, 0x9429_f908_80a6_9cd1, 0x275b_33ff_aab5_1dfe, 0x0230_2646_01ff_df29]),
    ],
    [
        Fp::from_raw([0x717e_5d66_899a_a0a9, 0xa864_4145_57ee_289e, 0xa0f1_6865_6497_ca40, 0x1bc9_7305_4e51_d905]),
        Fp::from_raw([0x2a6b_2228_8f0a_67fc, 0xd249_aff5_c2d8_421f, 0x206c_3157_e863_41ed, 0x2e1c_22f9_6443_5008]),
    ],
    [
        Fp::from_raw([0xa704_52bc_2bba_86b8, 0x9e8e_a159_8e46_c9f7, 0x121c_1d5f_461b_bc50, 0x1224_f38d_f67c_5378]),
        Fp::from_raw([0x69d2_9891_86cd_e20e, 0xd7bf_e8cd_9dfe_da19, 0x9280_b4bd_9ed0_068f, 0x02e4_e69d_8ba5_9e51]),
    ],
    [
        Fp::from_raw([0x6d47_e973_5d98_018e, 0x4f19_ee36_4e65_3f07, 0x7f5d_f81f_c04f_f3ee, 0x1f1e_ccc3_4aab_a013]),
        Fp::from_raw([0xeacb_8a4d_4284_f582, 0x1424_4480_32cd_1819, 0x7426_6c30_39a9_a731, 0x1672_ad3d_709a_3539]),
    ],
    [
        Fp::from_raw([0x1d2e_d602_df8c_8fc7, 0xcda6_961f_284d_2499, 0x56f4_4af5_192b_4ae9, 0x283e_3fdc_2c6e_420c]),
        Fp::from_raw([0x614f_bd69_ff39_4bcc, 0x6837_51f8_fdff_59d6, 0xd0db_0957_170f_a013, 0x1c2a_3d12_0c55_0ecf]),
    ],
    [
        Fp::from_raw([0x96cb_6b81_7765_3fbd, 0x143a_9a43_773e_a6f2, 0xf789_7a73_2345_6efe, 0x216f_8487_7aac_6172]),
        Fp::from_raw([0x11a1_f515_52f9_4788, 0xceaa_47ea_61ca_59a4, 0x64ba_7e8e_3e28_d12b, 0x2c0d_272b_ecf2_a757]),
    ],
    [
        Fp::from_raw([0xcb4a_6c3d_8954_6f43, 0x170a_5480_abe0_508f, 0x484e_e7a7_4c45_4e9f, 0x16e3_4299_865c_0e28]),
        Fp::from_raw([0x48cd_9397_5548_8fc5, 0x7720_4776_5802_290f, 0x375a_232a_6fb9_cc71, 0x175c_eba5_99e9_6f5b]),
    ],
    [
        Fp::from_raw([0xd8c5_ffbb_44a1_ee32, 0x6aa4_10bf_bc35_4f54, 0xfead_9e17_58b0_2806, 0x0c75_9444_0dc4_8c16]),
        Fp::from_raw([0x9247_9882_d919_fd8d, 0x760e_2001_3ccf_912c, 0xc466_db7d_7eb6_fd8f, 0x1a3c_29bc_39f2_1bb5]),
    ],
    [
        Fp::from_raw([0x95c8_eeab_cd22_e68f, 0x0855_d349_074f_5a66, 0xc098_6ea0_49b2_5340, 0x0ccf_dd90_6f34_26e5]),
        Fp::from_raw([0xe0e6_99b6_7dd9_e796, 0x66a7_a8a3_fd06_5b3c, 0x2bdb_475c_e6c9_4118, 0x14f6_bc81_d9f1_86f6]),
    ],
    [
        Fp::from_raw([0x88ed_eb73_86b9_7052, 0xcc09_9810_c9c4_95c8, 0x9702_ca70_b2f6_c5aa, 0x0962_b827_89fb_3d12]),
        Fp::from_raw([0xafef_0c8f_6a31_a86d, 0x1328_4ab0_1ef0_2575, 0xbf20_c79d_e251_27bc, 0x1a88_0af7_074d_18b3]),
    ],
    [
        Fp::from_raw([0x4c30_12bb_7ae9_311b, 0x20af_2924_fc20_ff3f, 0xcd5e_77f0_211c_154b, 0x10cb_a184_19a6_a332]),
        Fp::from_raw([0x756a_2849_f302_f10d, 0xfa27_b731_9cae_3406, 0xbdc7_6ba6_3a9e_aca8, 0x057e_62a9_a8f8_9b3e]),
    ],
    [
        Fp::from_raw([0xafa0_413b_4428_0cee, 0xb961_303b_bf65_cff5, 0xd44a_df53_84b4_988c, 0x287c_971d_e91d_c0ab]),
        Fp::from_raw([0x6f7f_7960_e306_891d, 0x1e56_2bc4_6d4a_ba4e, 0xb3bc_a9da_0cca_908f, 0x21df_3388_af16_87bb]),
    ],
    [
        Fp::from_raw([0x3eff_8b56_0e16_82b3, 0x789d_f8f7_0b49_8fd8, 0x3e25_cc97_4d09_34cd, 0x1be5_c887_d25b_ce70]),
        Fp::from_raw([0x48d5_9c27_06a0_d5c1, 0xd2cb_5d42_fda5_acea, 0x6811_7175_cea2_cd0d, 0x268d_a36f_76e5_68fb]),
    ],
    [
        Fp::from_raw([0xbd06_460c_c26a_5ed6, 0xc5d8_bb74_135e_bd05, 0xc609_beaf_5510_ecec, 0x0e17_ab09_1f6e_ae50]),
        Fp::from_raw([0x040f_5caa_1f62_af40, 0x91ef_62d8_cf83_d270, 0x7aee_535a_b074_a430, 0x04d7_27e7_28ff_a0a6]),
    ],
    [
        Fp::from_raw([0x2b15_417d_7e39_ca6e, 0x3370_2ac1_0f1b_fd86, 0x81b5_4976_2bc0_22ed, 0x0ddb_d7bf_9c29_3415]),
        Fp::from_raw([0x8a29_c49c_8789_654b, 0x34f5_b0d1_d3af_9b58, 0x7681_62e8_2989_c6c2, 0x2790_eb33_5162_1752]),
    ],
    [
        Fp::from_raw([0x84b7_6420_6142_f9e9, 0x395f_3d9a_b8b2_fd09, 0x4471_9501_93d8_a570, 0x1e45_7c60_1a63_b73e]),
        Fp::from_raw([0xc4c6_86fc_46e0_91b0, 0xfa90_ecd0_c43f_f91f, 0x638d_6ab2_bbe7_135f, 0x21ae_6430_1dca_9625]),
    ],
    [
        Fp::from_raw([0x5858_534e_ed8d_350b, 0x854b_e9e3_432e_0955, 0x4da2_9316_6f49_4928, 0x0379_f63c_8ce3_468d]),
        Fp::from_raw([0x8c9f_58a3_24c3_5049, 0xca0e_4921_a466_86ac, 0x6a74_4a08_0809_e054, 0x002d_5642_0359_d026]),
    ],
    [
        Fp::from_raw([0x0fc2_c5af_9635_15a6, 0xda8d_6245_9e21_f409, 0x1d68_b3cd_32e1_0bbe, 0x1231_58e5_965b_5d9b]),
        Fp::from_raw([0x60c8_0eb4_9cad_9ec1, 0x0fbb_2b6f_5283_6d4e, 0x661d_14bb_f6cb_e042, 0x0be2_9fc4_0847_a941]),
    ],
    [
        Fp::from_raw([0x2338_02f2_4fdf_4c1a, 0x36db_9d85_9cad_5f9a, 0x5771_6142_015a_453c, 0x1ac9_6991_dec2_bb05]),
        Fp::from_raw([0x51ca_3355_bcb0_627e, 0x5e12_c9fa_97f1_8a92, 0x5f49_64fc_61d2_3b3e, 0x1596_443f_763d_bcc2]),
    ],
    [
        Fp::from_raw([0xd6d0_49ea_e3ba_3212, 0xf185_7d9f_17e7_15ae, 0x6b28_61d4_ec3a_eae0, 0x12e0_bcd3_654b_dfa7]),
        Fp::from_raw([0x04e6_c76c_7cf9_64ba, 0xceab_ac7f_3715_4b19, 0x9ea7_3d4a_f9af_2a50, 0x0fc9_2b4f_1bbe_a82b]),
    ],
    [
        Fp::from_raw([0x9c7e_9652_3387_2762, 0xb14f_7c77_2223_6f4f, 0xd6f2_e592_a801_3f40, 0x1f9c_0b16_1044_6442]),
        Fp::from_raw([0x8d15_9f64_3dbb_f4d3, 0x050d_914d_a38b_4c05, 0xf8cd_e061_57a7_82f4, 0x0ebd_7424_4ae7_2675]),
    ],
    [
        Fp::from_raw([0x7a83_9839_dccf_c6d1, 0x3b06_71e9_7346_ee39, 0x69a9_fafd_4ab9_51c0, 0x2cb7_f0ed_39e1_6e9f]),
        Fp::from_raw([0x90c7_2bca_7352_d9bf, 0xce76_1d05_14ce_5266, 0x5605_443e_e41b_ab20, 0x1a9d_6e2e_cff0_22cc]),
    ],
    [
        Fp::from_raw([0x87da_182d_648e_c72f, 0xd0c1_3326_a9a7_ba30, 0x5ea8_3c3b_c44a_9331, 0x2a11_5439_607f_335a]),
        Fp::from_raw([0x9535_c115_c5a4_c060, 0xe738_b563_05cd_44f2, 0x15b8_fa7a_ee3e_3410, 0x23f9_b652_9b5d_040d]),
    ],
    [
        Fp::from_raw([0x260e_b939_f0e6_e8a7, 0xa3ce_97c1_6d58_b68b, 0x249a_c6ba_484b_b9c3, 0x0587_2c16_db0f_72a2]),
        Fp::from_raw([0x2b62_4a7c_dedd_f6a7, 0x0219_b615_1d55_b5c5, 0xca20_fb80_1180_75f4, 0x1300_bdee_08bb_7824]),
    ],
    [
        Fp::from_raw([0x072e_4e7b_7d52_b376, 0x8d7a_d299_16d9_8cb1, 0xe638_1786_3a8f_6c28, 0x19b9_b63d_2f10_8e17]),
        Fp::from_raw([0x24a2_0128_481b_4f7f, 0x13d1_c887_26b5_ec42, 0xb5bd_a237_6685_22f6, 0x015b_ee13_57e3_c015]),
    ],
    [
        Fp::from_raw([0xea92_c785_b128_ffd1, 0xfe1e_1ce4_bab2_18cb, 0x1b97_07a4_f161_5e4e, 0x2953_736e_94bb_6b9f]),
        Fp::from_raw([0x4ce7_266e_d660_8dfc, 0x851b_98d3_72b4_5f54, 0x862f_8061_80c0_385f, 0x0b06_9353_ba09_1618]),
    ],
    [
        Fp::from_raw([0x4f58_8ac9_7d81_f429, 0x55ae_b7eb_9306_b64e, 0x15e4_e0bc_fb93_817e, 0x304f_74d4_61cc_c131]),
        Fp::from_raw([0xb8ee_5415_cde9_13fc, 0xaad2_a164_a461_7a4c, 0xe8a3_3f5e_77df_e4f5, 0x15bb_f146_ce9b_ca09]),
    ],
    [
        Fp::from_raw([0xa9ff_2385_9572_c8c6, 0x9b8f_4b85_0405_c10c, 0x4490_1031_4879_64ed, 0x0ab4_dfe0_c274_2cde]),
        Fp::from_raw([0x251d_e39f_9639_779a, 0xef5e_edfe_a546_dea9, 0x97f4_5f76_49a1_9675, 0x0e32_db32_0a04_4e31]),
    ],
    [
        Fp::from_raw([0xa307_8efa_516d_a016, 0x6797_733a_8277_4896, 0xb276_35a7_8b68_88e6, 0x0a17_56aa_1f37_8ca4]),
        Fp::from_raw([0x4254_d6a2_a25d_93ef, 0x95e6_1d32_8f85_efa9, 0x47fd_1717_7f95_2ef8, 0x044c_4a33_b10f_6934]),
    ],
    [
        Fp::from_raw([0xd37b_07b5_466c_4b8b, 0xfe08_79d7_9a49_6891, 0xbe65_5b53_7f66_f700, 0x2ed3_611b_725b_8a70]),
        Fp::from_raw([0xd833_9ea7_1208_58aa, 0xadfd_eb9c_fdd3_47b5, 0xc8ec_c3d7_22aa_2e0e, 0x1f9b_a4e8_bab7_ce42]),
    ],
    [
        Fp::from_raw([0xb740_56f8_65c5_d3da, 0xa38e_82ac_4502_066d, 0x8f7e_e907_a84e_518a, 0x1b23_3043_052e_8c28]),
        Fp::from_raw([0xca2f_97b0_2087_5954, 0x9020_53bf_c0f1_4db0, 0x7403_1ab7_2bd5_5b4c, 0x2431_e1cc_164b_b8d0]),
    ],
    [
        Fp::from_raw([0xa791_f273_9658_01fd, 0xa13e_3220_9758_3319, 0x30cd_6953_a0a7_db45, 0x082f_934c_91f5_aac3]),
        Fp::from_raw([0x9ad6_bb93_0c48_997c, 0xc772_45e2_ae7c_be99, 0xa34b_e074_3155_42a3, 0x2b9a_0a22_3e75_38b0]),
    ],
    [
        Fp::from_raw([0xb0b5_89cc_7021_4e7d, 0x8164_163e_75a8_a00e, 0xceb8_5483_b887_a9be, 0x0e1c_d91e_dd2c_fa2c]),
        Fp::from_raw([0x88d3_2460_1ceb_e2f9, 0x9977_4f19_854d_00f5, 0xc951_f614_77e3_6989, 0x2e1e_ac0f_2bfd_fd63]),
    ],
    [
        Fp::from_raw([0x23d7_4811_5b50_0b83, 0x7345_784d_8efd_b33c, 0x0c76_158e_769d_6d15, 0x0cbf_a95f_37fb_7406]),
        Fp::from_raw([0x980c_232d_fa4a_4f84, 0x76d9_91e3_a775_13d9, 0xd65a_d49d_8a61_e9a6, 0x08f0_5b3b_e923_ed44]),
    ],
    [
        Fp::from_raw([0x25a2_dd51_0c04_7ef6, 0xe728_4925_dc07_58a3, 0x52bf_8e21_984d_0443, 0x2271_9e2a_070b_cd08]),
        Fp::from_raw([0xf41f_62b2_f268_30c0, 0x7bdb_f036_1199_82c0, 0xc060_f7fc_c3a1_ab4c, 0x041f_596a_9ee1_cb2b]),
    ],
    [
        Fp::from_raw([0x19fc_dd09_86b1_0f89, 0x021b_e1c2_d0dc_464a, 0x8762_8eb0_6f6b_1d4c, 0x233f_d35d_e1be_520a]),
        Fp::from_raw([0xefcb_453c_61c9_c267, 0xd31e_078a_a1b4_707e, 0x4325_e0a4_23eb_c810, 0x0524_b46d_1aa8_7a5e]),
    ],
    [
        Fp::from_raw([0xcc44_8623_7c51_5211, 0x4227_bb95_4b0f_3199, 0xce47_fcac_894b_8582, 0x2c34_f424_c81e_5716]),
        Fp::from_raw([0xf330_1032_7de4_915e, 0x2dd2_025b_5457_cc97, 0x207e_ffc2_b554_1fb7, 0x0b5f_2a4b_6338_7819]),
    ],
0x0b5f_2a4b_6338_7819, ]), ], [ Fp::from_raw([ 0xaefa_c41f_e05c_659f, 0xc174_35d2_f57a_f6ce, 0xc5b7_2fe4_39d2_cfd6, 0x2220_7856_082c_cc54, ]), Fp::from_raw([ 0x2785_4048_ce2c_8171, 0xcdfb_2101_94ca_f79f, 0x4e24_159b_7f89_50b5, 0x24d5_7a8b_f5da_63fe, ]), ], [ Fp::from_raw([ 0x7391_9bb2_3b79_396e, 0x374a_d709_7bb0_1a85, 0x3b37_1d75_bd69_3f98, 0x0afa_b181_fdd5_e058, ]), Fp::from_raw([ 0xf162_90d6_2b11_28ee, 0x76c0_0571_94c1_6c0b, 0x998a_52ef_ac7c_bd56, 0x2dba_9b10_8f20_8772, ]), ], [ Fp::from_raw([ 0x5aff_13e6_bce4_20b3, 0xcbb8_3de0_bd59_2b25, 0x56f8_81c7_88f5_3f83, 0x2634_9b66_edb8_b16f, ]), Fp::from_raw([ 0x2352_88a3_e6f1_37db, 0xd81a_56d2_8ecc_193b, 0x685e_95f9_2339_753a, 0x25af_7ce0_e5e1_0357, ]), ], [ Fp::from_raw([ 0x1f7c_0187_fe35_011f, 0x70ee_d7aa_e88b_2bff, 0xc094_d6a5_5edd_68b9, 0x25b4_ce7b_d229_4390, ]), Fp::from_raw([ 0x8cb9_d54c_1e02_b631, 0xde9c_ef28_ebdf_30b1, 0x387e_53f1_908a_88e5, 0x22c5_43f1_0f6c_89ec, ]), ], [ Fp::from_raw([ 0xdf66_8e74_882f_87a9, 0x425e_906a_919d_7a34, 0x4fc7_908a_9f19_1e1e, 0x0236_f93e_7789_c472, ]), Fp::from_raw([ 0x9cb4_97af_980c_4b52, 0x652b_dae1_14eb_0165, 0x0e7d_27e3_7d05_da99, 0x2935_0b40_1166_ca01, ]), ], [ Fp::from_raw([ 0xee12_6091_6652_363f, 0x65ed_b75d_844e_bb89, 0x6bd3_1bba_b547_f75a, 0x0eed_787d_6582_0d3f, ]),
Fp::from_raw([ 0x1906_f656_f4de_6fad, 0xfdcd_0e99_bd94_297d, 0x036a_753f_520b_3291, 0x07cc_1170_f13b_46f2, ]), ], [ Fp::from_raw([ 0x2059_4356_89e8_acea, 0x9087_86d7_f9f5_d10c, 0xf49b_cf61_3a3d_30b1, 0x22b9_3923_3b1d_7205, ]), Fp::from_raw([ 0xadd6_50ac_e60a_e5a6, 0x740f_083a_5aa8_5438, 0x8aad_1dc8_bc33_e870, 0x0145_1762_a0aa_b81c, ]), ], [ Fp::from_raw([ 0xe704_fec0_892f_ce89, 0xe32e_aa61_dec7_da57, 0x61fa_bf10_25d4_6d1f, 0x2350_6bb5_d872_7d44, ]), Fp::from_raw([ 0x7f8b_d689_0735_5522, 0x2a37_0953_1e1e_fea9, 0xbac0_6ae3_f71b_dd09, 0x2e48_4c44_e838_aea0, ]), ], [ Fp::from_raw([ 0x4541_8da2_6835_b54c, 0xaf4a_5945_45ce_dc25, 0x379e_78c5_0bd2_e42b, 0x0f4b_c7d0_7eba_fd64, ]), Fp::from_raw([ 0xe620_996d_50d8_e74e, 0x5158_2388_725d_f460, 0xfa76_6378_62fa_aee8, 0x1f4d_3c8f_6583_e9e5, ]), ], [ Fp::from_raw([ 0x53eb_9bcb_48fe_7389, 0xfae0_2abc_7b68_1d91, 0x2660_d07b_e0e4_a988, 0x0935_14e0_c707_11f8, ]), Fp::from_raw([ 0x4a58_e0a3_47e1_53d8, 0x43ee_83ec_e472_28f2, 0x4669_9a2b_5f3b_c036, 0x1ada_b0c8_e2b3_bad3, ]), ], [ Fp::from_raw([ 0x1a22_dbef_9e80_dad2, 0x378c_1b94_b807_2bac, 0xd147_09eb_b474_641a, 0x1672_b172_6057_d99d, ]), Fp::from_raw([ 0x30d4_7b23_9b47_9c14, 0xc5d8_e2fa_e0ac_c4ee, 0x8f44_f53f_dcab_468c, 0x1dfd_53d4_576a_f2e3, ]), ], [ Fp::from_raw([
0x0xbc7f_2077_5320_5c60 is reassembled on one line below to repair the token split across the original chunk boundary.
0xbc7f_2077_5320_5c60, 0xe6d7_7d64_0f6f_c3de, 0xa70a_3626_3a37_e17f, 0x0c68_88a1_0b75_b0f3, ]), Fp::from_raw([ 0x8509_1ecc_a9d1_e508, 0x611a_61e0_0ee6_848b, 0x92b3_4a7e_77d1_2fe8, 0x1add_b933_a65b_e770, ]), ], [ Fp::from_raw([ 0x7935_628e_299d_1791, 0xf638_ff54_25f0_afff, 0x5c10_ae18_d1de_933c, 0x00d7_540d_cd26_8a84, ]), Fp::from_raw([ 0xd316_939d_20b8_2c0e, 0x26fe_dde4_acd9_9db1, 0x01b2_827a_5664_ca9c, 0x140c_0e42_687e_9ead, ]), ], [ Fp::from_raw([ 0xc091_e2ae_5656_5984, 0xc20a_0f9b_24f8_c5ed, 0x91ba_89b8_d13d_1806, 0x2f0c_3a11_5d43_17d1, ]), Fp::from_raw([ 0xd8c5_38a1_dc95_8c61, 0x08a0_cff6_70b2_2b82, 0x3006_ed22_0cf9_c810, 0x0c4e_e778_ff7c_1455, ]), ], [ Fp::from_raw([ 0x27c3_d748_5de7_4c69, 0x9424_ed26_c0ac_c662, 0x3693_f004_40cc_c360, 0x1704_f276_6d46_f82c, ]), Fp::from_raw([ 0x39b6_6fe9_009c_3cfa, 0xf076_9c9f_8544_e402, 0xa7a0_2c1b_51d2_44ab, 0x2f2d_19cc_3ea5_d78e, ]), ], [ Fp::from_raw([ 0xd6c7_66a8_06fc_6629, 0xdd7e_e6cb_9cfe_d9c7, 0x5053_f112_e2a8_e8dc, 0x1ae0_3853_b75f_caba, ]), Fp::from_raw([ 0x4e41_a86d_daf0_56d5, 0x3556_921b_2d6f_014e, 0x51d1_31d0_fa61_aa5f, 0x0971_aabf_7952_41df, ]), ], [ Fp::from_raw([ 0x5f5c_29f7_bfe2_f646, 0xda62_4f83_80df_1c87, 0x91d4_cf6b_6e0d_e73e, 0x1408_c316_e601_4e1a, ]), Fp::from_raw([ 0x4169_1f39_822e_f5bd, 0x6c89_f1f7_73ef_2853,
0x248a_be42_b543_093b, 0x1667_f3fe_2edb_e850, ]), ], [ Fp::from_raw([ 0x424c_6957_6500_fe37, 0x5b81_7184_09e5_c133, 0xa48b_0a03_557c_df91, 0x13bf_7c5d_0d2c_4376, ]), Fp::from_raw([ 0x19bc_0ba7_43a6_2c2c, 0x024b_9534_7856_b797, 0x3016_adf3_d353_3c24, 0x0762_0a6d_fb0b_6cec, ]), ], [ Fp::from_raw([ 0x1675_de3e_1982_b4d0, 0x75d2_959e_2f32_2b73, 0x36a8_ca08_bdbd_d8b0, 0x1574_c7ef_0c43_545f, ]), Fp::from_raw([ 0xc06e_03a7_ff83_78f0, 0x5bd4_1845_71c2_54fd, 0xfd56_7970_a717_ceec, 0x269e_4b5b_7a2e_b21a, ]), ], ]; pub(crate) const MDS: [[Fp; 2]; 2] = [ [ Fp::from_raw([ 0xbcec_a70b_d2af_7ad5, 0xaf07_f38a_f8c9_52a7, 0xec10_3453_51a2_3a3a, 0x066f_6f85_d6f6_8a85, ]), Fp::from_raw([ 0x0546_2b9f_8125_b1e8, 0x20a7_c02b_bd8b_ea73, 0x7782_e150_9b1d_0fdb, 0x2b9d_4b41_10c9_ae99, ]), ], [ Fp::from_raw([ 0xf573_f431_221f_8ff9, 0xb6c0_9d55_7013_fff1, 0x2bf6_7a44_93cc_262f, 0x0cc5_7cdb_b085_07d6, ]), Fp::from_raw([ 0x21bc_d147_9432_03c8, 0xade8_57e8_6eb5_c3a1, 0xa31a_6ed6_9724_e1ad, 0x1274_e649_a32e_d355, ]), ], ]; pub(crate) const MDS_INV: [[Fp; 2]; 2] = [ [ Fp::from_raw([ 0x8dbe_bd0f_a8c5_3e66, 0x0554_569d_9b29_d1ea, 0x7081_9ab1_c784_6f21, 0x13ab_ec39_0ada_7f43, ]), Fp::from_raw([ 0xaaf6_185b_1a1e_60fe, 0xbd52_1ead_5dfe_0345, 0x4c98_62a1_d97d_1510, 0x1eb9_e1dc_19a3_3a62, ]), ], [ Fp::from_raw([
0x763f_7875_036b_cb02, 0x8ce5_1690_30a2_ad69, 0x601a_bc49_fdad_4f03, 0x0fc1_c939_4db8_9bb2, ]), Fp::from_raw([ 0x8abc_ed6b_d147_c8be, 0x2b7e_ac34_3459_61bc, 0x9502_054e_dc03_e7b2, 0x16a9_e98c_493a_902b, ]), ], ];
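// A minimal sanity-check sketch (not part of the generated parameters): it verifies
// that MDS_INV is the matrix inverse of MDS over the BN256 scalar field. It assumes
// only the halo2_proofs dependency already used in this crate.
#[cfg(test)]
mod mds_tests {
    use super::{MDS, MDS_INV};
    use halo2_proofs::arithmetic::Field;
    use halo2_proofs::halo2curves::bn256::Fr as Fp;

    #[test]
    fn mds_times_mds_inv_is_identity() {
        for i in 0..2 {
            for j in 0..2 {
                // Row i of MDS dotted with column j of MDS_INV should give
                // the identity matrix entry at (i, j).
                let mut acc = Fp::ZERO;
                for k in 0..2 {
                    acc += MDS[i][k] * MDS_INV[k][j];
                }
                let expected = if i == j { Fp::ONE } else { Fp::ZERO };
                assert_eq!(acc, expected);
            }
        }
    }
}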
//! This file was generated by running generate_params.py //! Specification for a width-2, rate-1 Poseidon instance over the BN256 curve. //! Patterned after [halo2_gadgets::poseidon::primitives::P128Pow5T3] use halo2_gadgets::poseidon::primitives::*; use halo2_proofs::arithmetic::Field; use halo2_proofs::halo2curves::bn256::Fr as Fp; use super::poseidon_params; /// The specification for the Poseidon hash function. #[derive(Debug, Clone, Copy)] pub struct PoseidonSpec; /// The width of the Poseidon state, i.e. the number of columns allocated within the Poseidon chip. pub const POSEIDON_WIDTH: usize = 2; /// The rate of the sponge: the number of field elements absorbed per permutation. pub const POSEIDON_RATE: usize = 1; pub(crate) type Mds<F, const T: usize> = [[F; T]; T]; impl Spec<Fp, POSEIDON_WIDTH, POSEIDON_RATE> for PoseidonSpec { fn full_rounds() -> usize { 8 } fn partial_rounds() -> usize { 56 } fn sbox(val: Fp) -> Fp { val.pow_vartime([5]) } fn secure_mds() -> usize { unimplemented!() } fn constants() -> ( Vec<[Fp; POSEIDON_WIDTH]>, Mds<Fp, POSEIDON_WIDTH>, Mds<Fp, POSEIDON_WIDTH>, ) { ( poseidon_params::ROUND_CONSTANTS[..].to_vec(), poseidon_params::MDS, poseidon_params::MDS_INV, ) } }
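// A usage sketch for PoseidonSpec, assuming the halo2_gadgets
// `poseidon::primitives::Hash` API (`init()` + `hash()` over a `ConstantLength`
// domain); illustrative only, not a canonical part of this module.
#[cfg(test)]
mod spec_tests {
    use super::{PoseidonSpec, POSEIDON_RATE, POSEIDON_WIDTH};
    use halo2_gadgets::poseidon::primitives::{ConstantLength, Hash};
    use halo2_proofs::halo2curves::bn256::Fr as Fp;

    #[test]
    fn hash_two_elements() {
        // With WIDTH = 2 and RATE = 1, each absorbed element costs one full
        // permutation (8 full + 56 partial rounds, as specified above).
        let digest =
            Hash::<Fp, PoseidonSpec, ConstantLength<2>, POSEIDON_WIDTH, POSEIDON_RATE>::init()
                .hash([Fp::from(1), Fp::from(2)]);
        assert_ne!(digest, Fp::from(0));
    }
}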
use crate::tensor::TensorType; use std::{ fmt, ops::{Add, Mul, Neg, Sub}, }; /// A base arithmetic gate operation. `Ord` and friends are derived because `BaseOp` is used as a `BTreeMap` key in `BaseConfig`. #[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum BaseOp { Dot, DotInit, CumProdInit, CumProd, Add, Mult, Sub, SumInit, Sum, IsBoolean, } impl BaseOp { /// Apply a non-accumulating (row-local) operation to a pair of inputs. pub fn nonaccum_f< T: TensorType + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Neg<Output = T>, >( &self, inputs: (T, T), ) -> T { let (a, b) = inputs; match &self { BaseOp::Add => a + b, BaseOp::Sub => a - b, BaseOp::Mult => a * b, BaseOp::IsBoolean => b, _ => panic!("nonaccum_f called on accumulating operation"), } } /// Apply an accumulating operation: fold the current chunk into the previous row's output. pub fn accum_f< T: TensorType + Add<Output = T> + Sub<Output = T> + Mul<Output = T> + Neg<Output = T>, >( &self, prev_output: T, a: Vec<T>, b: Vec<T>, ) -> T { let zero = T::zero().unwrap(); let one = T::one().unwrap(); match &self { BaseOp::DotInit => a.into_iter().zip(b).fold(zero, |acc, (a, b)| acc + a * b), BaseOp::Dot => prev_output + a.into_iter().zip(b).fold(zero, |acc, (a, b)| acc + a * b), BaseOp::CumProdInit => b.into_iter().fold(one, |acc, b| acc * b), BaseOp::CumProd => prev_output * b.into_iter().fold(one, |acc, b| acc * b), BaseOp::SumInit => b.into_iter().fold(zero, |acc, b| acc + b), BaseOp::Sum => prev_output + b.into_iter().fold(zero, |acc, b| acc + b), _ => panic!("accum_f called on non-accumulating operation"), } } pub fn as_str(&self) -> &'static str { match self { BaseOp::Dot => "DOT", BaseOp::DotInit => "DOTINIT", BaseOp::CumProdInit => "CUMPRODINIT", BaseOp::CumProd => "CUMPROD", BaseOp::Add => "ADD", BaseOp::Sub => "SUB", BaseOp::Mult => "MULT", BaseOp::Sum => "SUM", BaseOp::SumInit => "SUMINIT",
BaseOp::IsBoolean => "ISBOOLEAN", } } pub fn query_offset_rng(&self) -> (i32, usize) { match self { BaseOp::DotInit => (0, 1), BaseOp::Dot => (-1, 2), BaseOp::CumProd => (-1, 2), BaseOp::CumProdInit => (0, 1), BaseOp::Add => (0, 1), BaseOp::Sub => (0, 1), BaseOp::Mult => (0, 1), BaseOp::Sum => (-1, 2), BaseOp::SumInit => (0, 1), BaseOp::IsBoolean => (0, 1), } } pub fn num_inputs(&self) -> usize { match self { BaseOp::DotInit => 2, BaseOp::Dot => 2, BaseOp::CumProdInit => 1, BaseOp::CumProd => 1, BaseOp::Add => 2, BaseOp::Sub => 2, BaseOp::Mult => 2, BaseOp::Sum => 1, BaseOp::SumInit => 1, BaseOp::IsBoolean => 0, } } pub fn constraint_idx(&self) -> usize { match self { BaseOp::DotInit => 0, BaseOp::Dot => 1, BaseOp::Add => 0, BaseOp::Sub => 0, BaseOp::Mult => 0, BaseOp::Sum => 1, BaseOp::SumInit => 0, BaseOp::CumProd => 1, BaseOp::CumProdInit => 0, BaseOp::IsBoolean => 0, } } } impl fmt::Display for BaseOp { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "{}", self.as_str()) } }
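// A small sketch of the accumulating semantics above, assuming the crate's
// TensorType impl for a primitive integer such as i64 (the bounds on accum_f are
// satisfied by ordinary integer arithmetic). DotInit seeds the running value;
// Dot folds each new chunk onto the previous row's output, which is why
// query_offset_rng() returns (-1, 2): the gate also queries the output column
// at rotation -1.
#[cfg(test)]
mod base_op_tests {
    use super::BaseOp;

    #[test]
    fn dot_accumulates_across_rows() {
        // Row 0: DotInit ignores prev_output and folds the first chunk.
        let first = BaseOp::DotInit.accum_f(0i64, vec![1, 2], vec![4, 5]);
        assert_eq!(first, 14); // 1*4 + 2*5
        // Row 1: Dot adds the next chunk onto the previous row's output.
        let second = BaseOp::Dot.accum_f(first, vec![3], vec![6]);
        assert_eq!(second, 32); // 14 + 3*6
    }
}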
use std::str::FromStr; use thiserror::Error; use halo2_proofs::{ circuit::Layouter, plonk::{ConstraintSystem, Constraints, Expression, Selector}, poly::Rotation, }; use log::debug; use pyo3::{ conversion::{FromPyObject, PyTryFrom}, exceptions::PyValueError, prelude::*, types::PyString, }; use serde::{Deserialize, Serialize}; use tosubcommand::ToFlags; use crate::{ circuit::{ ops::base::BaseOp, table::{Range, RangeCheck, Table}, utils, }, tensor::{Tensor, TensorType, ValTensor, VarTensor}, }; use std::{collections::BTreeMap, error::Error, marker::PhantomData}; use super::{lookup::LookupOp, region::RegionCtx, Op}; use halo2curves::ff::{Field, PrimeField}; /// Errors that can arise while constructing the circuit. #[derive(Debug, Error)] pub enum CircuitError { /// Dimension mismatch in circuit construction. #[error("dimension mismatch in circuit construction: {0}")] DimMismatch(String), /// Error instantiating a lookup table. #[error("failed to instantiate lookup table")] LookupInstantiation, /// A lookup table was already assigned. #[error("table was already assigned")] TableAlreadyAssigned, /// The operation is not currently supported. #[error("operation is not currently supported")] UnsupportedOp, /// Invalid einsum expression. #[error("invalid einsum expression")] InvalidEinsum, } #[derive( Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, Default, Copy, )] pub enum CheckMode { /// Run sanity checks during witness generation. #[default] SAFE, /// Skip sanity checks. UNSAFE, } impl std::fmt::Display for CheckMode { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { CheckMode::SAFE => write!(f, "safe"), CheckMode::UNSAFE => write!(f, "unsafe"), } } } impl ToFlags for CheckMode { fn to_flags(&self) -> Vec<String> { vec![format!("{}", self)] } } impl From<String> for CheckMode { fn from(value: String) -> Self { match value.to_lowercase().as_str() { "safe" => CheckMode::SAFE, "unsafe" => CheckMode::UNSAFE, _ => { log::error!("Invalid value for CheckMode"); log::warn!("defaulting to SAFE"); CheckMode::SAFE } } } }
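// A quick sketch of CheckMode's string round-trip, using only the impls
// defined above.
#[cfg(test)]
mod check_mode_tests {
    use super::CheckMode;
    use tosubcommand::ToFlags;

    #[test]
    fn parses_case_insensitively_and_falls_back_to_safe() {
        assert_eq!(CheckMode::from("UNSAFE".to_string()), CheckMode::UNSAFE);
        // Unrecognized values log an error and default to SAFE.
        assert_eq!(CheckMode::from("bogus".to_string()), CheckMode::SAFE);
        assert_eq!(CheckMode::SAFE.to_flags(), vec!["safe".to_string()]);
    }
}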
pub struct Tolerance { pub val: f32, pub scale: utils::F32, } impl std::fmt::Display for Tolerance { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:.2}", self.val) } } impl ToFlags for Tolerance { fn to_flags(&self) -> Vec<String> { vec![format!("{}", self)] } } impl FromStr for Tolerance { type Err = String; fn from_str(s: &str) -> Result<Self, Self::Err> { if let Ok(val) = s.parse::<f32>() { Ok(Tolerance { val, scale: utils::F32(1.0), }) } else { Err( "Invalid tolerance value provided. It should be expressed as a percentage (f32)." .to_string(), ) } } } impl From<f32> for Tolerance { fn from(value: f32) -> Self { Tolerance { val: value, scale: utils::F32(1.0), } } } impl IntoPy<PyObject> for CheckMode { fn into_py(self, py: Python) -> PyObject { match self { CheckMode::SAFE => "safe".to_object(py), CheckMode::UNSAFE => "unsafe".to_object(py), } } } impl<'source> FromPyObject<'source> for CheckMode { fn extract(ob: &'source PyAny) -> PyResult<Self> { let trystr = <PyString as PyTryFrom>::try_from(ob)?; let strval = trystr.to_string(); match strval.to_lowercase().as_str() { "safe" => Ok(CheckMode::SAFE), "unsafe" => Ok(CheckMode::UNSAFE), _ => Err(PyValueError::new_err("Invalid value for CheckMode")), } } } impl IntoPy<PyObject> for Tolerance { fn into_py(self, py: Python) -> PyObject { (self.val, self.scale.0).to_object(py) } } impl<'source> FromPyObject<'source> for Tolerance { fn extract(ob: &'source PyAny) -> PyResult<Self> { if let Ok((val, scale)) = ob.extract::<(f32, f32)>() { Ok(Tolerance { val, scale: utils::F32(scale),
}) } else { Err(PyValueError::new_err("Invalid tolerance value provided.")) } } }
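// A sketch of Tolerance's FromStr behaviour: a bare f32 parses as a percentage
// with unit scale, anything else is rejected.
#[cfg(test)]
mod tolerance_tests {
    use super::Tolerance;

    #[test]
    fn parses_percentages() {
        let t: Tolerance = "0.5".parse().unwrap();
        assert_eq!(t.val, 0.5);
        assert_eq!(t.scale.0, 1.0);
        assert!("not-a-number".parse::<Tolerance>().is_err());
    }
}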
/// Selectors and columns used for dynamically loaded lookup tables. `Default` is required by `BaseConfig::configure` below. #[derive(Clone, Debug, Default)] pub struct DynamicLookups { pub lookup_selectors: BTreeMap<(usize, usize), Selector>, pub table_selectors: Vec<Selector>, pub inputs: Vec<VarTensor>, pub tables: Vec<VarTensor>, } impl DynamicLookups { pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self { let dummy_var = VarTensor::dummy(col_size, num_inner_cols); let single_col_dummy_var = VarTensor::dummy(col_size, 1); Self { lookup_selectors: BTreeMap::new(), table_selectors: vec![], inputs: vec![dummy_var.clone(), dummy_var.clone(), dummy_var.clone()], tables: vec![ single_col_dummy_var.clone(), single_col_dummy_var.clone(), single_col_dummy_var.clone(), ], } } }
/// Selectors and columns used for shuffle arguments. `Default` is required by `BaseConfig::configure` below. #[derive(Clone, Debug, Default)] pub struct Shuffles { pub input_selectors: BTreeMap<(usize, usize), Selector>, pub reference_selectors: Vec<Selector>, pub inputs: Vec<VarTensor>, pub references: Vec<VarTensor>, } impl Shuffles { pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self { let dummy_var = VarTensor::dummy(col_size, num_inner_cols); let single_col_dummy_var = VarTensor::dummy(col_size, 1); Self { input_selectors: BTreeMap::new(), reference_selectors: vec![], inputs: vec![dummy_var.clone(), dummy_var.clone()], references: vec![single_col_dummy_var.clone(), single_col_dummy_var.clone()], } } } /// Selectors, tables and columns used for static lookups. #[derive(Clone, Debug)] pub struct StaticLookups<F: PrimeField + TensorType + PartialOrd> { pub selectors: BTreeMap<(LookupOp, usize, usize), Selector>, pub tables: BTreeMap<LookupOp, Table<F>>, pub index: VarTensor, pub output: VarTensor, pub input: VarTensor, } // `BaseConfig::configure` below relies on `Default`; `VarTensor::Empty` is the natural empty value (see the assignments in `configure_lookup`). impl<F: PrimeField + TensorType + PartialOrd> Default for StaticLookups<F> { fn default() -> Self { Self { selectors: BTreeMap::new(), tables: BTreeMap::new(), index: VarTensor::Empty, output: VarTensor::Empty, input: VarTensor::Empty, } } } impl<F: PrimeField + TensorType + PartialOrd> StaticLookups<F> { pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self { let dummy_var = VarTensor::dummy(col_size, num_inner_cols); Self { selectors: BTreeMap::new(), tables: BTreeMap::new(), index: dummy_var.clone(), output: dummy_var.clone(), input: dummy_var, } } }
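// A sketch of how the dummy constructors are meant to be used: they hand out
// placeholder VarTensors so shape inference / dry runs can proceed without a
// real ConstraintSystem. Assumes TensorType is implemented for bn256::Fr
// elsewhere in the crate, as the trait bounds above suggest.
#[cfg(test)]
mod dummy_config_tests {
    use super::{DynamicLookups, StaticLookups};
    use halo2_proofs::halo2curves::bn256::Fr;

    #[test]
    fn dummy_configs_start_empty() {
        let dynamic = DynamicLookups::dummy(128, 2);
        assert!(dynamic.lookup_selectors.is_empty());
        let statics = StaticLookups::<Fr>::dummy(128, 2);
        assert!(statics.tables.is_empty());
    }
}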
/// The custom (add/sub/mult/dot/...) gates and their selectors. #[derive(Clone, Debug)] pub struct CustomGates { pub inputs: Vec<VarTensor>, pub output: VarTensor, pub selectors: BTreeMap<(BaseOp, usize, usize), Selector>, } impl CustomGates { pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self { let dummy_var = VarTensor::dummy(col_size, num_inner_cols); Self { inputs: vec![dummy_var.clone(), dummy_var.clone()], output: dummy_var, selectors: BTreeMap::new(), } } } /// Selectors and columns used for range checks. #[derive(Clone, Debug)] pub struct RangeChecks<F: PrimeField + TensorType + PartialOrd> { pub selectors: BTreeMap<(Range, usize, usize), Selector>, pub ranges: BTreeMap<Range, RangeCheck<F>>, pub index: VarTensor, pub input: VarTensor, } // `BaseConfig::configure` below relies on `Default`. impl<F: PrimeField + TensorType + PartialOrd> Default for RangeChecks<F> { fn default() -> Self { Self { selectors: BTreeMap::new(), ranges: BTreeMap::new(), index: VarTensor::Empty, input: VarTensor::Empty, } } } impl<F: PrimeField + TensorType + PartialOrd> RangeChecks<F> { pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self { let dummy_var = VarTensor::dummy(col_size, num_inner_cols); Self { selectors: BTreeMap::new(), ranges: BTreeMap::new(), index: dummy_var.clone(), input: dummy_var, } } } /// The base configuration tying all of the sub-configs together. pub struct BaseConfig<F: PrimeField + TensorType + PartialOrd> { pub custom_gates: CustomGates, pub static_lookups: StaticLookups<F>, pub dynamic_lookups: DynamicLookups, pub range_checks: RangeChecks<F>, pub shuffles: Shuffles, pub check_mode: CheckMode, _marker: PhantomData<F>, } impl<F: PrimeField + TensorType + PartialOrd + std::hash::Hash> BaseConfig<F> { pub fn dummy(col_size: usize, num_inner_cols: usize) -> Self { Self { custom_gates: CustomGates::dummy(col_size, num_inner_cols), static_lookups: StaticLookups::dummy(col_size, num_inner_cols), dynamic_lookups: DynamicLookups::dummy(col_size, num_inner_cols), shuffles: Shuffles::dummy(col_size, num_inner_cols), range_checks: RangeChecks::dummy(col_size, num_inner_cols), check_mode: CheckMode::SAFE,
_marker: PhantomData, } } pub fn configure( meta: &mut ConstraintSystem<F>, inputs: &[VarTensor; 2], output: &VarTensor, check_mode: CheckMode, ) -> Self { let mut nonaccum_selectors = BTreeMap::new(); let mut accum_selectors = BTreeMap::new(); if inputs[0].num_cols() != inputs[1].num_cols() { log::warn!("input shapes do not match"); } if inputs[0].num_cols() != output.num_cols() { log::warn!("input and output shapes do not match"); } for i in 0..output.num_blocks() { for j in 0..output.num_inner_cols() { nonaccum_selectors.insert((BaseOp::Add, i, j), meta.selector()); nonaccum_selectors.insert((BaseOp::Sub, i, j), meta.selector()); nonaccum_selectors.insert((BaseOp::Mult, i, j), meta.selector()); nonaccum_selectors.insert((BaseOp::IsBoolean, i, j), meta.selector()); } } for i in 0..output.num_blocks() { accum_selectors.insert((BaseOp::DotInit, i, 0), meta.selector()); accum_selectors.insert((BaseOp::Dot, i, 0), meta.selector()); accum_selectors.insert((BaseOp::CumProd, i, 0), meta.selector()); accum_selectors.insert((BaseOp::CumProdInit, i, 0), meta.selector()); accum_selectors.insert((BaseOp::Sum, i, 0), meta.selector()); accum_selectors.insert((BaseOp::SumInit, i, 0), meta.selector()); } for ((base_op, block_idx, inner_col_idx), selector) in nonaccum_selectors.iter() { meta.create_gate(base_op.as_str(), |meta| { let selector = meta.query_selector(*selector); let zero = Expression::<F>::Constant(F::ZERO); let mut qis = vec![zero; 2]; for (i, q_i) in qis .iter_mut() .enumerate() .take(2) .skip(
2 - base_op.num_inputs()) { *q_i = inputs[i] .query_rng(meta, *block_idx, *inner_col_idx, 0, 1) .expect("non accum: input query failed")[0] .clone() } let (rotation_offset, rng) = base_op.query_offset_rng(); let constraints = match base_op { BaseOp::IsBoolean => { let expected_output: Tensor<Expression<F>> = output .query_rng(meta, *block_idx, *inner_col_idx, 0, 1) .expect("non accum: output query failed"); let output = expected_output[base_op.constraint_idx()].clone(); vec![(output.clone()) * (output.clone() - Expression::Constant(F::from(1)))] } _ => { let expected_output: Tensor<Expression<F>> = output .query_rng(meta, *block_idx, *inner_col_idx, rotation_offset, rng) .expect("non accum: output query failed"); let res = base_op.nonaccum_f((qis[0].clone(), qis[1].clone())); vec![expected_output[base_op.constraint_idx()].clone() - res] } }; Constraints::with_selector(selector, constraints) }); } for ((base_op, block_idx, _), selector) in accum_selectors.iter() { meta.create_gate(base_op.as_str(), |meta| { let selector = meta.query_selector(*selector); let mut qis = vec![vec![]; 2]; for (i, q_i) in qis .iter_mut() .enumerate() .take(2) .skip(2 - base_op.num_inputs()) { *q_i = inputs[i] .query_whole_block(meta, *block_idx, 0, 1)
.expect("accum: input query failed") .into_iter() .collect() } let (rotation_offset, rng) = base_op.query_offset_rng(); let expected_output: Tensor<Expression<F>> = output .query_rng(meta, *block_idx, 0, rotation_offset, rng) .expect("accum: output query failed"); let res = base_op.accum_f(expected_output[0].clone(), qis[0].clone(), qis[1].clone()); let constraints = vec![expected_output[base_op.constraint_idx()].clone() - res]; Constraints::with_selector(selector, constraints) }); } let selectors = nonaccum_selectors .into_iter() .chain(accum_selectors) .collect(); Self { custom_gates: CustomGates { inputs: inputs.to_vec(), output: output.clone(), selectors, }, static_lookups: StaticLookups::default(), dynamic_lookups: DynamicLookups::default(), shuffles: Shuffles::default(), range_checks: RangeChecks::default(), check_mode, _marker: PhantomData, } } pub fn configure_lookup( &mut self, cs: &mut ConstraintSystem<F>, input: &VarTensor, output: &VarTensor, index: &VarTensor, lookup_range: Range, logrows: usize, nl: &LookupOp, ) -> Result<(), Box<dyn Error>> where F: Field, { if !index.is_advice() { return Err("wrong input type for lookup index".into()); } if !input.is_advice() { return Err("wrong input type for lookup input".into()); } if !output.is_advice() { return Err("wrong input type for lookup output".into()); } let table = if !self.static_lookups.tables.c
ontains_key(nl) { let table = if let Some(table) = self.static_lookups.tables.values().next() { Table::<F>::configure( cs, lookup_range, logrows, nl, Some(table.table_inputs.clone()), ) } else { Table::<F>::configure(cs, lookup_range, logrows, nl, None) }; self.static_lookups.tables.insert(nl.clone(), table.clone()); table } else { return Ok(()); }; for x in 0..input.num_blocks() { for y in 0..input.num_inner_cols() { let len = table.selector_constructor.degree; let multi_col_selector = cs.complex_selector(); for ((col_idx, input_col), output_col) in table .table_inputs .iter() .enumerate() .zip(table.table_outputs.iter()) { cs.lookup("", |cs| { let mut res = vec![]; let sel = cs.query_selector(multi_col_selector); let synthetic_sel = match len { 1 => Expression::Constant(F::from(1)), _ => match index { VarTensor::Advice { inner: advices, .. } => { cs.query_advice(advices[x][y], Rotation(0)) } _ => unreachable!(), }, }; let input_query = match &input { VarTensor::Advice { inner: advices, .. } => { cs.query_advice(advices[x][y], Rotation(0)) } _ => unreachable!(), }; let output_query = match &
output { VarTensor::Advice { inner: advices, .. } => { cs.query_advice(advices[x][y], Rotation(0)) } _ => unreachable!(), }; let col_expr = sel.clone() * table .selector_constructor .get_expr_at_idx(col_idx, synthetic_sel); let multiplier = table.selector_constructor.get_selector_val_at_idx(col_idx); let not_expr = Expression::Constant(multiplier) - col_expr.clone(); let (default_x, default_y) = table.get_first_element(col_idx); log::trace!("---------------- col {:?} ------------------", col_idx,); log::trace!("expr: {:?}", col_expr,); log::trace!("multiplier: {:?}", multiplier); log::trace!("not_expr: {:?}", not_expr); log::trace!("default x: {:?}", default_x); log::trace!("default y: {:?}", default_y); res.extend([ ( col_expr.clone() * input_query.clone() + not_expr.clone() * Expression::Constant(default_x), *input_col, ), ( col_expr.clone() * output_query.clone() + not_expr.clone() * Expression::Constant(default_y), *output_col, ), ]); res }); } self.static_lookups .selectors .insert((nl.clone(),
x, y), multi_col_selector); } } if let VarTensor::Empty = self.static_lookups.input { debug!("assigning lookup input"); self.static_lookups.input = input.clone(); } if let VarTensor::Empty = self.static_lookups.output { debug!("assigning lookup output"); self.static_lookups.output = output.clone(); } if let VarTensor::Empty = self.static_lookups.index { debug!("assigning lookup index"); self.static_lookups.index = index.clone(); } Ok(()) } pub fn configure_dynamic_lookup( &mut self, cs: &mut ConstraintSystem<F>, lookups: &[VarTensor; 3], tables: &[VarTensor; 3], ) -> Result<(), Box<dyn Error>> where F: Field, { for l in lookups.iter() { if !l.is_advice() { return Err("wrong input type for dynamic lookup".into()); } } for t in tables.iter() { if !t.is_advice() || t.num_blocks() > 1 || t.num_inner_cols() > 1 { return Err("wrong table type for dynamic lookup".into()); } } let one = Expression::Constant(F::ONE); let s_ltable = cs.complex_selector(); for x in 0..lookups[0].num_blocks() { for y in 0..lookups[0].num_inner_cols() { let s_lookup = cs.complex_selector(); cs.lookup_any("lookup", |cs| { let s_lookupq = cs.query_selector(s_lookup); let mut expression = vec![]; let s_ltableq = cs.query_selector(s_ltable); let mut lookup_queries = vec![one.clone()]; for lookup in lookups { lookup_queries.push(match lookup { VarTensor::Advice { inner: advices, .. } => { cs.query_advice(advices[x][y], Rotation(0))