const poseidonGenContract = require("./poseidon_gencontract");
if (process.argv.length != 3) {
console.log("Usage: node poseidon_gencontract.js [numberOfInputs]");
process.exit(1);
}
const nInputs = Number(process.argv[2]);
console.log(nInputs);
console.log(poseidonGenContract.createCode(nInputs));
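// Example invocation (a sketch, assuming this CLI file sits next to
// poseidon_gencontract.js):
//   node poseidon_gencontract.js 2
// prints the input count followed by the generated contract code for a
// 2-input Poseidon hash.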
|
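// Print the Poseidon mixing matrix returned by getMatrix() as a JSON-style
// nested array of decimal strings.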
const Poseidon = require("./poseidon.js");
const M = Poseidon.getMatrix();
let S = "[\n ";
for (let i=0; i<M.length; i++) {
const LC = M[i];
S = S + "[\n";
for (let j=0; j<LC.length; j++) {
S = S + " " + M[i][j].toString();
if (j<LC.length-1) S = S + ",";
S = S + "\n";
}
S = S + " ]";
if (i<M.length-1) S = S + ",";
}
S = S + "\n]\n";
console.log(S);
|
const Scalar = require("ffjavascript").Scalar;
const SMTMemDB = require("./smt_memdb");
const {hash0, hash1, F} = require("./smt_hashes_poseidon");
class SMT {
constructor(db, root) {
this.db = db;
this.root = root;
}
_splitBits(_key) {
const res = Scalar.bits(_key);
while (res.length<256) res.push(false);
return res;
}
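    // Replace the value stored at `_key` and rehash the path from the leaf
    // up to the root. Returns a witness (old/new root, key, value, siblings)
    // describing the update.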
async update(_key, _newValue) {
const key = Scalar.e(_key);
const newValue = F.e(_newValue);
const resFind = await this.find(key);
const res = {};
res.oldRoot = this.root;
res.oldKey = key;
res.oldValue = resFind.foundValue;
res.newKey = key;
res.newValue = newValue;
res.siblings = resFind.siblings;
const ins = [];
const dels = [];
let rtOld = hash1(key, resFind.foundValue);
let rtNew = hash1(key, newValue);
ins.push([rtNew, [1, key, newValue ]]);
dels.push(rtOld);
const keyBits = this._splitBits(key);
for (let level = resFind.siblings.length-1; level >=0; level--) {
let oldNode, newNode;
const sibling = resFind.siblings[level];
if (keyBits[level]) {
oldNode = [sibling, rtOld];
newNode = [sibling, rtNew];
} else {
oldNode = [rtOld, sibling];
newNode = [rtNew, sibling];
}
rtOld = hash0(oldNode[0], oldNode[1]);
rtNew = hash0(newNode[0], newNode[1]);
dels.push(rtOld);
ins.push([rtNew, newNode]);
}
res.newRoot = rtNew;
await this.db.multiDel(dels);
await this.db.multiIns(ins);
await this.db.setRoot(rtNew);
this.root = rtNew;
return res;
}
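    // Remove the leaf at `_key`, collapsing the branch when the deleted
    // leaf's sibling becomes the only child on its path. Throws if the key
    // is absent.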
async delete(_key) {
const key = Scalar.e(_key);
const resFind = await this.find(key);
        if (!resFind.found) throw new Error("Key does not exist");
const res = {
siblings: [],
delKey: key,
delValue: resFind.foundValue
};
const dels = [];
const ins = [];
        let rtOld = hash1(key, resFind.foundValue);
let rtNew;
dels.push(rtOld);
let mixed;
if (resFind.siblings.length > 0) {
const record = await this.db.get(resFind.siblings[resFind.siblings.length - 1]);
if ((record.length == 3)&&(F.eq(record[0], F.one))) {
mixed = false;
res.oldKey = record[1];
res.oldValue = record[2];
res.isOld0 = false;
rtNew = resFind.siblings[resFind.siblings.length - 1];
} else if (record.length == 2) {
mixed = true;
res.oldKey = key;
res.oldValue = F.zero;
res.isOld0 = true;
rtNew = F.zero;
} else {
throw new Error("Invalid node. Database corrupted");
}
} else {
rtNew = F.zero;
res.oldKey = key;
res.oldValue = F.zero;
res.isOld0 = true;
}
const keyBits = this._splitBits(key);
for (let level = resFind.siblings.length-1; level >=0; level--) {
let newSibling = resFind.siblings[level];
if ((level == resFind.siblings.length-1)&&(!res.isOld0)) {
newSibling = F.zero;
}
const oldSibling = resFind.siblings[level];
if (keyBits[level]) {
rtOld = hash0(oldSibling, rtOld);
} else {
rtOld = hash0(rtOld, oldSibling);
}
dels.push(rtOld);
if (!F.isZero(newSibling)) {
mixed = true;
}
if (mixed) {
res.siblings.unshift(resFind.siblings[level]);
let newNode;
if (keyBits[level]) {
newNode = [newSibling, rtNew];
} else {
newNode = [rtNew, newSibling];
}
rtNew = hash0(newNode[0], newNode[1]);
ins.push([rtNew, newNode]);
            }
}
await this.db.multiIns(ins);
await this.db.setRoot(rtNew);
this.root = rtNew;
await this.db.multiDel(dels);
res.newRoot = rtNew;
res.oldRoot = rtOld;
return res;
}
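    // Insert a new leaf. If the path collides with an existing leaf (the two
    // keys share a prefix of bits), the shared prefix is extended with zero
    // siblings until the keys diverge.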
async insert(_key, _value) {
const key = Scalar.e(_key);
const value = F.e(_value);
let addedOne = false;
const res = {};
res.oldRoot = this.root;
const newKeyBits = this._splitBits(key);
let rtOld;
const resFind = await this.find(key);
if (resFind.found) throw new Error("Key already exists");
res.siblings = resFind.siblings;
let mixed;
if (!resFind.isOld0) {
            const oldKeyBits = this._splitBits(resFind.notFoundKey);
            for (let i = res.siblings.length; oldKeyBits[i] == newKeyBits[i]; i++) {
res.siblings.push(F.zero);
}
rtOld = hash1(resFind.notFoundKey, resFind.notFoundValue);
res.siblings.push(rtOld);
addedOne = true;
mixed = false;
} else if (res.siblings.length >0) {
mixed = true;
rtOld = F.zero;
}
const inserts = [];
const dels = [];
let rt = hash1(key, value);
inserts.push([rt,[1, key, value]] );
for (let i=res.siblings.length-1; i>=0; i--) {
if ((i<res.siblings.length-1)&&(!F.isZero(res.siblings[i]))) {
mixed = true;
}
if (mixed) {
const oldSibling = resFind.siblings[i];
if (newKeyBits[i]) {
rtOld = hash0(oldSibling, rtOld);
} else {
rtOld = hash0(rtOld, oldSibling);
}
dels.push(rtOld);
}
let newRt;
if (newKeyBits[i]) {
newRt = hash0(res.siblings[i], rt);
inserts.push([newRt,[res.siblings[i], rt]] );
} else {
                newRt = hash0(rt, res.siblings[i]);
inserts.push([newRt,[rt, res.siblings[i]]] );
}
rt = newRt;
}
if (addedOne) res.siblings.pop();
while ((res.siblings.length>0) && (F.isZero(res.siblings[res.siblings.length-1]))) {
res.siblings.pop();
}
res.oldKey = resFind.notFoundKey;
res.oldValue = resFind.notFoundValue;
res.newRoot = rt;
res.isOld0 = resFind.isOld0;
await this.db.multiIns(inserts);
await this.db.setRoot(rt);
this.root = rt;
await this.db.multiDel(dels);
return res;
}
async find(key) {
const keyBits = this._splitBits(key);
return await this._find(key, keyBits, this.root, 0);
}
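    // Walk down from `root` following the key bits. Database records of the
    // form [1, key, value] are leaves; two-element records are internal
    // nodes [left, right].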
async _find(key, keyBits, root, level) {
if (typeof root === "undefined") root = this.root;
let res;
if (F.isZero(root)) {
res = {
found: false,
siblings: [],
notFoundKey: key,
notFoundValue: F.zero,
isOld0: true
};
return res;
}
const record = await this.db.get(root);
if ((record.length==3)&&(F.eq(record[0],F.one))) {
if (F.eq(record[1],key)) {
res = {
found: true,
siblings: [],
foundValue: record[2],
isOld0: false
};
} else {
res = {
found: false,
siblings: [],
notFoundKey: record[1],
notFoundValue: record[2],
isOld0: false
};
}
} else {
if (keyBits[level] == 0) {
res = await this._find(key, keyBits, record[0], level+1);
res.siblings.unshift(record[1]);
} else {
res = await this._find(key, keyBits, record[1], level+1);
                res.siblings.unshift(record[0]);
}
}
return res;
}
}
async function loadFromFile(fileName) {
    // Not implemented in this snapshot.
}
async function newMemEmptyTrie() {
const db = new SMTMemDB();
const rt = await db.getRoot();
const smt = new SMT(db, rt);
return smt;
}
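// Usage sketch (keys and values are coerced to field elements internally):
//   const tree = await newMemEmptyTrie();
//   await tree.insert(11, 22);
//   const proof = await tree.find(11); // { found, foundValue, siblings, ... }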
module.exports.loadFromFile = loadFromFile;
module.exports.newMemEmptyTrie = newMemEmptyTrie;
module.exports.SMT = SMT;
module.exports.SMTMemDB = SMTMemDB; |
const mimc7 = require("./mimc7");
const bigInt = require("big-integer");
exports.hash0 = function (left, right) {
return mimc7.multiHash(left, right);
};
exports.hash1 = function(key, value) {
return mimc7.multiHash([key, value], bigInt.one);
};
exports.F = mimc7.F;
|
const ZqField = require("ffjavascript").ZqField;
const Scalar = require("ffjavascript").Scalar;
const poseidon = require("./poseidon");
const F = new ZqField(Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617"));
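// hash0 combines two child hashes into an internal node; hash1 hashes a
// (key, value) leaf with a trailing constant 1, keeping leaf hashes
// domain-separated from internal node hashes.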
exports.hash0 = function (left, right) {
return poseidon([left, right]);
};
exports.hash1 = function(key, value) {
return poseidon([key, value, F.one]);
};
exports.F = F;
|
const Scalar = require("ffjavascript").Scalar;
const ZqField = require("ffjavascript").ZqField;
// Prime 0x30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001
const F = new ZqField(Scalar.fromString("21888242871839275222246405745257275088548364400416034343698204186575808495617"));
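// In-memory SMT backend: nodes live in a plain object keyed by the decimal
// string representation of their hash.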
class SMTMemDb {
constructor() {
this.nodes = {};
this.root = F.zero;
}
async getRoot() {
return this.root;
}
_key2str(k) {
// const keyS = bigInt(key).leInt2Buff(32).toString("hex");
const keyS = k.toString();
return keyS;
}
_normalize(n) {
for (let i=0; i<n.length; i++) {
n[i] = F.e(n[i]);
}
}
async get(key) {
const keyS = this._key2str(key);
return this.nodes[keyS];
}
async multiGet(keys) {
const promises = [];
for (let i=0; i<keys.length; i++) {
promises.push(this.get(keys[i]));
}
return await Promise.all(promises);
}
async setRoot(rt) {
this.root = rt;
}
async multiIns(inserts) {
for (let i=0; i<inserts.length; i++) {
const keyS = this._key2str(inserts[i][0]);
this._normalize(inserts[i][1]);
this.nodes[keyS] = inserts[i][1];
}
}
async multiDel(dels) {
for (let i=0; i<dels.length; i++) {
const keyS = this._key2str(dels[i]);
delete this.nodes[keyS];
}
}
}
module.exports = SMTMemDb;
|
use eyre::{bail, eyre, Result, WrapErr as _};
use std::{
env::{var, VarError},
fs,
path::Path,
process::Command,
};
use time::{format_description::well_known::Rfc3339, OffsetDateTime, UtcOffset};
fn main() -> Result<()> {
let commit = rerun_if_git_changes().unwrap_or_else(|e| {
eprintln!("Warning: {}", e);
None
});
println!(
"cargo:rustc-env=COMMIT_SHA={}",
env_or_cmd("COMMIT_SHA", &["git", "rev-parse", "HEAD"]).unwrap_or_else(|e| {
eprintln!("Warning: {}", e);
commit.unwrap_or_else(|| "0000000000000000000000000000000000000000".to_string())
})
);
let build_date = OffsetDateTime::now_utc();
let commit_date = env_or_cmd("COMMIT_DATE", &[
"git",
"log",
"-n1",
"--pretty=format:'%aI'",
])
    .and_then(|s| Ok(OffsetDateTime::parse(s.trim_matches('\''), &Rfc3339)?))
.unwrap_or_else(|e| {
eprintln!("Warning: {}", e);
OffsetDateTime::UNIX_EPOCH
});
println!(
"cargo:rustc-env=COMMIT_DATE={}",
commit_date.to_offset(UtcOffset::UTC).date()
);
println!(
"cargo:rustc-env=BUILD_DATE={}",
build_date.to_offset(UtcOffset::UTC).date()
);
println!(
"cargo:rustc-env=TARGET={}",
var("TARGET").wrap_err("Fetching environment variable TARGET")?
);
Ok(())
}
fn env_or_cmd(env: &str, cmd: &[&str]) -> Result<String> {
match var(env) {
Ok(s) => return Ok(s),
Err(VarError::NotPresent) => (),
Err(e) => bail!(e),
};
let err = || {
format!(
"Variable {} is unset and command \"{}\" failed",
env,
cmd.join(" ")
)
};
let output = Command::new(cmd[0])
.args(&cmd[1..])
.output()
.with_context(err)?;
if output.status.success() {
Ok(String::from_utf8(output.stdout)?.trim().to_string())
} else {
bail!(err())
}
}
fn rerun_if_git_changes() -> Result<Option<String>> {
let git_head = Path::new(".git/HEAD");
if !git_head.exists() {
eprintln!("No .git/HEAD found, not rerunning on git change");
return Ok(None);
}
println!("cargo:rerun-if-changed=.git/HEAD");
let contents = fs::read_to_string(git_head).wrap_err("Error reading .git/HEAD")?;
let head_ref = contents.split(": ").collect::<Vec<_>>();
let commit = if head_ref.len() == 2 && head_ref[0] == "ref" {
let ref_path = Path::new(".git").join(head_ref[1].trim());
let ref_path_str = ref_path
.to_str()
.ok_or_else(|| eyre!("Could not convert ref path {:?} to string", ref_path))?;
println!("cargo:rerun-if-changed={}", ref_path_str);
fs::read_to_string(&ref_path).with_context(|| format!("Error reading {}", ref_path_str))?
} else {
contents
};
Ok(Some(commit))
} |
use neural_zkp as lib;
fn main() {
let mut criterion = criterion::Criterion::default().configure_from_args();
lib::bench::group(&mut criterion);
criterion.final_summary();
}
|
import timeit
import numpy as np
from vanilla_cnn import *
if __name__ == "__main__":
np.random.seed(12345)
x = np.random.randint(low=-5, high=5, size=(120, 80, 3))
f = np.random.randint(low=-10, high=+10, size=(32, 5, 5, 3))
k = np.random.randint(low=-10, high=+10, size=(32,5,5,32))
weights1 = np.random.randint(low=-10, high=+10, size=(1000, 14688))
biases1 = np.random.randint(low=-10, high=+10, size=(1000))
weights2 = np.random.randint(low=-10, high=+10, size=(5, 1000))
biases2 = np.random.randint(low=-10, high=+10, size=(5))
times = []
runs = 100
for _ in range(runs):
starttime = timeit.default_timer()
x, n_params, n_multiplications, name = conv_layer(x, f)
x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
x, n_params, n_multiplications, name = relu_layer(x)
x, n_params, n_multiplications, name = conv_layer(x, k)
x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
x, n_params, n_multiplications, name = relu_layer(x)
x, n_params, n_multiplications, name = flatten_layer(x)
x, n_params, n_multiplications, name = fully_connected_layer(x, weights1, biases1)
x, n_params, n_multiplications, name = relu_layer(x)
x, n_params, n_multiplications, name = fully_connected_layer(x, weights2, biases2)
x, n_params, n_multiplications, name = normalize(x)
times.append(timeit.default_timer() - starttime)
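        # the forward pass above consumed x, so re-seed and rebuild the input
        # for the next run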
np.random.seed(12345)
x = np.random.randint(low=-5, high=5, size=(120, 80, 3))
average = sum(times) / len(times)
print(f'The average time is {average} seconds for {runs} runs') |
import json
from json |
import JSONEncoder |
import numpy as np
from enum |
import Enum |
import re |
class Encoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, Layer):
return obj.value[0]
return JSONEncoder.default(self, obj)
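# Encoder turns numpy arrays into nested lists and Layer enum members into
# their string value so the model dict below serializes to plain JSON.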
def conv_layer(input, f):
"""
Evaluate the output of a convolutional layer using the filter f.
input.shape = (h, w, c)
f.shape = (c_out, hf, wf, c_in)
"""
h, w, c = input.shape
c_out, hf, wf, c_in = f.shape
assert c == c_in, "Input channels must match!"
    assert hf%2 == 1, "Height of the filter (f.shape[1]) must be an odd number!"
    assert wf%2 == 1, "Width of the filter (f.shape[2]) must be an odd number!"
    dh = hf//2
    dw = wf//2
output = np.zeros(shape=(h-2*dh, w-2*dw, c_out))
for i in range(dh, h-dh):
for j in range(dw, w-dw):
a = input[i-dh:i+dh+1, j-dw:j+dw+1]
for k in range(c_out):
b = f[k,:,:,:]
output[i-dh, j-dw, k] = (a*b).sum()
n_params = f.size
n_multiplications = a.size * c_out * (w-2*dw) * (h-2*dh)
name = f"conv {'x'.join([str(e) for e in f.shape])}"
return output, n_params, n_multiplications, name
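# Worked example: a (120, 80, 3) input convolved with a (32, 5, 5, 3) filter
# gives dh = dw = 2, so the output shape is (120-4, 80-4, 32) = (116, 76, 32).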
def relu_layer(input):
output = input.copy()
output[output<0] = 0
n_params = 0
n_multiplications = input.size
return output, n_params, n_multiplications, "relu"
def max_pooling_layer(input, s):
"""
Apply max pooling layer using a sxs patch.
"""
h, w, c = input.shape
assert h%s == 0, "Height must be divisible by s!"
    assert w%s == 0, "Width must be divisible by s!"
    output = np.zeros(shape=(h//s, w//s, c))
for i in range(0, h, s):
for j in range(0, w, s):
for k in range(c):
a = input[i:i+s, j:j+s, k]
                output[i//s, j//s, k] = np.max(a)
n_params = 0
n_multiplications = input.size
return output, n_params, n_multiplications, "max-pool"
def flatten_layer(input):
output = input.flatten()
n_params = 0
n_multiplications = 0
return output, n_params, n_multiplications, "flatten"
def fully_connected_layer(input, weights, biases):
"""
Evaluate the output of a fully connected layer.
    input.shape = (input_dim)
    weights.shape = (output_dim, input_dim)
    biases.shape = (output_dim)
"""
    assert input.ndim == 1, "Input must be a flattened array!"
assert weights.shape[1] == input.shape[0], "Input shapes must match!"
assert weights.shape[0] == biases.shape[0], "Output shapes must match!"
output = np.dot(weights, input) + biases
n_params = weights.size + biases.size
n_multiplications = weights.size
name = f"full {'x'.join([str(e) for e in weights.shape])}"
return output, n_params, n_multiplications, name
def normalize(input):
output = input / np.linalg.norm(input)
n_params = 0
n_multiplications = 1 + input.size
    return output, n_params, n_multiplications, "normalize"
class Layer(Enum):
Convolution = 'convolution',
MaxPool = 'max_pool',
Relu = 'relu',
Flatten = 'flatten',
FullyConnected = 'fully_connected',
Normalize = 'normalize',
np.random.seed(12345)
p = "{:>20} | {:>15} | {:>15} | {:>15} "
print(p.format("layer", "output shape", "
print(p.format("-"*20, "-"*15, "-"*15, "-"*15))
shape = (120,80,3)
x = np.random.randint(low=-5, high=5, size=shape)
initial = x.flatten().astype(np.float32, copy=False)
data = {
"v": 1,
"dim": shape,
"data": initial
}
shape = (32,5,5,3)
f = np.random.randint(low=-10, high=+10, size=shape)
conv1 = f.flatten().astype(np.float32, copy=False)
data = {
"v": 1,
"dim": shape,
"data": conv1
}
conv = {
"layer_type": Layer.Convolution,
"input_shape": x.shape,
"kernel": data,
}
model = [conv]
x, n_params, n_multiplications, name = conv_layer(x, f)
print(p.format(name, str(x.shape), n_params, n_multiplications))
maxpool = {
"layer_type": Layer.MaxPool,
"input_shape": x.shape,
"window": 2,
}
model.append(maxpool)
x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
print(p.format(name, str(x.shape), n_params, n_multiplications))
relu = {
"layer_type": Layer.Relu,
"input_shape": x.shape
}
model.append(relu)
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
shape = (32,5,5,32)
f = np.random.randint(low=-10, high=+10, size=shape)
conv2 = f.flatten().astype(np.float32, copy=False)
data = {
"v": 1,
"dim": shape,
"data": conv2
}
conv = {
"layer_type": Layer.Convolution,
"input_shape": x.shape,
"kernel":data
}
model.append(conv)
x, n_params, n_multiplications, name = conv_layer(x, f)
print(p.format(name, str(x.shape), n_params, n_multiplications))
maxpool = {
"layer_type": Layer.MaxPool,
"input_shape": x.shape,
"window": 2
}
model.append(maxpool)
x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
print(p.format(name, str(x.shape), n_params, n_multiplications))
relu = {
"layer_type": Layer.Relu,
"input_shape": x.shape,
}
model.append(relu)
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
flatten = {
"layer_type": Layer.Flatten,
"input_shape": x.shape,
}
model.append(flatten)
x, n_params, n_multiplications, name = flatten_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
shape = (1000, x.shape[0])
weights = np.random.randint(low=-10, high=+10, size=shape)
data = {
"v": 1,
"dim": shape,
"data": weights.flatten().astype(np.float32, copy=False)
}
shape = [1000]
biases = np.random.randint(low=-10, high=+10, size=(1000))
data2 = {
"v": 1,
"dim": shape,
"data": biases.flatten().astype(np.float32, copy=False)
}
fully_connected = {
"layer_type": Layer.FullyConnected,
"input_shape": x.shape,
"weights":data,
"biases": data2
}
model.append(fully_connected)
x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)
print(p.format(name, str(x.shape), n_params, n_multiplications))
relu = {
"layer_type": Layer.Relu,
"input_shape": x.shape,
}
model.append(relu)
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
shape = (5, x.shape[0])
weights = np.random.randint(low=-10, high=+10, size=shape)
data = {
"v": 1,
"dim": shape,
"data": weights.flatten().astype(np.float32, copy=False)
}
shape = [5]
biases = np.random.randint(low=-10, high=+10, size=shape)
data2 = {
"v": 1,
"dim": [5],
"data": biases.flatten().astype(np.float32, copy=False)
}
fully_connected = {
"layer_type": Layer.FullyConnected,
"input_shape": x.shape,
"weights":data,
"biases": data2
}
model.append(fully_connected)
x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)
print(p.format(name, str(x.shape), n_params, n_multiplications))
assert(np.isclose(x, [ -9404869, -11033050, -34374361, -20396580, 70483360.]).all())
norm = {
"layer_type": Layer.Normalize,
"input_shape": x.shape,
}
model.append(norm)
model = {
"layers": model
}
x, n_params, n_multiplications, name = normalize(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
print("\nfinal output:", x)
model_data = json.dumps(model, cls=Encoder)
with open('../src/json/model.json', "w") as f:
print('\ncreated model.json in the proto-neural-zkp/src/json folder')
f.write(model_data) |
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "cf9cc6d3-87d9-4c07-8773-39cfcc94d0f5",
"metadata": {},
"outputs": [],
"source": [
"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cb8f77a5-dc82-4c34-8ee8-4d510afb175d",
"metadata": {},
"outputs": [],
"source": [
" |
import os"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dc648e92-7bfa-4991-829b-012822604d0f",
"metadata": {},
"outputs": [],
"source": [
" |
import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d306a253-dc0d-425f-b77a-44b0dfadcaa3",
"metadata": {},
"outputs": [],
"source": [
" |
import pandas as pd"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b73322c2-d188-4dae-8480-558a3317fd17",
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline\n",
"\n",
" |
import matplotlib.pyplot as plt\n",
" |
import matplotlib as mpl\n",
"\n",
"
"mpl.rcParams[\"figure.dpi\"] = 120\n",
"mpl.rcParams[\"figure.figsize\"] = [12.0, 8.0]\n",
"
"mpl.rcParams['mathtext.fontset'] = 'cm'\n",
"mpl.rcParams['font.family'] = 'CMU Serif'\n",
"
"mpl.rcParams[\"savefig.bbox\"] = 'tight'\n",
"mpl.rcParams[\"savefig.dpi\"] = 300\n",
"mpl.rcParams[\"savefig.facecolor\"] = 'white'\n",
"
"mpl.rcParams[\"animation.writer\"] = 'ffmpeg'\n",
"mpl.rcParams[\"animation.codec\"] = 'webp'\n",
"mpl.rcParams[\"animation.ffmpeg_args\"] = [\"-lossless\", \"1\", \"-qscale\", \"100\"]\n",
"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "64b79d42-6bcf-49d4-899f-113f193f6d11",
"metadata": {},
"outputs": [],
"source": [
" |
import watermark.watermark as watermark\n",
"print(watermark(machine=True, iso8601=True, python=True, iversions=True, globals_=globals()))\n",
"print(os.getcwd())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "33eec25e-635a-4779-b34f-09f638a246d2",
"metadata": {},
"outputs": [],
"source": [
"df1 = pd.read_csv('../bench-1.csv')\n",
"df1['ops'] = df.input_size * df.output_size\n",
"df1"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e0a47916-268f-42fb-8880-7298aef847e6",
"metadata": {},
"outputs": [],
"source": [
"df2 = pd.read_csv('../bench-2.csv')\n",
"df2['ops'] = df.input_size * df.output_size\n",
"df2"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fd37c694-d984-46af-af89-febee74eefcf",
"metadata": {},
"outputs": [],
"source": [
"df5 = pd.read_csv('../bench-5.csv')\n",
"df5['ops'] = df.input_size * df.output_size\n",
"df5"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1d9a5d1-a8fa-417d-9ea3-378018f9ff36",
"metadata": {},
"outputs": [],
"source": [
"df10 = pd.read_csv('../bench-10.csv')\n",
"df10['ops'] = df10.input_size * df10.output_size\n",
"df10"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "952a923e-8241-4b78-a156-961bf1b85570",
"metadata": {},
"outputs": [],
"source": [
"plt.title('Prover performance')\n",
"plt.xlabel('
"plt.ylabel('proof time [s]')\n",
"plt.plot(df1.ops, df1.proof_time_s, label='1 thread')\n",
"plt.plot(df2.ops, df2.proof_time_s, label='2 thread')\n",
"plt.plot(df5.ops, df5.proof_time_s, label='5 thread')\n",
"plt.plot(df10.ops, df10.proof_time_s, label='10 threads')\n",
"plt.ylim([0, 120])\n",
"plt.legend()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "149a5d0c-a272-4fc4-b8fe-18771b9d6d44",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code", |
"execution_count": null,
"id": "f758c3a7-4694-4673-971b-f5087b70c2ad",
"metadata": {},
"outputs": [],
"source": [
"plt.title('Prover performance')\n",
"plt.xlabel('
"plt.ylabel('utilization')\n",
"plt.plot(df1.ops, df1.proof_time_s / (df2.proof_time_s * 2), label='2 thread utilization')\n",
"plt.plot(df1.ops, df1.proof_time_s / (df5.proof_time_s * 5), label='5 thread utilization')\n",
"plt.plot(df.ops, df1.proof_time_s / (df.proof_time_s * 10), label='10 thread utilization')\n",
"plt.ylim([0, 1])\n",
"plt.legend()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "25aaf206-4a58-47f1-8272-3df48b5e6e54",
"metadata": {},
"outputs": [],
"source": [
"plt.title('Prover performance (1 threads)')\n",
"plt.xlabel('
"plt.ylabel('memory [b]')\n",
"plt.plot(df1.ops, df1.proof_mem_b, label='1 threads')\n",
"plt.plot(df5.ops, df5.proof_mem_b, label='5 threads')\n",
"plt.plot(df10.ops, df10.proof_mem_b, label='10 threads')\n",
"plt.ylim([0, 1.1*np.max(df.proof_mem_b)])\n",
"plt.legend()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3cef9590-7689-477f-bac3-21d68ff591c2",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "99d7647e-e9f2-48ba-8ea3-27ab41b06808",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
} |
{
"cells": [
{
"cell_type": "markdown",
"id": "d5c490d4",
"metadata": {},
"source": [
"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "794b9914-cce9-4d71-b20d-96df96f6403b",
"metadata": {},
"outputs": [],
"source": [
" |
import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4c3349aa-434a-4c9b-a451-feeb341aaa72",
"metadata": {},
"outputs": [],
"source": [
"def conv_layer(input, f):\n",
" \"\"\"\n",
" Evaluate the output of a convolutional layer using the filter f.\n",
" input.shape = (h, w, c)\n",
" f.shape = (c_out, hf, wf, c_in)\n",
" \"\"\"\n",
" h, w, c = input.shape\n",
" c_out, hf, wf, c_in = f.shape\n",
"\n",
" assert c == c_in, \"Input channels must match!\"\n",
" assert hf%2 == 1, \"Height of the filter (f.shape[1]) must be an uneven number!\"\n",
" assert wf%2 == 1, \"Width of the filter (f.shape[2]) must be an uneven number!\"\n",
"\n",
"
" dh = hf
" dw = wf
"\n",
"
"
" output = np.zeros(shape=(h-2*dh, w-2*dw, c_out))\n",
"\n",
"
"
" for i in range(dh, h-dh):\n",
"
" for j in range(dw, w-dw):\n",
"
" a = input[i-dh:i+dh+1, j-dw:j+dw+1]\n",
" for k in range(c_out):\n",
"
" b = f[k,:,:,:]\n",
"
" output[i-dh, j-dw, k] = (a*b).sum()
" \n",
" n_params = f.size\n",
" n_multiplications = a.size * c_out * (w-2*dw) * (h-2*dh)\n",
" name = f\"conv {'x'.join([str(e) for e in f.shape])}\"\n",
" \n",
" return output, n_params, n_multiplications, name\n",
"\n",
"\n",
"def relu_layer(input): \n",
" output = input.copy()\n",
" output[output<0] = 0\n",
" \n",
" n_params = 0\n",
" n_multiplications = input.size\n",
" \n",
" return output, n_params, n_multiplications, \"relu\"\n",
"\n",
"\n",
"def max_pooling_layer(input, s):\n",
" \"\"\"\n",
" Apply max pooling layer using a sxs patch.\n",
" \"\"\"\n",
" h, w, c = input.shape\n",
"\n",
" as |
sert h%s == 0, \"Height must be divisible by s!\"\n",
" assert w%s == 0, \"Width must be dibisible by s!\"\n",
"\n",
" output = np.zeros(shape=(h
"\n",
" for i in range(0, h, s):\n",
" for j in range(0, w, s):\n",
" for k in range(c):\n",
" a = input[i:i+s, j:j+s, k]\n",
" output[i
" \n",
" n_params = 0\n",
" n_multiplications = input.size\n",
" return output, n_params, n_multiplications, \"max-pool\"\n",
"\n",
"\n",
"def flatten_layer(input):\n",
" output = input.flatten()\n",
" n_params = 0\n",
" n_multiplications = 0\n",
" return output, n_params, n_multiplications, \"flatten\"\n",
"\n",
"\n",
"def fully_connected_layer(input, weights, biases):\n",
" \"\"\"\n",
" Evaluate the output of a fully connected layer.\n",
" input.shape = (output_dim)\n",
" weights.shape = (output_dim, input_dim)\n",
" f.shape = (output_dim)\n",
" \"\"\"\n",
" assert input.ndim == 1, \"Input must be a flattend array!\"\n",
" assert weights.shape[1] == input.shape[0], \"Input shapes must match!\"\n",
" assert weights.shape[0] == biases.shape[0], \"Output shapes must match!\"\n",
"\n",
" output = np.dot(weights, input) + biases\n",
" \n",
" n_params = weights.size + biases.size\n",
" n_multiplications = weights.size\n",
" name = f\"full {'x'.join([str(e) for e in weights.shape])}\"\n",
" \n",
" return output, n_params, n_multiplications, name\n",
"\n",
"\n",
"def normalize(input):\n",
" output = input / np.linalg.norm(input)\n",
" n_params = 0\n",
" n_multiplications = 1 + input.size\n",
" return output, n_params, n_multiplications, \"normalize\"\n"
]
},
{
"cell_type": "markdown",
"id": "2896d5ed",
"metadata": {},
"source": [
"
]
},
{
"cell_type": "code",
"execution_count": null, |
"id": "00fd6550",
"metadata": {},
"outputs": [],
"source": [
"np.random.seed(12345)\n",
"\n",
"p = \"{:>20} | {:>15} | {:>15} | {:>15} \"\n",
"print(p.format(\"layer\", \"output shape\", \"
"print(p.format(\"-\"*20, \"-\"*15, \"-\"*15, \"-\"*15))\n",
"\n",
"
"x = np.random.randint(low=-5, high=5, size=(120,80,3))\n",
"\n",
"
"f = np.random.randint(low=-10, high=+10, size=(32,5,5,3)) \n",
"x, n_params, n_multiplications, name = conv_layer(x, f)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"
"x, n_params, n_multiplications, name = max_pooling_layer(x, 2)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"
"x, n_params, n_multiplications, name = relu_layer(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"
"f = np.random.randint(low=-10, high=+10, size=(32,5,5,32)) \n",
"x, n_params, n_multiplications, name = conv_layer(x, f)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"
"x, n_params, n_multiplications, name = max_pooling_layer(x, 2)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"
"x, n_params, n_multiplications, name = relu_layer(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"
"x, n_params, n_multiplications, name = flatten_layer(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"
"weights = np.random.randint(low=-10, high=+10, size=(1000, x.shape[0])) \n",
"biases = np.random.randint(low=-10, high=+10, size=(1000)) \n",
"x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"
"x, n_params, n_multiplications, name = relu_layer(x)\n",
"print(p.format(name, str(x.shape), n_ |
params, n_multiplications))\n",
"\n",
"
"weights = np.random.randint(low=-10, high=+10, size=(5, x.shape[0])) \n",
"biases = np.random.randint(low=-10, high=+10, size=(5)) \n",
"x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"assert(np.isclose(x, [ -9404869, -11033050, -34374361, -20396580, 70483360.]).all())\n",
"\n",
"
"x, n_params, n_multiplications, name = normalize(x)\n",
"print(p.format(name, str(x.shape), n_params, n_multiplications))\n",
"\n",
"print(\"\\nfinal output:\", x)"
]
},
{
"cell_type": "markdown",
"id": "43c065d3-e68b-4a63-9e7e-237dbf704819",
"metadata": {},
"source": [
"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d97c695a-f706-401f-b85c-b46396a8c42f",
"metadata": {},
"outputs": [],
"source": [
" |
import json\n",
"\n",
"
"from json |
import JSONEncoder\n",
"\n",
" |
class Encoder(JSONEncoder):\n",
" def default(self, obj):\n",
" if isinstance(obj, np.ndarray):\n",
" return obj.tolist()\n",
" return JSONEncoder.default(self, obj)\n",
"\n",
"np.random.seed(12345)\n",
"\n",
"shape = (3,3,3)\n",
"x = np.random.randint(low=-5, high=5, size=shape)\n",
"\n",
"x = x.flatten()\n",
"\n",
"
"data = {\n",
" \"v\": 1,\n",
" \"dim\": shape,\n",
" \"data\": x\n",
" }\n",
"\n",
"json_data = json.dumps(data, cls=Encoder)\n",
"\n",
"with open(\"../src/json/test.json\", \"w\") as f:\n",
" f.write(json_data)"
]
},
{
"cell_type": "markdown",
"id": "e9f4e31a",
"metadata": {},
"source": [
"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a619bbbb-fad3-46ae-83b4-b74398cb953b",
"metadata": {},
"outputs": [],
"source": [
"8388608"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "5a1787ed-b448-40e2-96e7-7d597a4660e2",
"metadata": {},
"outputs": [],
"source": [
"4897963 / 8388608"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "33087999-ac7f-4ebd-b290-8dbbe3d0c51b",
"metadata": {},
"outputs": [],
"source": [
"np.log(262144)/np.log(2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ae322e92-7fcb-4270-9025-392773fc6da4",
"metadata": {},
"outputs": [],
"source": [
"4896000 * 3 / (14688 * 1000)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3078557e-adc4-4385-be93-567aa7d93e2d",
"metadata": {},
"outputs": [],
"source": [
"np.log(14688 * 800 / 3) / np.log(2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8316d646-b622-47b1-a58c-2ceb5de41c74",
"metadata": {},
"outputs": [],
"source": [
"14688 * 400"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8abe8302-5958-427f-9453-7e2c4ff2cd |
c9",
"metadata": {},
"outputs": [],
"source": [
"14688 * 1600"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "70387136-104a-4a18-9f20-72847ecee8a0",
"metadata": {},
"outputs": [],
"source": [
"234076 * 100"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "93298188-6ed4-4e9e-b3cc-20f9778e38bc",
"metadata": {},
"outputs": [],
"source": [
" |
import numpy as np\n",
"\n",
"output = [-6276474000, 8343393300, 8266027500, -7525360600, 7814137000]\n",
"norm = np.linalg.norm(output)\n",
"norm"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d1b09558",
"metadata": {},
"outputs": [],
"source": [
" |
import math\n",
"math.sqrt(6276474000**2 + 8343393300**2 + 8266027500**2 + 7525360600**2 + 7814137000**2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c9c85ea1",
"metadata": {},
"outputs": [],
"source": [
"output/norm"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9.13 64-bit",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.6"
},
"vscode": {
"interpreter": {
"hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
}
}
},
"nbformat": 4,
"nbformat_minor": 5
} |
import numpy as np
def conv_layer(input, f):
"""
Evaluate the output of a convolutional layer using the filter f.
input.shape = (h, w, c)
f.shape = (c_out, hf, wf, c_in)
"""
h, w, c = input.shape
c_out, hf, wf, c_in = f.shape
assert c == c_in, "Input channels must match!"
    assert hf%2 == 1, "Height of the filter (f.shape[1]) must be an odd number!"
    assert wf%2 == 1, "Width of the filter (f.shape[2]) must be an odd number!"
    dh = hf//2
    dw = wf//2
output = np.zeros(shape=(h-2*dh, w-2*dw, c_out))
for i in range(dh, h-dh):
for j in range(dw, w-dw):
a = input[i-dh:i+dh+1, j-dw:j+dw+1]
for k in range(c_out):
b = f[k,:,:,:]
output[i-dh, j-dw, k] = (a*b).sum()
n_params = f.size
n_multiplications = a.size * c_out * (w-2*dw) * (h-2*dh)
name = f"conv {'x'.join([str(e) for e in f.shape])}"
return output, n_params, n_multiplications, name
def relu_layer(input):
output = input.copy()
output[output<0] = 0
n_params = 0
n_multiplications = 0
return output, n_params, n_multiplications, "relu"
def max_pooling_layer(input, s):
"""
Apply max pooling layer using a sxs patch.
"""
h, w, c = input.shape
    assert h%s == 0, "Height must be divisible by s!"
    assert w%s == 0, "Width must be divisible by s!"
    output = np.zeros(shape=(h//s, w//s, c))
for i in range(0, h, s):
for j in range(0, w, s):
for k in range(c):
a = input[i:i+s, j:j+s, k]
                output[i//s, j//s, k] = np.max(a)
n_params = 0
n_multiplications = 0
return output, n_params, n_multiplications, "max-pool"
def flatten_layer(input):
output = input.flatten()
n_params = 0
n_multiplications = 0
return output, n_params, n_multiplications, "flatten"
def fully_connected_layer(input, weights, biases):
"""
Evaluate the output of a fully connected layer.
    input.shape = (input_dim)
    weights.shape = (output_dim, input_dim)
    biases.shape = (output_dim)
"""
    assert input.ndim == 1, "Input must be a flattened array!"
assert weights.shape[1] == input.shape[0], "Input shapes must match!"
assert weights.shape[0] == biases.shape[0], "Output shapes must match!"
output = np.dot(weights, input) + biases
n_params = weights.size + biases.size
n_multiplications = weights.size
name = f"conv {'x'.join([str(e) for e in weights.shape])}"
return output, n_params, n_multiplications, name
def normalize(input):
output = input / np.linalg.norm(input)
n_params = 0
n_multiplications = 1 + input.size
return output, n_params, n_multiplications, "normalize"
if __name__ == "__main__":
np.random.seed(12345)
p = "{:>20} | {:>15} | {:>15} | {:>15} "
print(p.format("layer", "output shape", "
print(p.format("-"*20, "-"*15, "-"*15, "-"*15))
x = np.random.randint(low=-5, high=5, size=(120,80,3))
f = np.random.randint(low=-10, high=+10, size=(32,5,5,3))
x, n_params, n_multiplications, name = conv_layer(x, f)
print(p.format(name, str(x.shape), n_params, n_multiplications))
x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
print(p.format(name, str(x.shape), n_params, n_multiplications))
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
f = np.random.randint(low=-10, high=+10, size=(32,5,5,32))
x, n_params, n_multiplications, name = conv_layer(x, f)
print(p.format(name, str(x.shape), n_params, n_multiplications))
x, n_params, n_multiplications, name = max_pooling_layer(x, 2)
print(p.format(name, str(x.shape), n_params, n_multiplications))
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
x, n_params, n_multiplications, name = flatten_layer(x)
    print(p.format(name, str(x.shape), n_params, n_multiplications))
weights = np.random.randint(low=-10, high=+10, size=(1000, x.shape[0]))
biases = np.random.randint(low=-10, high=+10, size=(1000))
x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)
print(p.format(name, str(x.shape), n_params, n_multiplications))
x, n_params, n_multiplications, name = relu_layer(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
weights = np.random.randint(low=-10, high=+10, size=(5, x.shape[0]))
biases = np.random.randint(low=-10, high=+10, size=(5))
x, n_params, n_multiplications, name = fully_connected_layer(x, weights, biases)
print(p.format(name, str(x.shape), n_params, n_multiplications))
assert(np.isclose(x, [ -9404869, -11033050, -34374361, -20396580, 70483360.]).all())
x, n_params, n_multiplications, name = normalize(x)
print(p.format(name, str(x.shape), n_params, n_multiplications))
print("\nfinal output:", x) |
use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
pub use std::alloc::System as StdAlloc;
use std::alloc::{GlobalAlloc, Layout};
pub use mimalloc::MiMalloc;
pub struct Allocator<T: GlobalAlloc> {
inner: T,
pub allocated: AtomicUsize,
pub peak_allocated: AtomicUsize,
pub total_allocated: AtomicUsize,
pub largest_allocated: AtomicUsize,
pub num_allocations: AtomicUsize,
}
pub const fn new_std() -> Allocator<StdAlloc> {
Allocator::new(StdAlloc)
}
pub const fn new_mimalloc() -> Allocator<MiMalloc> {
Allocator::new(MiMalloc)
}
impl<T: GlobalAlloc> Allocator<T> {
pub const fn new(alloc: T) -> Self {
Self {
inner: alloc,
allocated: AtomicUsize::new(0),
peak_allocated: AtomicUsize::new(0),
total_allocated: AtomicUsize::new(0),
largest_allocated: AtomicUsize::new(0),
num_allocations: AtomicUsize::new(0),
}
    }
fn count_alloc(&self, size: usize) {
let allocated = self.allocated.fetch_add(size, Relaxed);
self.total_allocated.fetch_add(size, Relaxed);
self.num_allocations.fetch_add(1, Relaxed);
        // fetch_add returns the previous value, so include the new
        // allocation when updating the peak.
        self.peak_allocated.fetch_max(allocated + size, Relaxed);
self.largest_allocated.fetch_max(size, Relaxed);
    }
fn count_dealloc(&self, size: usize) {
self.allocated.fetch_sub(size, Relaxed);
}
}
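// Usage sketch (assumed, not part of this file): install the counting
// allocator globally and read its counters at any point.
//
//     #[global_allocator]
//     static ALLOCATOR: Allocator<MiMalloc> = new_mimalloc();
//
//     let peak = ALLOCATOR.peak_allocated.load(core::sync::atomic::Ordering::Relaxed);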
unsafe impl<T: GlobalAlloc> GlobalAlloc for Allocator<T> {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
self.count_alloc(layout.size());
self.inner.alloc(layout)
}
    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
self.count_dealloc(layout.size());
self.inner.dealloc(ptr, layout);
}
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
self.count_alloc(layout.size());
self.inner.alloc_zeroed(layout)
}
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
let old_size = layout.size();
if new_size >= old_size {
self.count_alloc(new_size - old_size);
} else {
self.count_dealloc(old_size - new_size);
}
self.inner.realloc(ptr, layout, new_size)
}
} |
use eyre::eyre;
pub trait MapAny<T> {
fn map_any(self) -> eyre::Result<T>;
}
impl<T> MapAny<T> for anyhow::Result<T> {
fn map_any(self) -> eyre::Result<T> {
self.map_err(|e| eyre!(e.to_string()))
}
}
|
use core::str::FromStr;
use eyre::{bail, Error as EyreError, Result as EyreResult, WrapErr as _};
use std::process::id as pid;
use structopt::StructOpt;
use tracing::{info, Level, Subscriber};
use tracing_log::{AsLog as _, LogTracer};
use tracing_subscriber::{
filter::{LevelFilter, Targets},
fmt::{self, time::Uptime},
layer::SubscriberExt,
Layer, Registry,
};
#[derive(Debug, Clone, PartialEq, Eq)]
enum LogFormat {
Compact,
Pretty,
Json,
}
impl LogFormat {
fn to_layer<S>(&self) -> impl Layer<S>
where
S: Subscriber + for<'a> tracing_subscriber::registry::LookupSpan<'a> + Send + Sync,
{
let layer = fmt::Layer::new().with_writer(std::io::stderr);
match self {
LogFormat::Compact => {
Box::new(layer.event_format(fmt::format().with_timer(Uptime::default()).compact()))
as Box<dyn Layer<S> + Send + Sync>
}
LogFormat::Pretty => Box::new(layer.event_format(fmt::format().pretty())),
LogFormat::Json => Box::new(layer.event_format(fmt::format().json())),
}
}
}
impl FromStr for LogFormat {
type Err = EyreError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(match s {
"compact" => Self::Compact,
"pretty" => Self::Pretty,
"json" => Self::Json,
_ => bail!("Invalid log format: {}", s),
})
}
}
#[derive(Debug, PartialEq, StructOpt)]
pub struct Options {
    #[structopt(short, long, parse(from_occurrences))]
    verbose: usize,
    #[structopt(long, default_value = "")]
    log_filter: String,
    #[structopt(long, default_value = "compact")]
    log_format: LogFormat,
}
impl Options {
pub fn init(&self) -> EyreResult<()> {
let verbosity = {
let (all, app) = match self.verbose {
0 => (Level::INFO, Level::INFO),
1 => (Level::INFO, Level::DEBUG),
2 => (Level::INFO, Level::TRACE),
3 => (Level::DEBUG, Level::TRACE),
_ => (Level::TRACE, Level::TRACE),
};
Targets::new()
.with_default(all)
.with_target("lib", app)
.with_target(env!("CARGO_CRATE_NAME"), app)
};
let log_filter = if self.log_filter.is_empty() {
Targets::new()
} else {
self.log_filter
.parse()
.wrap_err("Error parsing log-filter")?
};
let targets = verbosity.with_targets(log_filter);
let subscriber = Registry::default().with(self.log_format.to_layer().with_filter(targets));
tracing::subscriber::set_global_default(subscriber)?;
LogTracer::builder()
.with_max_level(LevelFilter::current().as_log())
.init()?;
info!(
host = env!("TARGET"),
pid = pid(),
main = &crate::main as *const _ as usize,
commit = &env!("COMMIT_SHA")[..8],
"{name} {version}",
name = env!("CARGO_CRATE_NAME"),
version = env!("CARGO_PKG_VERSION"),
);
Ok(())
}
}
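// Usage sketch (assumed): parse the options from the CLI and initialize
// logging once at startup.
//
//     let options = Options::from_iter_safe(["app", "-vv"])?;
//     options.init()?;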
#[cfg(test)]
pub mod test {
    use super::*;

    #[test]
    fn test_parse_args() {
let cmd = "arg0 -v --log-filter foo -vvv";
let options = Options::from_iter_safe(cmd.split(' ')).unwrap();
assert_eq!(options, Options {
verbose: 4,
log_filter: "foo".to_owned(),
log_format: LogFormat::Compact,
});
}
} |
use neural_zkp as lib;
mod logging;
mod random;
use eyre::{Result as EyreResult, WrapErr as _};
use structopt::StructOpt;
use tokio::runtime::{self, Runtime};
use tracing::info;
const VERSION: &str = concat!(
env!("CARGO_PKG_VERSION"),
"\n",
env!("COMMIT_SHA"),
" ",
env!("COMMIT_DATE"),
"\n",
env!("TARGET"),
" ",
env!("BUILD_DATE"),
"\n",
env!("CARGO_PKG_AUTHORS"),
"\n",
env!("CARGO_PKG_HOMEPAGE"),
"\n",
env!("CARGO_PKG_DESCRIPTION"),
);
#[derive(StructOpt)]
struct Options {
    #[structopt(flatten)]
    log: logging::Options,
    #[structopt(flatten)]
    app: lib::Options,
    #[structopt(flatten)]
    random: random::Options,
    #[structopt(long)]
    threads: Option<usize>,
}
fn main() -> EyreResult<()> {
color_eyre::install()?;
let matches = Options::clap().long_version(VERSION).get_matches();
let options = Options::from_clap(&matches);
options.log.init()?;
let rng = options.random.init();
init_rayon(options.threads)?;
let runtime = init_tokio()?;
let main_future = lib::main(rng, options.app);
runtime.block_on(main_future)?;
info!("Program terminating normally");
Ok(())
}
fn init_rayon(threads: Option<usize>) -> EyreResult<()> {
if let Some(threads) = threads {
rayon::ThreadPoolBuilder::new()
.num_threads(threads)
.build_global()
.context("Failed to build thread pool.")?;
}
info!(
"Using {} compute threads on {} cores",
rayon::current_num_threads(),
num_cpus::get()
);
Ok(())
}
fn init_tokio() -> EyreResult<Runtime> {
runtime::Builder::new_multi_thread()
.enable_all()
.build()
.wrap_err("Error creating Tokio runtime")
}
#[cfg(test)]
pub mod test {
    use super::*;
    use tracing::{error, warn};
    use tracing_test::traced_test;

    #[test]
    #[traced_test]
    fn test_with_log_output() {
error!("logged on the error level");
assert!(logs_contain("logged on the error level"));
}
    #[tokio::test]
    #[traced_test]
    async fn async_test_with_log() {
info!("This is being logged on the info level");
tokio::spawn(async {
warn!("This is being logged on the warn level from a spawned task");
})
.await
.unwrap();
assert!(logs_contain("logged on the info level"));
assert!(logs_contain("logged on the warn level"));
assert!(!logs_contain("logged on the error level"));
}
} |
use rand::{rngs::OsRng, RngCore, SeedableRng};
use rand_pcg::Mcg128Xsl64;
use structopt::StructOpt;
use tracing::info;
pub type Generator = Mcg128Xsl64;
#[derive(StructOpt)]
pub struct Options {
/// Random seed for deterministic random number generation.
/// If not specified a seed is periodically generated from OS entropy.
#[structopt(long, parse(try_from_str = parse_hex_u64))]
seed: Option<u64>,
}
impl Options {
#[must_use]
pub fn init(self) -> Generator {
let rng_seed = self.seed.unwrap_or_else(random_seed);
info!("Using random seed {rng_seed:16x}");
Mcg128Xsl64::seed_from_u64(rng_seed)
}
}
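// Passing `--seed deadbeef` makes runs reproducible; without it a fresh
// seed is drawn from OS entropy and logged so the run can be replayed.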
#[must_use]
fn random_seed() -> u64 {
OsRng::default().next_u64()
}
#[must_use]
fn parse_hex_u64(src: &str) -> Result<u64, std::num::ParseIntError> {
u64::from_str_radix(src, 16)
}
|
use ndarray::{s, Array, Array3, Array4, ArrayD, ArrayViewD, Ix3};
use serde::Serialize;
use super::{Layer, LayerJson};
#[derive(Clone, Serialize)]
pub struct Convolution {
kernel: Array4<f32>,
name: String,
input_shape: Vec<usize>,
}
impl Convolution {
pub fn new(kernel: Array4<f32>, input_shape: Vec<usize>) -> Convolution {
let (c_out, hf, wf, c_in) = kernel.dim();
let name = format!("conv {}x{}x{}x{}", c_out, hf, wf, c_in);
Convolution {
kernel,
name,
input_shape,
}
}
}
impl Layer for Convolution {
fn box_clone(&self) -> Box<dyn Layer> {
Box::new(self.clone())
}
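    // "Valid" convolution (no padding): every (hf, wf, c_in) window of the
    // input is dotted with the kernel of each output channel, shrinking the
    // spatial dims to (h - hf + 1, w - wf + 1).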
fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
let input = input.clone().into_dimensionality::<Ix3>().unwrap();
let (h, w, c) = input.dim();
let (c_out, hf, wf, c_in) = self.kernel.dim();
assert_eq!(c, c_in, "input channels must match");
assert!(hf % 2 == 1, "height of the kernel must be an odd number");
assert!(wf % 2 == 1, "width of the kernel must be an odd number");
let window_dim = (hf, wf, c_in);
let output_shape = (h - hf + 1, w - wf + 1);
let mut output = Array3::zeros((output_shape.0, output_shape.1, c_out));
for i in 0..c_out {
let mut output_mut = output.slice_mut(s![.., .., i]);
let kernel = self.kernel.slice(s![i, .., .., ..]);
let values = input
.windows(window_dim)
.into_iter()
.map(|w| (&w * &kernel).sum());
let values = Array::from_iter(values)
.into_shape(output_shape)
.expect("Kernel result dimensions mismatch");
output_mut.assign(&values);
}
output.into_dyn()
}
fn input_shape(&self) -> Vec<usize> {
self.input_shape.clone()
}
fn name(&self) -> &str {
&self.name
}
fn num_params(&self) -> usize {
self.kernel.len()
}
fn num_muls(&self) -> usize {
let (c_out, hf, wf, _) = self.kernel.dim();
        let output_shape = self.output_shape();
output_shape[0] * output_shape[1] * c_out * hf * wf
}
fn output_shape(&self) -> Vec<usize> {
let input_shape = self.input_shape();
let h = input_shape[0];
let w = input_shape[1];
let c = input_shape[2];
let (c_out, hf, wf, c_in) = self.kernel.dim();
assert_eq!(c, c_in, "input channels must match");
assert!(hf % 2 == 1, "height of the kernel must be an odd number");
assert!(wf % 2 == 1, "width of the kernel must be an odd number");
vec![h - hf + 1, w - wf + 1, c_out]
}
fn to_json(&self) -> LayerJson {
LayerJson::Convolution {
kernel: self.kernel.clone().into(),
input_shape: self.input_shape(),
}
}
}
#[cfg(test)]
pub mod test {
    use super::*;

    #[test]
    fn test_small() {
use ndarray::array;
let input = array![
[
[0.51682377_f32],
[-2.3552072],
[-0.120499134],
[2.3132505],
[-3.470844]
],
[[-1.1741579], [3.4295654], [-1.2318683], [-1.9749749], [
-0.8161392
]],
[[4.7562046], [-2.8918338], [2.308525], [2.6111293], [
-1.0765815
]],
[[-4.1224194], [3.022316], [-4.5339823], [4.2970715], [
2.6773367
]],
[[-4.289216], [-3.3795083], [-2.651745], [-1.1392272], [
3.9378529
]]
];
let kernel: Array4<f32> = array![
[[1.0336475_f32], [-4.7104144], [-0.24099827]],
[[4.626501], [-6.941688], [-2.3483157]],
[[6.859131], [-2.4637365], [-3.9499497]]
]
.into_shape((1, 3, 3, 1))
.unwrap();
let expected = array![
[[15.940444], [-9.205237], [13.396301]],
[[1.7727833], [-10.784569], [-48.952152]],
[[-22.043327], [8.725433], [-97.68271]]
];
let conv = Convolution::new(kernel, vec![1, 5, 5, 1]);
let result = conv.apply(&input.into_dyn().view());
let delta = result - &expected;
let max_error = delta.into_iter().map(f32::abs).fold(0.0, f32::max);
dbg!(max_error);
assert!(max_error < 10.0 * f32::EPSILON);
    }

    #[test]
    fn conv_test() {
use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
use rand::rngs::StdRng;
let seed = 694201337;
let mut rng = StdRng::seed_from_u64(seed);
let input = Array3::random_using((120, 80, 3), Uniform::<f32>::new(-5., 5.), &mut rng);
let kernel = Array4::random_using((32, 5, 5, 3), Uniform::<f32>::new(-10., 10.), &mut rng);
let conv = Convolution::new(kernel, vec![120, 80, 3]);
let result = conv
.apply(&input.into_dyn().view())
.into_dimensionality::<Ix3>()
.unwrap();
assert_eq!(conv.output_shape(), vec![116, 76, 32]);
let (dim_x, dim_y, dim_z) = result.dim();
        println!(
            "# params: {}\n
            output dim: {}x{}x{}\n
            # muls: {}\n
            {} output:\n
            {}",
            conv.num_params(),
            dim_x,
            dim_y,
            dim_z,
            conv.num_muls(),
            conv.name(),
            result
        );
}
} |
use ndarray::{Array1, ArrayD, ArrayViewD};
use serde::Serialize;
use super::{Layer, LayerJson};
#[derive(Clone, Serialize)]
pub struct Flatten {
name: String,
input_shape: Vec<usize>,
}
impl Flatten {
pub fn new(input_shape: Vec<usize>) -> Flatten {
Flatten {
name: "flatten".into(),
input_shape,
}
}
}
impl Layer for Flatten {
fn box_clone(&self) -> Box<dyn Layer> {
Box::new(self.clone())
}
fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
Array1::from_iter(input.iter().copied()).into_dyn()
}
fn input_shape(&self) -> Vec<usize> {
self.input_shape.clone()
}
fn name(&self) -> &str {
&self.name
}
fn num_params(&self) -> usize {
0
}
fn num_muls(&self) -> usize {
0
}
fn output_shape(&self) -> Vec<usize> {
let mut output_shape = 1;
for i in self.input_shape() {
output_shape *= i;
}
vec![output_shape]
}
fn to_json(&self) -> LayerJson {
LayerJson::Flatten {
input_shape: self.input_shape(),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use ndarray::Array3;
use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
    use rand::rngs::StdRng;

    #[test]
    fn flatten_test() {
let seed = 694201337;
let mut rng = StdRng::seed_from_u64(seed);
let input = Array3::random_using((27, 17, 32), Uniform::<f32>::new(-5.0, 5.0), &mut rng);
let flat = Flatten::new(vec![27, 17, 32]);
let output = flat.apply(&input.into_dyn().view());
let n_multiplications = flat.num_muls();
let n_params = flat.num_params();
assert_eq!(output.len(), 14688);
        println!(
            "{}\n
            # params: {}\n
            output dim: {}\n
            # muls: {}\n
            output:\n
            {}",
            flat.name,
            n_params,
            output.len(),
            n_multiplications,
            output
        );
}
} |
use ndarray::{Array1, Array2, ArrayD, ArrayViewD, Ix1};
use serde::Serialize;
use super::{Layer, LayerJson};
#[derive(Clone, Serialize)]
pub struct FullyConnected {
weights: Array2<f32>,
biases: Array1<f32>,
name: String,
}
impl FullyConnected {
pub fn new(weights: Array2<f32>, biases: Array1<f32>) -> FullyConnected {
FullyConnected {
weights,
biases,
name: "full".into(),
}
}
}
impl Layer for FullyConnected {
fn box_clone(&self) -> Box<dyn Layer> {
Box::new(self.clone())
}
fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
        assert!(input.ndim() == 1, "Input must be a flattened array!");
assert!(
self.weights.shape()[1] == input.shape()[0],
"Input shapes must match (for the dot product to work)!"
);
assert!(
self.weights.shape()[0] == self.biases.shape()[0],
"Output shapes must match!"
);
let output = self
.weights
.dot(&input.clone().into_dimensionality::<Ix1>().unwrap())
+ self.biases.clone();
output.into_dyn()
}
fn name(&self) -> &str {
&self.name
}
fn num_params(&self) -> usize {
self.weights.len() + self.biases.len()
}
fn num_muls(&self) -> usize {
self.weights.len()
}
fn output_shape(&self) -> Vec<usize> {
assert!(
self.weights.shape()[0] == self.biases.shape()[0],
"Output shapes must match!"
);
vec![self.weights.shape()[0]]
}
fn input_shape(&self) -> Vec<usize> {
vec![self.weights.shape()[1]]
}
fn to_json(&self) -> LayerJson {
LayerJson::FullyConnected {
weights: self.weights.clone().into(),
biases: self.biases.clone().into(),
}
}
}
#[cfg(test)]
pub mod test {
    use super::*;
    use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
    use rand::rngs::StdRng;

    #[test]
    fn fully_connected_test() {
let seed = 694201337;
let mut rng = StdRng::seed_from_u64(seed);
let input = Array1::random_using(14688, Uniform::<f32>::new(-10.0, 10.0), &mut rng);
let weights =
Array2::random_using((1000, 14688), Uniform::<f32>::new(-10.0, 10.0), &mut rng);
let biases = Array1::random_using(1000, Uniform::<f32>::new(-10.0, 10.0), &mut rng);
let fully_connected = FullyConnected::new(weights, biases);
let output = fully_connected.apply(&input.into_dyn().view());
let n_params = fully_connected.num_params();
let n_multiplications = fully_connected.num_muls();
        println!(
            "{}\n
            # params: {}\n
            output dim: {}x1\n
            # muls: {}\n
            output:\n{}",
            fully_connected.name,
            n_params,
            output.len(),
            n_multiplications,
            output
        );
}
} |
use ndarray::{s, Array3, ArrayD, ArrayViewD, Ix3};
use ndarray_stats::QuantileExt;
use serde::Serialize;
use super::{Layer, LayerJson};
#[derive(Clone, Serialize)]
pub struct MaxPool {
kernel_side: usize,
name: String,
input_shape: Vec<usize>,
}
impl MaxPool {
pub fn new(kernel_side: usize, input_shape: Vec<usize>) -> MaxPool {
MaxPool {
name: "max-pool".into(),
kernel_side,
input_shape,
}
}
}
impl Layer for MaxPool {
fn box_clone(&self) -> Box<dyn Layer> {
Box::new(self.clone())
}
fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
let input = input.clone().into_dimensionality::<Ix3>().unwrap();
let (h, w, c) = input.dim();
assert!(h % self.kernel_side == 0, "Height must be divisible by s!");
assert!(w % self.kernel_side == 0, "Width must be divisible by s!");
let mut output = Array3::<f32>::zeros((h / self.kernel_side, w / self.kernel_side, c));
for i in (0..h).step_by(self.kernel_side) {
for j in (0..w).step_by(self.kernel_side) {
for k in 0..c {
let a = input.slice(s![i..i + self.kernel_side, j..j + self.kernel_side, k]);
output[[i / self.kernel_side, j / self.kernel_side, k]] = *a.max().unwrap();
}
}
}
output.into_dyn()
}
fn name(&self) -> &str {
&self.name
}
fn num_params(&self) -> usize {
0
}
fn num_muls(&self) -> usize {
let mut muls = 1;
for i in self.input_shape() {
muls *= i;
}
muls
}
fn output_shape(&self) -> Vec<usize> {
let input_shape = self.input_shape();
let h = input_shape[0];
let w = input_shape[1];
let c = input_shape[2];
assert!(h % self.kernel_side == 0, "Height must be divisible by s!");
assert!(w % self.kernel_side == 0, "Width must be divisible by s!");
        vec![h / self.kernel_side, w / self.kernel_side, c]
}
fn input_shape(&self) -> Vec<usize> {
        self.input_shape.clone()
}
fn to_json(&self) -> LayerJson {
LayerJson::MaxPool {
window: self.kernel_side,
input_shape: self.input_shape(),
}
}
}
#[cfg(test)]
pub mod test {
    use super::*;
    use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
    use rand::rngs::StdRng;

    #[test]
    fn maxpool_test() {
let seed = 694201337;
let mut rng = StdRng::seed_from_u64(seed);
let input = Array3::random_using((116, 76, 32), Uniform::<f32>::new(-5.0, 5.0), &mut rng);
        let maxpool = MaxPool::new(2, vec![116, 76, 32]);
let output = maxpool
.apply(&input.into_dyn().view())
.into_dimensionality::<Ix3>()
.unwrap();
let n_params = maxpool.num_params();
let n_multiplications = maxpool.num_muls();
assert_eq!(output.dim(), (58, 38, 32));
let (dim_x, dim_y, dim_z) = output.dim();
        println!(
            "{}\n
            # params: {}\n
            output dim: {}x{}x{}\n
            # muls: {}\n
            output:\n
            {}",
            maxpool.name, n_params, dim_x, dim_y, dim_z, n_multiplications, output
);
}
} |
use std::fmt::{Display, Formatter, Result};
use erased_serde::serialize_trait_object;
use ndarray::{ArcArray, ArrayD, ArrayViewD, Ix1, Ix2, Ix4};
use serde::{Deserialize, Serialize};
pub mod conv;
pub mod flatten;
pub mod fully_connected;
pub mod maxpool;
pub mod normalize;
pub mod relu;
pub trait Layer: erased_serde::Serialize {
fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32>;
fn input_shape(&self) -> Vec<usize>;
fn name(&self) -> &str;
fn num_params(&self) -> usize;
fn num_muls(&self) -> usize;
fn output_shape(&self) -> Vec<usize>;
fn to_json(&self) -> LayerJson;
fn box_clone(&self) -> Box<dyn Layer>;
}
serialize_trait_object!(Layer);
impl Display for Box<dyn Layer> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
write!(
f,
"{:<20} | {:?}{:<5} | {:<5} | {:<5}",
self.name(),
self.output_shape(),
"",
self.num_params(),
self.num_muls(),
)
}
}
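// Illustrative note (added): with the format above, a layer prints as one
// aligned table row, e.g. `max-pool             | [58, 38, 32]      | 0     | 282112`,
// matching the header emitted by `log_nn_table` in the nn module.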
impl Clone for Box<dyn Layer> {
fn clone(&self) -> Self {
Layer::box_clone(&**self)
}
}
#[derive(Clone, Serialize, Deserialize)]
pub enum LayerJson {
Convolution {
kernel: ArcArray<f32, Ix4>,
input_shape: Vec<usize>,
},
MaxPool {
window: usize,
input_shape: Vec<usize>,
},
FullyConnected {
weights: ArcArray<f32, Ix2>,
biases: ArcArray<f32, Ix1>,
},
Relu {
input_shape: Vec<usize>,
},
Flatten {
input_shape: Vec<usize>,
},
Normalize {
input_shape: Vec<usize>,
},
}
#[derive(Clone, Serialize, Deserialize)]
pub struct NNJson {
pub layers: Vec<LayerJson>,
}
impl TryFrom<LayerJson> for Box<dyn Layer> {
type Error = ();
fn try_from(value: LayerJson) -> std::result::Result<Self, ()> {
Ok(match value {
LayerJson::Convolution {
kernel,
input_shape,
} => Box::new(conv::Convolution::new(kernel.to_owned(), input_shape)),
LayerJson::MaxPool {
window,
input_shape,
} => Box::new(maxpool::MaxPool::new(window.to_owned(), input_shape)),
LayerJson::FullyConnected { weights, biases } => Box::new(
fully_connected::FullyConnected::new(weights.to_owned(), biases.to_owned()),
),
LayerJson::Flatten { input_shape } => Box::new(flatten::Flatten::new(input_shape)),
LayerJson::Relu { input_shape } => Box::new(relu::Relu::new(input_shape)),
LayerJson::Normalize { input_shape } => {
Box::new(normalize::Normalize::new(input_shape))
}
})
}
}
impl FromIterator<LayerJson> for NNJson {
    fn from_iter<T: IntoIterator<Item = LayerJson>>(iter: T) -> Self {
        Self {
            layers: iter.into_iter().collect(),
        }
    }
}
impl From<NeuralNetwork> for NNJson {
fn from(nn: NeuralNetwork) -> Self {
nn.layers.into_iter().map(|l| l.to_json()).collect()
}
}
impl TryFrom<NNJson> for NeuralNetwork {
type Error = ();
fn try_from(value: NNJson) -> std::result::Result<Self, ()> {
Ok(Self {
layers: value
.layers
.into_iter()
.map(|i| i.try_into().unwrap())
.collect(),
})
}
}
pub struct NeuralNetwork {
layers: Vec<Box<dyn Layer>>,
}
impl NeuralNetwork {
pub fn new() -> Self {
Self { layers: vec![] }
}
    pub fn add_layer(&mut self, layer: Box<dyn Layer>) {
self.layers.push(layer);
}
pub fn apply(&self, input: &ArrayViewD<f32>, dim: usize) -> Option<ArrayD<f32>> {
if dim == 3 {
let mut output = input.view().into_owned();
for layer in &self.layers {
output = layer.apply(&output.view());
println!("{}", layer);
}
Some(output)
} else {
None
}
}
}
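// Usage sketch (added for illustration; `image` is a hypothetical 3-dimensional
// ArrayD<f32>). Layers run in insertion order, and `apply` returns None for any
// input dimensionality other than 3:
//
//     let mut nn = NeuralNetwork::new();
//     nn.add_layer(Box::new(relu::Relu::new(vec![8, 8, 3])));
//     let out = nn.apply(&image.view(), 3);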
impl Default for NeuralNetwork {
fn default() -> Self {
Self::new()
}
} |
use ndarray::{ArrayD, ArrayViewD};
use serde::Serialize;
use super::{Layer, LayerJson};
#[derive(Clone, Serialize)]
pub struct Normalize {
name: String,
input_shape: Vec<usize>,
}
impl Normalize {
#[must_use]
pub fn new(input_shape: Vec<usize>) -> Normalize {
Normalize {
name: "normalize".into(),
input_shape,
}
}
}
impl Layer for Normalize {
fn box_clone(&self) -> Box<dyn Layer> {
Box::new(self.clone())
}
    fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
        // Entries are assumed to be integer-valued floats; squaring and summing
        // in i128 keeps the sum of squares exact before the final f32 division.
        let input = input.mapv(|x| x as i128);
        let norm = f32::sqrt(input.mapv(|x| x.pow(2)).sum() as f32);
        input.mapv(|x| x as f32 / norm).into_dyn()
    }
fn input_shape(&self) -> Vec<usize> {
self.input_shape.clone()
}
fn name(&self) -> &str {
&self.name
}
fn num_params(&self) -> usize {
0
}
fn num_muls(&self) -> usize {
let mut muls = 1;
for i in self.input_shape() {
muls *= i;
}
1 + muls
}
fn output_shape(&self) -> Vec<usize> {
self.input_shape.clone()
}
fn to_json(&self) -> LayerJson {
LayerJson::Normalize {
input_shape: self.input_shape(),
}
}
}
#[cfg(test)]
mod test {
use super::*;
use ndarray::{arr1, array};
#[test]
fn normalize_test() {
let input = arr1(&[
-6276474000.,
8343393300.,
8266027500.,
-7525360600.,
7814137000.,
]);
let normalize = Normalize::new(vec![5]);
let output = normalize.apply(&input.into_dyn().view());
let expected = array![-0.36541474, 0.4857503, 0.48124605, -0.43812463, 0.4549371];
let delta = &output - &expected;
let max_error = delta.into_iter().map(f32::abs).fold(0.0, f32::max);
assert!(max_error < 10.0 * f32::EPSILON);
println!("{}", output);
}
}
|
use ndarray::{ArrayD, ArrayViewD};
use serde::Serialize;
use super::{Layer, LayerJson};
#[derive(Clone, Serialize)]
pub struct Relu {
name: String,
input_shape: Vec<usize>,
}
impl Relu {
pub fn new(input_shape: Vec<usize>) -> Self {
Self {
name: "relu".into(),
input_shape,
}
}
}
impl Layer for Relu {
fn box_clone(&self) -> Box<dyn Layer> {
Box::new(self.clone())
}
fn apply(&self, input: &ArrayViewD<f32>) -> ArrayD<f32> {
input.mapv(|x| f32::max(0.0, x))
}
fn name(&self) -> &str {
&self.name
}
fn num_params(&self) -> usize {
let mut params = 1;
for i in self.input_shape() {
params *= i;
}
params
}
fn num_muls(&self) -> usize {
0
}
fn output_shape(&self) -> Vec<usize> {
self.input_shape.clone()
}
fn input_shape(&self) -> Vec<usize> {
self.input_shape.clone()
}
fn to_json(&self) -> LayerJson {
LayerJson::Relu {
input_shape: self.input_shape(),
}
}
}
#[cfg(test)]
pub mod test {
use super::*;
use ndarray::{arr1, arr3, Ix1, Ix3}; |
    #[test]
    fn relu_test3() {
let input = arr3(&[[[1.2, -4.3], [-2.1, 4.3]], [[5.2, 6.1], [7.6, -1.8]], [
[9.3, 0.0],
[1.2, 3.4],
]]);
let relu = Relu::new(vec![3, 2, 2]);
let output = relu.apply(&input.into_dyn().view());
let n_params = relu.num_params();
let n_multiplications = relu.num_muls();
assert_eq!(
output,
arr3(&[[[1.2, 0.0], [0.0, 4.3]], [[5.2, 6.1], [7.6, 0.0]], [
[9.3, 0.0],
[1.2, 3.4],
]])
.into_dyn()
);
let size: (usize, usize, usize) = (3, 2, 2);
assert_eq!(
&output.clone().into_dimensionality::<Ix3>().unwrap().dim(),
&size
);
        println!(
            "
            {}
            # of params: {}
            # of muls: {}
            output:\n{}",
            relu.name(),
            n_params,
            n_multiplications,
            output
        );
} |
    #[test]
    fn relu_test1() {
let input = arr1(&[-4., -3.4, 6., 7., 1., -3.]).into_dyn();
let relu = Relu::new(vec![6]);
let output = relu.apply(&input.into_dyn().view());
let n_params = relu.num_params();
let n_multiplications = relu.num_muls();
assert_eq!(output, arr1(&[0., 0., 6., 7., 1., 0.]).into_dyn());
assert_eq!(
output.clone().into_dimensionality::<Ix1>().unwrap().len(),
6
);
        println!(
            "
            {}
            # of params: {}
            output dim: {}x1
            # of muls: {}
            output:\n{}",
            relu.name(),
            n_params,
            output.len(),
            n_multiplications,
            output
        );
}
} |
mod allocator;
mod anyhow;
pub mod layers;
pub mod nn;
pub mod serialize;
use self::{allocator::Allocator, anyhow::MapAny as _};
use bytesize::ByteSize;
use eyre::{eyre, Result as EyreResult};
use log::Level;
use plonky2::{
field::types::Field,
iop::{
target::Target,
witness::{PartialWitness, Witness},
},
plonk::{
circuit_builder::CircuitBuilder,
circuit_data::{CircuitConfig, CircuitData},
config::{GenericConfig, KeccakGoldilocksConfig, PoseidonGoldilocksConfig},
proof::{CompressedProofWithPublicInputs, ProofWithPublicInputs},
},
};
use rand::Rng as _;
use std::{iter::once, sync::atomic::Ordering, time::Instant};
use structopt::StructOpt;
use tracing::{info, trace};
type Rng = rand_pcg::Mcg128Xsl64;
// Exactly one global allocator may be active; the cfg feature gate and the
// #[global_allocator] registration are assumptions, as the original
// attributes appear to have been lost in extraction.
#[cfg(not(feature = "mimalloc"))]
#[global_allocator]
pub static ALLOCATOR: Allocator<allocator::StdAlloc> = allocator::new_std();
#[cfg(feature = "mimalloc")]
#[global_allocator]
pub static ALLOCATOR: Allocator<allocator::MiMalloc> = allocator::new_mimalloc();
// StructOpt derive restored so the `structopt` import is used; the original
// per-field attributes (flag names, defaults) are not recoverable here.
#[derive(Clone, Debug, StructOpt)]
pub struct Options {
pub bench: bool,
pub input_size: usize,
pub output_size: usize,
pub coefficient_bits: usize,
pub num_wires: usize,
pub num_routed_wires: usize,
pub constant_gate_size: usize,
}
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type Builder = CircuitBuilder<F, D>;
type Proof = ProofWithPublicInputs<F, C, D>;
fn to_field(value: i32) -> F {
if value >= 0 {
F::from_canonical_u32(value as u32)
} else {
-F::from_canonical_u32(-value as u32)
}
}
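// Added note: `dot` lowers the inner product sum_i(c_i * x_i) to a chain of
// plonky2 `mul_const_add` gates, one per coefficient, so the coefficients are
// circuit constants rather than witness values.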
fn dot(builder: &mut Builder, coefficients: &[i32], input: &[Target]) -> Target {
assert_eq!(coefficients.len(), input.len());
let mut sum = builder.zero();
for (&coefficient, &input) in coefficients.iter().zip(input) {
let coefficient = to_field(coefficient);
sum = builder.mul_const_add(coefficient, input, sum);
}
sum
}
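// Added note: `full` is a fully-connected (matrix-vector) layer; coefficients
// are consumed row by row in `input_size` chunks, one `dot` per output.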
fn full(builder: &mut Builder, coefficients: &[i32], input: &[Target]) -> Vec<Target> {
let input_size = input.len();
let output_size = coefficients.len() / input_size;
assert_eq!(coefficients.len(), input_size * output_size);
builder.push_context(Level::Info, "full");
let mut output = Vec::with_capacity(output_size);
for coefficients in coefficients.chunks_exact(input_size) {
output.push(dot(builder, coefficients, input));
}
builder.pop_context();
output
} |
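// Added note: `Circuit` proves y = A*x for the coefficient matrix baked in at
// build time; both x (inputs) and y (outputs) are registered as public inputs,
// so the verifier sees the full statement.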
struct Circuit {
inputs: Vec<Target>,
outputs: Vec<Target>,
data: CircuitData<F, C, D>,
}
impl Circuit {
fn build(options: &Options, coefficients: &[i32]) -> Circuit {
assert_eq!(coefficients.len(), options.input_size * options.output_size);
info!(
"Building circuit for for {}x{} matrix-vector multiplication",
options.input_size, options.output_size
);
let config = CircuitConfig {
num_wires: options.num_wires,
num_routed_wires: options.num_routed_wires,
..CircuitConfig::default()
};
let mut builder = CircuitBuilder::<F, D>::new(config);
builder.push_context(Level::Info, "Inputs");
let inputs = builder.add_virtual_targets(options.input_size);
inputs
.iter()
.for_each(|target| builder.register_public_input(*target));
builder.pop_context();
let outputs = full(&mut builder, &coefficients, &inputs);
outputs
.iter()
.for_each(|target| builder.register_public_input(*target));
builder.print_gate_counts(0);
let data = builder.build::<C>();
Self {
inputs,
outputs,
data,
}
}
fn prove(&self, input: &[i32]) -> EyreResult<Proof> {
info!("Proving {} size input", input.len());
let mut pw = PartialWitness::new();
for (&target, &value) in self.inputs.iter().zip(input) {
pw.set_target(target, to_field(value));
}
let proof = self.data.prove(pw).map_any()?;
let proof_size = ByteSize(proof.to_bytes().map_any()?.len() as u64);
info!("Proof size: {proof_size}");
Ok(proof)
}
fn verify(&self, proof: &Proof) -> EyreResult<()> {
info!(
"Verifying proof with {} public inputs",
proof.public_inputs.len()
);
        self.data.verify(proof.clone()).map_any()
}
}
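// Added note: when `options.bench` is set, the loop below sweeps
// output_size = 1000, 2000, 3000, ... indefinitely; otherwise it runs once with
// the configured output_size, emitting one CSV row per iteration.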
pub async fn main(mut rng: Rng, mut options: Options) -> EyreResult<()> {
info!(
"Computing proof for {}x{} matrix-vector multiplication",
options.input_size, options.output_size
);
println!(
"input_size,output_size,build_time_s,proof_time_s,proof_mem_b,proof_size_b,verify_time_s"
);
let output_sizes: Box<dyn Iterator<Item = usize>> = if options.bench {
Box::new((1..).map(|n| n * 1000))
} else {
Box::new(once(options.output_size))
};
for output_size in output_sizes {
options.output_size = output_size;
let quantize_coeff = |c: i32| c % (1 << options.coefficient_bits);
let coefficients: Vec<i32> = (0..options.input_size * options.output_size)
.map(|_| quantize_coeff(rng.gen()))
.collect();
let now = Instant::now();
let circuit = Circuit::build(&options, &coefficients);
let circuit_build_time = now.elapsed();
ALLOCATOR.peak_allocated.store(0, Ordering::Release);
        let input_values = (0..options.input_size)
            .map(|_| rng.gen())
            .collect::<Vec<i32>>();
let now = Instant::now();
let proof = circuit.prove(&input_values)?;
let proof_mem = ALLOCATOR.peak_allocated.load(Ordering::Acquire);
let proof_time = now.elapsed();
let proof_size = proof.to_bytes().map_any()?.len() as u64;
info!("Prover memory usage: {}", ByteSize(proof_mem as u64));
let now = Instant::now();
circuit.verify(&proof)?;
let verify_time = now.elapsed();
println!(
"{},{},{},{},{},{},{}",
options.input_size,
options.output_size,
circuit_build_time.as_secs_f64(),
proof_time.as_secs_f64(),
proof_mem,
proof_size,
verify_time.as_secs_f64()
);
}
Ok(())
}
#[cfg(test)]
pub mod test { |
use super::*;
use proptest::proptest;
use tracing::{error, warn};
use tracing_test::traced_test; |
    #[test]
    #[traced_test]
    fn test_with_log_output() {
error!("logged on the error level");
assert!(logs_contain("logged on the error level"));
}
    #[tokio::test]
    #[traced_test]
    async fn async_test_with_log() {
info!("This is being logged on the info level");
tokio::spawn(async {
warn!("This is being logged on the warn level from a spawned task");
})
.await
.unwrap();
assert!(logs_contain("logged on the info level"));
assert!(logs_contain("logged on the warn level"));
assert!(!logs_contain("logged on the error level"));
}
}
pub mod bench {
use criterion::{black_box, BatchSize, Criterion};
use proptest::{
strategy::{Strategy, ValueTree},
test_runner::TestRunner,
};
use std::time::Duration;
use tokio::runtime;
    pub fn group(criterion: &mut Criterion) {
bench_example_proptest(criterion);
bench_example_async(criterion);
}
pub(crate) fn runtime() -> runtime::Runtime {
runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
} |
fn bench_example_proptest(criterion: &mut Criterion) {
let input = (0..5, 0..5);
let mut runner = TestRunner::deterministic();
criterion.bench_function("example_proptest", move |bencher| {
bencher.iter_batched(
|| input.new_tree(&mut runner).unwrap().current(),
                |(a, b)| black_box(a + b),
BatchSize::LargeInput,
);
});
} |
fn bench_example_async(criterion: &mut Criterion) {
let duration = Duration::from_micros(1);
criterion.bench_function("example_async", move |bencher| {
bencher.to_async(runtime()).iter(|| async {
tokio::time::sleep(duration).await;
});
});
}
} |
use crate::layers::{
conv::Convolution, flatten::Flatten, fully_connected::FullyConnected, maxpool::MaxPool,
normalize::Normalize, relu::Relu, NeuralNetwork,
};
use ndarray::{Array1, Array2, Array4};
use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
use rand::rngs::StdRng;
pub fn create_neural_net() -> NeuralNetwork {
let seed = 694201337;
let mut rng = StdRng::seed_from_u64(seed);
let mut neural_net = NeuralNetwork::new();
let kernel = Array4::random_using((32, 5, 5, 3), Uniform::<f32>::new(-10., 10.), &mut rng);
neural_net.add_layer(Box::new(Convolution::new(kernel, vec![120, 80, 3])));
neural_net.add_layer(Box::new(MaxPool::new(2, vec![116, 76, 32])));
neural_net.add_layer(Box::new(Relu::new(vec![58, 38, 32])));
let kernel = Array4::random_using((32, 5, 5, 32), Uniform::<f32>::new(-10., 10.), &mut rng);
neural_net.add_layer(Box::new(Convolution::new(kernel, vec![58, 38, 32])));
neural_net.add_layer(Box::new(MaxPool::new(2, vec![54, 34, 32])));
neural_net.add_layer(Box::new(Relu::new(vec![27, 17, 32])));
neural_net.add_layer(Box::new(Flatten::new(vec![27, 17, 32])));
let weights = Array2::random_using((1000, 14688), Uniform::<f32>::new(-10.0, 10.0), &mut rng);
let biases = Array1::random_using(1000, Uniform::<f32>::new(-10.0, 10.0), &mut rng);
neural_net.add_layer(Box::new(FullyConnected::new(weights, biases)));
neural_net.add_layer(Box::new(Relu::new(vec![1000])));
let weights = Array2::random_using((5, 1000), Uniform::<f32>::new(-10.0, 10.0), &mut rng);
let biases = Array1::random_using(5, Uniform::<f32>::new(-10.0, 10.0), &mut rng);
neural_net.add_layer(Box::new(FullyConnected::new(weights, biases)));
neural_net.add_layer(Box::new(Normalize::new(vec![5])));
neural_net
}
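// Added sketch (not in the original source): pushes one all-zeros 120x80x3
// input through the network above; the layer shapes imply a length-5 output.
#[cfg(test)]
mod create_net_smoke_test {
    use super::*;
    use ndarray::Array3;
    #[test]
    fn end_to_end_shape() {
        let net = create_neural_net();
        let input = Array3::<f32>::zeros((120, 80, 3));
        let output = net.apply(&input.into_dyn().view(), 3).unwrap();
        assert_eq!(output.len(), 5);
    }
}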
pub fn log_nn_table() {
    println!(
        "{:<20} | {:<15} | {:<15} | {:<15}",
        "layer", "output shape", "# of params", "# of muls"
    );
println!("{:-<77}", "");
}
#[cfg(test)]
pub mod tests {
extern crate test;
use crate::{
layers::{
conv::Convolution, flatten::Flatten, fully_connected::FullyConnected, maxpool::MaxPool,
normalize::Normalize, relu::Relu, NeuralNetwork,
},
serialize::deserialize_model_json,
};
use ndarray::{ArcArray, Array1, Array2, Array3, Array4, Ix1, Ix2, Ix3, Ix4};
use ndarray_rand::{rand::SeedableRng, rand_distr::Uniform, RandomExt};
use rand::rngs::StdRng;
use std::fs;
use test::Bencher;
use super::log_nn_table; |