// File: hf_public_repos/candle/candle-transformers/src/models/recurrent_gemma.rs
//! Recurrent Gemma model implementation
//!
//! RecurrentGemma is a variant of the Gemma language model that mixes recurrent (RG-LRU)
//! blocks with local attention. The recurrent blocks carry a fixed-size state from one
//! token to the next instead of a growing key/value cache, which helps with long inputs.
//!
//! Key characteristics:
//! - Real-gated linear recurrent units (RGLRU)
//! - 1D convolution for local context
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//! - Grouped query attention
//!
//! References:
//! - [Gemma: Open Models Based on Gemini Technology](https://blog.google/technology/developers/gemma-open-models/)
//! - [Recurrent Memory model architecture](https://arxiv.org/abs/2402.00441)
//!
//! This implementation is based on the Python version from huggingface/transformers.
//! https://github.com/huggingface/transformers/blob/b109257f4fb8b1166e7c53cc5418632014ed53a5/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py#L2
//!
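//! Example (sketch):
//!
//! A minimal usage sketch, assuming `cfg: Config` has been deserialized from the model's
//! `config.json` and `vb: VarBuilder` wraps its weights (both prepared outside this
//! module); the sampling loop is omitted.
//!
//! ```ignore
//! use candle::{Device, Tensor};
//!
//! let mut model = Model::new(&cfg, vb)?;
//! // Prompt token ids, shape (batch, seq_len); the ids below are placeholders.
//! let prompt = Tensor::new(&[2u32, 1350, 736], &Device::Cpu)?.unsqueeze(0)?;
//! let logits = model.forward(&prompt, 0)?;
//! // Later calls pass the newly generated tokens with the running position offset.
//! ```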
use candle::{DType, Device, IndexOp, Module, Result, Tensor, D};
use candle_nn::{linear_b as linear, Linear, VarBuilder};
use std::sync::Arc;
#[derive(serde::Deserialize, Debug, Clone, Copy)]
#[serde(rename_all = "snake_case")]
pub enum TemporalBlockType {
Attention,
Recurrent,
}
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub num_hidden_layers: usize,
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub head_dim: usize,
pub lru_width: Option<usize>,
pub attention_window_size: usize,
pub conv1d_width: usize,
pub logits_soft_cap: f64,
pub hidden_activation: candle_nn::Activation,
pub partial_rotary_factor: f64,
pub rms_norm_eps: f64,
pub rope_theta: f64,
#[serde(alias = "_block_types")]
pub block_types: Vec<TemporalBlockType>,
pub attention_bias: bool,
#[serde(default = "default_max_seq_len")]
pub max_seq_len: usize,
}
fn default_max_seq_len() -> usize {
8192
}
#[derive(Debug, Clone)]
pub(crate) struct RmsNorm {
weight: Tensor,
eps: f64,
}
impl RmsNorm {
pub(crate) fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(dim, "weight")?;
Ok(Self { weight, eps })
}
pub(crate) fn from_weight(weight: Tensor, eps: f64) -> Self {
Self { weight, eps }
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
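// RMSNorm: y = x / sqrt(mean(x^2) + eps) * (1 + weight); half-precision inputs are
// upcast to f32 for the reduction and cast back afterwards.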
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
x_normed
.to_dtype(x_dtype)?
.broadcast_mul(&(&self.weight + 1.0)?)
}
}
#[derive(Debug, Clone)]
pub(crate) struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
fn rotate_half(xs: &Tensor) -> Result<Tensor> {
let last_dim = xs.dim(D::Minus1)?;
let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}
impl RotaryEmbedding {
pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
if cfg.partial_rotary_factor != 0.5 {
candle::bail!("partial-rotary-factor {} <> 0.5", cfg.partial_rotary_factor)
}
let dim = cfg.head_dim / 2;
let max_seq_len = cfg.max_seq_len;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
pub(crate) fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim)
let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?;
let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
struct Mlp {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl Mlp {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let intermediate_size = cfg.intermediate_size / 2;
let gate_proj = linear(h, intermediate_size, true, vb.pp("gate_proj"))?;
let up_proj = linear(h, intermediate_size, true, vb.pp("up_proj"))?;
let down_proj = linear(intermediate_size, h, true, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_activation,
})
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let gate = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
(gate * xs.apply(&self.up_proj))?.apply(&self.down_proj)
}
}
// Real-Gated Linear Recurrent Unit
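// For each time step t (see `forward` below):
//   i_t = sigmoid(x_t W_i + b_i)                      input gate
//   r_t = sigmoid(x_t W_r + b_r)                      recurrence gate
//   a_t = exp(-8 * r_t * softplus(recurrent_param))
//   h_t = a_t * h_{t-1} + sqrt(1 - a_t^2) * (i_t * x_t)
// At reset positions the sqrt factor is dropped and the state restarts from zero.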
#[derive(Debug, Clone)]
pub(crate) struct Rglru {
pub(crate) recurrent_param: Tensor,
pub(crate) input_gate_weight: Tensor,
pub(crate) input_gate_bias: Tensor,
pub(crate) recurrent_gate_weight: Tensor,
pub(crate) recurrent_gate_bias: Tensor,
pub(crate) block_width: usize,
pub(crate) n_heads: usize,
pub(crate) recurrent_states: Option<Tensor>,
}
fn baddbmm(a: &Tensor, b: &Tensor, c: &Tensor) -> Result<Tensor> {
a.broadcast_add(&b.matmul(c)?)
}
fn softplus(xs: &Tensor) -> Result<Tensor> {
(xs.exp()? + 1.0)?.log()
}
impl Rglru {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let lru_width = cfg.lru_width.unwrap_or(h);
let n_heads = cfg.num_attention_heads;
let block_width = lru_width / n_heads;
let recurrent_param = vb.get((lru_width,), "recurrent_param")?;
let input_gate_weight = vb.get((n_heads, block_width, block_width), "input_gate_weight")?;
let input_gate_bias = vb.get((n_heads, block_width), "input_gate_bias")?;
let recurrent_gate_weight =
vb.get((n_heads, block_width, block_width), "recurrent_gate_weight")?;
let recurrent_gate_bias = vb.get((n_heads, block_width), "recurrent_gate_bias")?;
Ok(Self {
recurrent_param,
input_gate_bias,
input_gate_weight,
recurrent_gate_bias,
recurrent_gate_weight,
block_width,
n_heads,
recurrent_states: None,
})
}
// https://github.com/huggingface/transformers/blob/0bd58f1ce0573c0e3269de4215a17d318add49b9/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py#L303
pub(crate) fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
let (b_sz, seq_len, lru_width) = xs.dims3()?;
let pos = Tensor::arange(pos as u32, (pos + seq_len) as u32, xs.device())?;
let reset = pos.eq(0u32)?.unsqueeze(1)?.unsqueeze(0)?;
let reshape_act = xs
.reshape((b_sz * seq_len, self.n_heads, self.block_width))?
.permute((1, 0, 2))?
.contiguous()?;
let res = baddbmm(
&self.input_gate_bias.unsqueeze(1)?,
&reshape_act,
&self.input_gate_weight,
)?;
let input_gate = res.transpose(0, 1)?.reshape((b_sz, seq_len, lru_width))?;
let input_gate = candle_nn::ops::sigmoid(&input_gate)?;
let res = baddbmm(
&self.recurrent_gate_bias.unsqueeze(1)?,
&reshape_act,
&self.recurrent_gate_weight,
)?;
let recurrent_gate = res.transpose(0, 1)?.reshape((b_sz, seq_len, lru_width))?;
let recurrent_gate = candle_nn::ops::sigmoid(&recurrent_gate)?;
let log_recurrent_gate =
(recurrent_gate * (-8.0))?.broadcast_mul(&softplus(&self.recurrent_param)?)?;
let recurrent_gate = log_recurrent_gate.exp()?;
let a_square = (log_recurrent_gate * 2.)?.exp()?;
// Gate the input.
let gated_inputs = (xs * input_gate)?;
let reset = reset.to_dtype(a_square.dtype())?;
let multiplier =
reset.broadcast_add(&((1.0 - &reset)?.broadcast_mul(&(1.0 - a_square)?.sqrt()?))?)?;
let normalized_x = (gated_inputs * multiplier.to_dtype(xs.dtype()))?;
let (hidden_states, recurrent_states) = rnn_scan(
&normalized_x,
&recurrent_gate,
&reset,
self.recurrent_states.as_ref(),
)?;
self.recurrent_states = Some(recurrent_states);
Ok(hidden_states)
}
}
fn rnn_scan(
hidden_states: &Tensor,
recurrent_gate: &Tensor,
reset: &Tensor,
recurrent_states: Option<&Tensor>,
) -> Result<(Tensor, Tensor)> {
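// Linear scan over time: h_t = a_t * h_{t-1} + x_t, with the recurrent gate zeroed at
// reset positions so the state restarts from zero there. Returns the per-step outputs
// and the final recurrent state.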
let acc_dtype = DType::F32;
let dev = hidden_states.device();
let in_dtype = hidden_states.dtype();
let inv_reset = (1.0 - reset)?.to_dtype(recurrent_gate.dtype())?;
let recurrent_gate = recurrent_gate.broadcast_mul(&inv_reset)?;
let (c, r) = if hidden_states.dim(1)? == 1 {
match recurrent_states {
None => {
let next_state = hidden_states.i((.., 0))?.to_dtype(acc_dtype)?;
(hidden_states.clone(), next_state)
}
Some(recurrent_states) => {
let contextualized_states =
recurrent_gate.to_dtype(acc_dtype)? * recurrent_states.unsqueeze(1)?;
let contextualized_states =
(contextualized_states + hidden_states.to_dtype(acc_dtype)?)?;
let c = contextualized_states.to_dtype(in_dtype)?;
let l = contextualized_states.dim(1)?;
let r = contextualized_states.i((.., l - 1))?;
(c, r)
}
}
} else {
let mut recurrent_states = match recurrent_states {
None => Tensor::zeros(hidden_states.i((.., 0))?.shape(), acc_dtype, dev)?,
Some(r) => r.clone(),
};
let mut contextualized_states = vec![];
for t in 0..hidden_states.dim(1)? {
recurrent_states =
(recurrent_gate.i((.., t))?.to_dtype(acc_dtype)? * recurrent_states)?;
recurrent_states =
(recurrent_states + hidden_states.i((.., t))?.to_dtype(acc_dtype)?)?;
contextualized_states.push(recurrent_states.to_dtype(in_dtype)?)
}
let contextualized_states = Tensor::stack(&contextualized_states, 1)?;
(contextualized_states, recurrent_states)
};
Ok((c, r))
}
#[derive(Debug, Clone)]
struct RecurrentBlock {
linear_y: Linear,
linear_x: Linear,
linear_out: Linear,
conv_1d: candle_nn::Conv1d,
conv1d_state: Option<Tensor>,
conv1d_width: usize,
rg_lru: Rglru,
act_fn: candle_nn::Activation,
}
impl RecurrentBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let lru_width = cfg.lru_width.unwrap_or(h);
let linear_y = linear(h, lru_width, true, vb.pp("linear_y"))?;
let linear_x = linear(h, lru_width, true, vb.pp("linear_x"))?;
let linear_out = linear(lru_width, h, true, vb.pp("linear_out"))?;
let conv_1d = candle_nn::conv1d(
lru_width,
lru_width,
cfg.conv1d_width,
candle_nn::Conv1dConfig {
groups: lru_width,
padding: cfg.conv1d_width - 1,
..Default::default()
},
vb.pp("conv_1d"),
)?;
let rg_lru = Rglru::new(cfg, vb.pp("rg_lru"))?;
Ok(Self {
linear_y,
linear_x,
linear_out,
conv_1d,
conv1d_state: None,
conv1d_width: cfg.conv1d_width,
rg_lru,
act_fn: cfg.hidden_activation,
})
}
pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
let (_b_sz, seq_len, _) = xs.dims3()?;
let y_branch = xs.apply(&self.linear_y)?.apply(&self.act_fn)?;
let x_branch = xs.apply(&self.linear_x)?.transpose(1, 2)?;
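// Prefill (pos == 0): run the full causal conv1d over the prompt and cache the last
// conv1d_width - 1 input columns. Decode (pos > 0): emulate a single conv step as a
// dot product between the cached window and the depthwise kernel, then shift the cache.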
let x_branch = if pos == 0 {
let x_len = x_branch.dim(D::Minus1)?;
let pad = self.conv1d_width as i64 - x_len as i64 - 1;
let padded = match pad.cmp(&0) {
std::cmp::Ordering::Equal => x_branch.clone(),
std::cmp::Ordering::Less => {
let rev_pad = (-pad) as usize;
x_branch.narrow(D::Minus1, rev_pad, x_len - rev_pad)?
}
std::cmp::Ordering::Greater => {
x_branch.pad_with_zeros(D::Minus1, pad as usize, 0)?
}
};
self.conv1d_state = Some(padded);
x_branch
.apply(&self.conv_1d)?
.narrow(D::Minus1, 0, seq_len)?
} else {
let conv_state = match self.conv1d_state.as_ref() {
None => candle::bail!("empty cache despite pos > 0"),
Some(s) => Tensor::cat(&[s, &x_branch], D::Minus1)?,
};
let w = self.conv_1d.weight().i((.., 0, ..))?;
let x_branch = conv_state.broadcast_mul(&w)?.sum(D::Minus1)?;
let x_branch = match self.conv_1d.bias() {
None => x_branch,
Some(b) => x_branch.broadcast_add(b)?,
};
let x_branch = x_branch.unsqueeze(D::Minus1)?;
self.conv1d_state = Some(conv_state.i((.., .., 1..))?);
x_branch
};
let x_branch = x_branch.transpose(1, 2)?;
let x_branch = self.rg_lru.forward(&x_branch, pos)?;
(x_branch * y_branch)?.apply(&self.linear_out)
}
}
#[derive(Debug, Clone)]
struct SdpaAttention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
n_heads: usize,
n_kv_heads: usize,
head_dim: usize,
hidden_size: usize,
kv_cache: Option<(Tensor, Tensor)>,
rotary_emb: Arc<RotaryEmbedding>,
}
impl SdpaAttention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let h = cfg.hidden_size;
let n_heads = cfg.num_attention_heads;
let n_kv_heads = cfg.num_key_value_heads;
let hd = cfg.head_dim;
let q_proj = linear(h, n_heads * hd, cfg.attention_bias, vb.pp("q_proj"))?;
let k_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("k_proj"))?;
let v_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("v_proj"))?;
let o_proj = linear(n_heads * hd, h, true, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
n_heads,
n_kv_heads,
head_dim: hd,
hidden_size: h,
kv_cache: None,
rotary_emb,
})
}
fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
let n_rep = self.n_heads / self.n_kv_heads;
crate::utils::repeat_kv(x, n_rep)
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
let (bsz, q_len, _) = xs.dims3()?;
let query_states = xs.apply(&self.q_proj)?;
let key_states = xs.apply(&self.k_proj)?;
let value_states = xs.apply(&self.v_proj)?;
let query_states = query_states
.reshape((bsz, q_len, self.n_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((bsz, q_len, self.n_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((bsz, q_len, self.n_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let query_states = query_states.chunk(2, D::Minus1)?;
let key_states = key_states.chunk(2, D::Minus1)?;
let (query_rot, key_rot) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states[0], &key_states[0], pos)?;
let query_states = Tensor::cat(&[&query_rot, &query_states[1]], D::Minus1)?.contiguous()?;
let key_states = Tensor::cat(&[&key_rot, &key_states[1]], D::Minus1)?.contiguous()?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = self.repeat_kv(key_states)?;
let value_states = self.repeat_kv(value_states)?;
let xs = {
let att = (query_states.matmul(&key_states.t()?)? / (self.head_dim as f64).sqrt())?;
let att = if q_len == 1 {
att
} else {
match attention_mask {
None => att,
Some(mask) => att.broadcast_add(mask)?,
}
};
let att = candle_nn::ops::softmax_last_dim(&att)?;
att.matmul(&value_states.contiguous()?)?
};
let xs = xs
.transpose(1, 2)?
.reshape((bsz, q_len, self.hidden_size))?;
self.o_proj.forward(&xs)
}
}
#[derive(Debug, Clone)]
enum TemporalBlock {
Recurrent(RecurrentBlock),
Attention(SdpaAttention),
}
impl TemporalBlock {
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
match self {
Self::Recurrent(b) => b.forward(xs, pos),
Self::Attention(b) => b.forward(xs, attention_mask, pos),
}
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
temporal_pre_norm: RmsNorm,
channel_pre_norm: RmsNorm,
temporal_block: TemporalBlock,
mlp_block: Mlp,
}
impl DecoderLayer {
fn new(
block_idx: usize,
rotary_emb: Arc<RotaryEmbedding>,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let h = cfg.hidden_size;
let temporal_pre_norm = RmsNorm::new(h, cfg.rms_norm_eps, vb.pp("temporal_pre_norm"))?;
let channel_pre_norm = RmsNorm::new(h, cfg.rms_norm_eps, vb.pp("channel_pre_norm"))?;
let temporal_block = match cfg.block_types[block_idx % cfg.block_types.len()] {
TemporalBlockType::Recurrent => {
let block = RecurrentBlock::new(cfg, vb.pp("temporal_block"))?;
TemporalBlock::Recurrent(block)
}
TemporalBlockType::Attention => {
let block = SdpaAttention::new(rotary_emb, cfg, vb.pp("temporal_block"))?;
TemporalBlock::Attention(block)
}
};
let mlp_block = Mlp::new(cfg, vb.pp("mlp_block"))?;
Ok(Self {
temporal_pre_norm,
channel_pre_norm,
temporal_block,
mlp_block,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
pos: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = xs.apply(&self.temporal_pre_norm)?;
let xs = self.temporal_block.forward(&xs, attention_mask, pos)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.channel_pre_norm)?.apply(&self.mlp_block)?;
xs + residual
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
final_norm: RmsNorm,
lm_head: Linear,
hidden_size: usize,
logits_soft_cap: f64,
dtype: DType,
device: Device,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?);
let vb_b = vb.pp("layers");
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
for idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(idx, rotary_emb.clone(), cfg, vb_b.pp(idx))?;
layers.push(layer)
}
let final_norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("final_norm"))?;
let lm_head = Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
embed_tokens,
layers,
final_norm,
lm_head,
hidden_size: cfg.hidden_size,
logits_soft_cap: cfg.logits_soft_cap,
dtype: vb.dtype(),
device: vb.device().clone(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> {
let (b_size, seq_len) = xs.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, pos)?;
Some(mask)
};
let xs = xs.apply(&self.embed_tokens)?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), pos)?;
}
let logits = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.final_norm)?
.apply(&self.lm_head)?;
let logits = ((logits / self.logits_soft_cap)?.tanh()? * self.logits_soft_cap)?;
Ok(logits)
}
}
// File: hf_public_repos/candle/candle-transformers/src/models/quantized_moondream.rs
//! Implementation of a quantized Moondream vision language model.
//!
//! Moondream is a lightweight vision-language model for image understanding tasks such as
//! captioning and visual question answering.
//! This module provides a quantized version for reduced memory usage and faster inference.
//!
//! Key features:
//! - ViT-based vision encoder
//! - Phi-2 text decoder model
//! - Memory-efficient 8-bit quantization
//! - Optimized for efficient deployment
//!
//! References:
//! - [Moondream Model](https://github.com/vikhyat/moondream)
//!
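//! Example (sketch):
//!
//! A minimal wiring sketch, assuming a Moondream `Config`, a quantized `VarBuilder`
//! built from a GGUF file (e.g. via `quantized_var_builder::VarBuilder::from_gguf`,
//! assumed here), and `image`, a preprocessed image tensor of shape (batch, 3, h, w)
//! with `h` and `w` divisible by the 14-pixel patch size. Text generation with the
//! Phi-2 decoder is omitted.
//!
//! ```ignore
//! let mut model = Model::new(&cfg, vb)?;
//! // Encode the image into embeddings that condition the text decoder.
//! let image_embeds = image.apply(model.vision_encoder())?;
//! ```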
use crate::models::moondream::{Config, VisionConfig};
use crate::models::quantized_mixformer::MixFormerSequentialForCausalLM as PhiModel;
use crate::quantized_nn::{layer_norm, linear_b, Linear};
use crate::quantized_var_builder::VarBuilder;
use candle::{IndexOp, Module, Result, Tensor, D};
fn scaled_dot_product_attention(q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> {
let dim = q.dim(D::Minus1)?;
let scale_factor = 1.0 / (dim as f64).sqrt();
let attn_weights = (q.matmul(&k.t()?)? * scale_factor)?;
candle_nn::ops::softmax_last_dim(&attn_weights)?.matmul(v)
}
#[derive(Debug, Clone)]
struct LinearPatchEmbedding {
linear: Linear,
}
impl LinearPatchEmbedding {
fn new(vb: VarBuilder) -> Result<Self> {
let linear = linear_b(588, 1152, true, vb.pp("linear"))?;
Ok(Self { linear })
}
}
impl Module for LinearPatchEmbedding {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.linear)
}
}
#[derive(Debug, Clone)]
struct Attention {
num_heads: usize,
head_dim: usize,
qkv: Linear,
proj: Linear,
}
impl Attention {
pub fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> {
let qkv = linear_b(dim, dim * 3, true, vb.pp("qkv"))?;
let proj = linear_b(dim, dim, true, vb.pp("proj"))?;
Ok(Self {
num_heads,
head_dim: dim / num_heads,
qkv,
proj,
})
}
}
impl Module for Attention {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, n, c) = xs.dims3()?;
let qkv = xs
.apply(&self.qkv)?
.reshape((b, n, 3, self.num_heads, self.head_dim))?
.permute((2, 0, 3, 1, 4))?;
let (q, k, v) = (
qkv.i(0)?.contiguous()?,
qkv.i(1)?.contiguous()?,
qkv.i(2)?.contiguous()?,
);
scaled_dot_product_attention(&q, &k, &v)?
.transpose(1, 2)?
.reshape((b, n, c))?
.apply(&self.proj)
}
}
#[derive(Debug, Clone)]
struct VitBlock {
attn: Attention,
mlp: Mlp,
norm1: candle_nn::LayerNorm,
norm2: candle_nn::LayerNorm,
}
impl VitBlock {
fn new(vb: VarBuilder, dim: usize, num_heads: usize, cfg: &VisionConfig) -> Result<Self> {
let attn = Attention::new(vb.pp("attn"), dim, num_heads)?;
let mlp = Mlp::new(vb.pp("mlp"), dim, cfg.hidden_features, dim, cfg.act)?;
let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?;
let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?;
Ok(Self {
attn,
mlp,
norm1,
norm2,
})
}
}
impl Module for VitBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let ys = xs.apply(&self.norm1)?.apply(&self.attn)?;
let xs = (xs + &ys)?;
let ys = xs.apply(&self.norm2)?.apply(&self.mlp)?;
let xs = (&xs + &ys)?;
Ok(xs)
}
}
#[derive(Debug, Clone)]
struct VisionTransformer {
patch_embed: LinearPatchEmbedding,
pos_embed: Tensor,
blocks: Vec<VitBlock>,
norm: candle_nn::LayerNorm,
}
impl VisionTransformer {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let patch_embed = LinearPatchEmbedding::new(vb.pp("patch_embed"))?;
let pos_embed = vb
.get((1, cfg.embed_len, cfg.embed_dim), "pos_embed")?
.dequantize(vb.device())?;
let blocks = (0..cfg.num_blocks)
.map(|i| {
VitBlock::new(
vb.pp(format!("blocks.{}", i)),
cfg.embed_dim,
cfg.num_heads,
cfg,
)
})
.collect::<Result<_>>()?;
let norm = layer_norm(cfg.embed_dim, 1e-5, vb.pp("norm"))?;
Ok(Self {
patch_embed,
pos_embed,
blocks,
norm,
})
}
}
impl Module for VisionTransformer {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = (&xs.apply(&self.patch_embed)? + &self.pos_embed)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?;
}
xs.apply(&self.norm)
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
model: VisionTransformer,
}
impl Encoder {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let model = VisionTransformer::new(cfg, vb.pp("model.visual"))?;
Ok(Self { model })
}
}
impl Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.model)
}
}
#[derive(Debug, Clone)]
struct Mlp {
fc1: Linear,
act: candle_nn::Activation,
fc2: Linear,
}
impl Mlp {
fn new(
vb: VarBuilder,
in_features: usize,
hidden_features: usize,
out_features: usize,
act: candle_nn::Activation,
) -> Result<Self> {
let fc1 = linear_b(in_features, hidden_features, true, vb.pp("fc1"))?;
let fc2 = linear_b(hidden_features, out_features, true, vb.pp("fc2"))?;
Ok(Self { fc1, act, fc2 })
}
}
impl Module for Mlp {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2)
}
}
#[derive(Debug, Clone)]
struct VisionProjection {
mlp: Mlp,
}
impl VisionProjection {
fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let mlp = Mlp::new(
vb.pp("mlp"),
cfg.image_embedding_dim,
cfg.hidden_dim,
cfg.model_dim,
cfg.act,
)?;
Ok(Self { mlp })
}
}
impl Module for VisionProjection {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.mlp)
}
}
#[derive(Debug, Clone)]
pub struct VisionEncoder {
encoder: Encoder,
projection: VisionProjection,
}
impl VisionEncoder {
pub fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> {
let encoder = Encoder::new(cfg, vb.pp("encoder"))?;
let projection = VisionProjection::new(cfg, vb.pp("projection"))?;
Ok(Self {
encoder,
projection,
})
}
}
impl Module for VisionEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b, c, hp1, wp2) = xs.dims4()?;
let (p1, p2) = (14, 14);
let h = hp1 / p1;
let w = wp2 / p2;
// Split the (b, c, h*p1, w*p2) image into non-overlapping p1 x p2 patches and flatten
// each patch into a vector of length c * p1 * p2.
xs.reshape((b, c, h, p1, w, p2))?
.permute((0, 2, 4, 1, 3, 5))?
.reshape((b, h * w, c * p1 * p2))?
.apply(&self.encoder)?
.apply(&self.projection)
}
}
pub struct Model {
pub text_model: PhiModel,
pub vision_encoder: VisionEncoder,
}
impl Model {
pub fn new(config: &Config, vb: VarBuilder) -> Result<Self> {
let text_model = PhiModel::new_v2(&config.phi_config, vb.pp("text_model"))?;
let vision_encoder = VisionEncoder::new(&config.vision_config, vb.pp("vision_encoder"))?;
Ok(Self {
text_model,
vision_encoder,
})
}
pub fn vision_encoder(&self) -> &VisionEncoder {
&self.vision_encoder
}
pub fn text_model(&mut self) -> &mut PhiModel {
&mut self.text_model
}
}
// File: hf_public_repos/candle/candle-transformers/src/models/dac.rs
//! Implementation of the Descript Audio Codec (DAC) model
//!
//! See: [Descript Audio Codec](https://github.com/descriptinc/descript-audio-codec)
//!
//! An efficient neural codec for compressing/decompressing audio.
//!
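//! Example (sketch):
//!
//! A minimal decoding sketch, assuming a DAC `Config`, a `VarBuilder` over the model
//! weights, and `audio_codes`, a tensor of quantized codebook indices with shape
//! (batch, num_codebooks, time).
//!
//! ```ignore
//! let model = Model::new(&cfg, vb)?;
//! // Look up the residual codebooks and run the decoder to recover a waveform.
//! let audio = model.decode_codes(&audio_codes)?;
//! ```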
use crate::models::encodec;
use candle::{IndexOp, Result, Tensor, D};
use candle_nn::{Conv1d, Conv1dConfig, ConvTranspose1d, ConvTranspose1dConfig, VarBuilder};
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub num_codebooks: usize,
pub model_bitrate: u32,
pub codebook_size: usize,
pub latent_dim: usize,
pub frame_rate: u32,
pub sampling_rate: u32,
}
#[derive(Debug, Clone)]
pub struct Snake1d {
alpha: Tensor,
}
impl Snake1d {
pub fn new(channels: usize, vb: VarBuilder) -> Result<Self> {
let alpha = vb.get((1, channels, 1), "alpha")?;
Ok(Self { alpha })
}
}
impl candle::Module for Snake1d {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
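// Snake activation: x + sin^2(alpha * x) / (alpha + 1e-9), applied per channel.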
let xs_shape = xs.shape();
let xs = xs.flatten_from(2)?;
let sin = self.alpha.broadcast_mul(&xs)?.sin()?;
let sin = (&sin * &sin)?;
(xs + (&self.alpha + 1e-9)?.recip()?.broadcast_mul(&sin)?)?.reshape(xs_shape)
}
}
#[derive(Debug, Clone)]
pub struct ResidualUnit {
snake1: Snake1d,
conv1: Conv1d,
snake2: Snake1d,
conv2: Conv1d,
}
impl ResidualUnit {
pub fn new(dim: usize, dilation: usize, vb: VarBuilder) -> Result<Self> {
let pad = ((7 - 1) * dilation) / 2;
let vb = vb.pp("block");
let snake1 = Snake1d::new(dim, vb.pp(0))?;
let cfg1 = Conv1dConfig {
dilation,
padding: pad,
..Default::default()
};
let conv1 = encodec::conv1d_weight_norm(dim, dim, 7, cfg1, vb.pp(1))?;
let snake2 = Snake1d::new(dim, vb.pp(2))?;
let conv2 = encodec::conv1d_weight_norm(dim, dim, 1, Default::default(), vb.pp(3))?;
Ok(Self {
snake1,
conv1,
snake2,
conv2,
})
}
}
impl candle::Module for ResidualUnit {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let ys = xs
.apply(&self.snake1)?
.apply(&self.conv1)?
.apply(&self.snake2)?
.apply(&self.conv2)?;
let pad = (xs.dim(D::Minus1)? - ys.dim(D::Minus1)?) / 2;
if pad > 0 {
&ys + xs.narrow(D::Minus1, pad, ys.dim(D::Minus1)?)
} else {
ys + xs
}
}
}
#[derive(Debug, Clone)]
pub struct EncoderBlock {
res1: ResidualUnit,
res2: ResidualUnit,
res3: ResidualUnit,
snake1: Snake1d,
conv1: Conv1d,
}
impl EncoderBlock {
pub fn new(dim: usize, stride: usize, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("block");
let res1 = ResidualUnit::new(dim / 2, 1, vb.pp(0))?;
let res2 = ResidualUnit::new(dim / 2, 3, vb.pp(1))?;
let res3 = ResidualUnit::new(dim / 2, 9, vb.pp(2))?;
let snake1 = Snake1d::new(dim / 2, vb.pp(3))?;
let cfg1 = Conv1dConfig {
stride,
padding: (stride + 1) / 2,
..Default::default()
};
let conv1 = encodec::conv1d_weight_norm(dim / 2, dim, 2 * stride, cfg1, vb.pp(4))?;
Ok(Self {
res1,
res2,
res3,
snake1,
conv1,
})
}
}
impl candle::Module for EncoderBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.res1)?
.apply(&self.res2)?
.apply(&self.res3)?
.apply(&self.snake1)?
.apply(&self.conv1)
}
}
#[derive(Debug, Clone)]
pub struct Encoder {
conv1: Conv1d,
blocks: Vec<EncoderBlock>,
snake1: Snake1d,
conv2: Conv1d,
}
impl candle::Module for Encoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.conv1)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?
}
xs.apply(&self.snake1)?.apply(&self.conv2)
}
}
impl Encoder {
pub fn new(
mut d_model: usize,
strides: &[usize],
d_latent: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = vb.pp("block");
let cfg1 = Conv1dConfig {
padding: 3,
..Default::default()
};
let conv1 = encodec::conv1d_weight_norm(1, d_model, 7, cfg1, vb.pp(0))?;
let mut blocks = Vec::with_capacity(strides.len());
for (block_idx, stride) in strides.iter().enumerate() {
d_model *= 2;
let block = EncoderBlock::new(d_model, *stride, vb.pp(block_idx + 1))?;
blocks.push(block)
}
let snake1 = Snake1d::new(d_model, vb.pp(strides.len() + 1))?;
let cfg2 = Conv1dConfig {
padding: 1,
..Default::default()
};
let conv2 =
encodec::conv1d_weight_norm(d_model, d_latent, 3, cfg2, vb.pp(strides.len() + 2))?;
Ok(Self {
conv1,
blocks,
snake1,
conv2,
})
}
}
#[derive(Debug, Clone)]
pub struct DecoderBlock {
snake1: Snake1d,
conv_tr1: ConvTranspose1d,
res1: ResidualUnit,
res2: ResidualUnit,
res3: ResidualUnit,
}
impl DecoderBlock {
pub fn new(in_dim: usize, out_dim: usize, stride: usize, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("block");
let snake1 = Snake1d::new(in_dim, vb.pp(0))?;
let cfg = ConvTranspose1dConfig {
stride,
padding: (stride + 1) / 2,
..Default::default()
};
let conv_tr1 = encodec::conv_transpose1d_weight_norm(
in_dim,
out_dim,
2 * stride,
true,
cfg,
vb.pp(1),
)?;
let res1 = ResidualUnit::new(out_dim, 1, vb.pp(2))?;
let res2 = ResidualUnit::new(out_dim, 3, vb.pp(3))?;
let res3 = ResidualUnit::new(out_dim, 9, vb.pp(4))?;
Ok(Self {
snake1,
conv_tr1,
res1,
res2,
res3,
})
}
}
impl candle_nn::Module for DecoderBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
xs.apply(&self.snake1)?
.apply(&self.conv_tr1)?
.apply(&self.res1)?
.apply(&self.res2)?
.apply(&self.res3)
}
}
#[derive(Debug, Clone)]
pub struct Decoder {
conv1: Conv1d,
blocks: Vec<DecoderBlock>,
snake1: Snake1d,
conv2: Conv1d,
}
impl Decoder {
pub fn new(
in_c: usize,
mut channels: usize,
rates: &[usize],
d_out: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = vb.pp("model");
let cfg1 = Conv1dConfig {
padding: 3,
..Default::default()
};
let conv1 = encodec::conv1d_weight_norm(in_c, channels, 7, cfg1, vb.pp(0))?;
let mut blocks = Vec::with_capacity(rates.len());
for (idx, stride) in rates.iter().enumerate() {
let block = DecoderBlock::new(channels, channels / 2, *stride, vb.pp(idx + 1))?;
channels /= 2;
blocks.push(block)
}
let snake1 = Snake1d::new(channels, vb.pp(rates.len() + 1))?;
let conv2 = encodec::conv1d_weight_norm(channels, d_out, 7, cfg1, vb.pp(rates.len() + 2))?;
Ok(Self {
conv1,
blocks,
snake1,
conv2,
})
}
}
impl candle::Module for Decoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.apply(&self.conv1)?;
for block in self.blocks.iter() {
xs = xs.apply(block)?
}
xs.apply(&self.snake1)?.apply(&self.conv2)
}
}
#[allow(unused)]
#[derive(Clone, Debug)]
pub struct VectorQuantizer {
in_proj: Conv1d,
out_proj: Conv1d,
codebook: candle_nn::Embedding,
}
impl VectorQuantizer {
pub fn new(in_dim: usize, cb_size: usize, cb_dim: usize, vb: VarBuilder) -> Result<Self> {
let in_proj =
encodec::conv1d_weight_norm(in_dim, cb_dim, 1, Default::default(), vb.pp("in_proj"))?;
let out_proj =
encodec::conv1d_weight_norm(cb_dim, in_dim, 1, Default::default(), vb.pp("out_proj"))?;
let codebook = candle_nn::embedding(cb_size, cb_dim, vb.pp("codebook"))?;
Ok(Self {
in_proj,
out_proj,
codebook,
})
}
pub fn embed_code(&self, embed_id: &Tensor) -> Result<Tensor> {
embed_id.apply(&self.codebook)
}
pub fn decode_code(&self, embed_id: &Tensor) -> Result<Tensor> {
self.embed_code(embed_id)?.transpose(1, 2)
}
}
#[derive(Clone, Debug)]
pub struct ResidualVectorQuantizer {
quantizers: Vec<VectorQuantizer>,
}
impl ResidualVectorQuantizer {
pub fn new(
input_dim: usize,
n_codebooks: usize,
cb_size: usize,
cb_dim: usize,
vb: VarBuilder,
) -> Result<Self> {
let vb = &vb.pp("quantizers");
let quantizers = (0..n_codebooks)
.map(|i| VectorQuantizer::new(input_dim, cb_size, cb_dim, vb.pp(i)))
.collect::<Result<Vec<_>>>()?;
Ok(Self { quantizers })
}
pub fn from_codes(&self, codes: &Tensor) -> Result<Tensor> {
let mut sum = None;
for (idx, quantizer) in self.quantizers.iter().enumerate() {
let z_p_i = quantizer.decode_code(&codes.i((.., idx))?)?;
let z_q_i = z_p_i.apply(&quantizer.out_proj)?;
let s = match sum {
None => z_q_i,
Some(s) => (s + z_q_i)?,
};
sum = Some(s)
}
match sum {
Some(s) => Ok(s),
None => candle::bail!("empty codebooks"),
}
}
}
#[derive(Debug, Clone)]
pub struct Model {
pub encoder: Encoder,
pub quantizer: ResidualVectorQuantizer,
pub decoder: Decoder,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb = vb.pp("model");
let encoder = Encoder::new(64, &[2, 4, 8, 8], cfg.latent_dim, vb.pp("encoder"))?;
let quantizer = ResidualVectorQuantizer::new(
cfg.latent_dim,
cfg.num_codebooks,
cfg.codebook_size,
8,
vb.pp("quantizer"),
)?;
let decoder = Decoder::new(cfg.latent_dim, 1536, &[8, 8, 4, 2], 1, vb.pp("decoder"))?;
Ok(Self {
encoder,
decoder,
quantizer,
})
}
pub fn decode_codes(&self, audio_codes: &Tensor) -> Result<Tensor> {
let audio_values = self.quantizer.from_codes(audio_codes)?;
audio_values.apply(&self.decoder)
}
}
// File: hf_public_repos/candle/candle-transformers/src/models/depth_anything_v2.rs
//! Implementation of the Depth Anything V2 monocular depth estimation model.
//!
//! See:
//! - ["Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data"](https://github.com/LiheYoung/Depth-Anything)
//!
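//! Example (sketch):
//!
//! A minimal usage sketch, assuming a DINOv2 backbone already loaded as
//! `dinov2: DinoVisionTransformer`, a `VarBuilder` over the Depth Anything V2 weights,
//! and `image`, a normalized tensor of shape (batch, 3, 518, 518) matching
//! `input_image_size`.
//!
//! ```ignore
//! let cfg = DepthAnythingV2Config::vit_small();
//! let model = DepthAnythingV2::new(&dinov2, &cfg, vb)?;
//! // Produces a (batch, 1, 518, 518) relative depth map.
//! let depth = model.forward(&image)?;
//! ```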
use candle::D::Minus1;
use candle::{Module, Result, Tensor};
use candle_nn::ops::Identity;
use candle_nn::{
batch_norm, conv2d, conv2d_no_bias, conv_transpose2d, linear, seq, Activation, BatchNorm,
BatchNormConfig, Conv2d, Conv2dConfig, ConvTranspose2dConfig, Sequential, VarBuilder,
};
use crate::models::dinov2::DinoVisionTransformer;
pub struct DepthAnythingV2Config {
out_channel_sizes: [usize; 4],
in_channel_size: usize, // embed_dim in the Dino model
num_features: usize,
use_batch_norm: bool,
use_class_token: bool,
layer_ids_vits: Vec<usize>,
input_image_size: usize,
target_patch_size: usize,
}
impl DepthAnythingV2Config {
#[allow(clippy::too_many_arguments)]
pub fn new(
out_channel_sizes: [usize; 4],
in_channel_size: usize,
num_features: usize,
use_batch_norm: bool,
use_class_token: bool,
layer_ids_vits: Vec<usize>,
input_image_size: usize,
target_patch_size: usize,
) -> Self {
Self {
out_channel_sizes,
in_channel_size,
num_features,
use_batch_norm,
use_class_token,
layer_ids_vits,
input_image_size,
target_patch_size,
}
}
pub fn vit_small() -> Self {
Self {
out_channel_sizes: [48, 96, 192, 384],
in_channel_size: 384,
num_features: 64,
use_batch_norm: false,
use_class_token: false,
layer_ids_vits: vec![2, 5, 8, 11],
input_image_size: 518,
target_patch_size: 518 / 14,
}
}
pub fn vit_base() -> Self {
Self {
out_channel_sizes: [96, 192, 384, 768],
in_channel_size: 768,
num_features: 128,
use_batch_norm: false,
use_class_token: false,
layer_ids_vits: vec![2, 5, 8, 11],
input_image_size: 518,
target_patch_size: 518 / 14,
}
}
pub fn vit_large() -> Self {
Self {
out_channel_sizes: [256, 512, 1024, 1024],
in_channel_size: 1024,
num_features: 256,
use_batch_norm: false,
use_class_token: false,
layer_ids_vits: vec![4, 11, 17, 23],
input_image_size: 518,
target_patch_size: 518 / 14,
}
}
pub fn vit_giant() -> Self {
Self {
out_channel_sizes: [1536, 1536, 1536, 1536],
in_channel_size: 1536,
num_features: 384,
use_batch_norm: false,
use_class_token: false,
layer_ids_vits: vec![9, 19, 29, 39],
input_image_size: 518,
target_patch_size: 518 / 14,
}
}
}
pub struct ResidualConvUnit {
activation: Activation,
conv1: Conv2d,
conv2: Conv2d,
batch_norm1: Option<BatchNorm>,
batch_norm2: Option<BatchNorm>,
}
impl ResidualConvUnit {
pub fn new(
conf: &DepthAnythingV2Config,
activation: Activation,
vb: VarBuilder,
) -> Result<Self> {
const KERNEL_SIZE: usize = 3;
let conv_cfg = Conv2dConfig {
padding: 1,
stride: 1,
dilation: 1,
groups: 1,
};
let conv1 = conv2d(
conf.num_features,
conf.num_features,
KERNEL_SIZE,
conv_cfg,
vb.pp("conv1"),
)?;
let conv2 = conv2d(
conf.num_features,
conf.num_features,
KERNEL_SIZE,
conv_cfg,
vb.pp("conv2"),
)?;
let (batch_norm1, batch_norm2) = match conf.use_batch_norm {
true => {
let batch_norm_cfg = BatchNormConfig {
eps: 1e-05,
remove_mean: false,
affine: true,
momentum: 0.1,
};
(
Some(batch_norm(conf.num_features, batch_norm_cfg, vb.pp("bn1"))?),
Some(batch_norm(conf.num_features, batch_norm_cfg, vb.pp("bn2"))?),
)
}
false => (None, None),
};
Ok(Self {
activation,
conv1,
conv2,
batch_norm1,
batch_norm2,
})
}
}
impl Module for ResidualConvUnit {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let out = self.activation.forward(xs)?;
let out = self.conv1.forward(&out)?;
let out = if let Some(batch_norm1) = &self.batch_norm1 {
batch_norm1.forward_train(&out)?
} else {
out
};
let out = self.activation.forward(&out)?;
let out = self.conv2.forward(&out)?;
let out = if let Some(batch_norm2) = &self.batch_norm2 {
batch_norm2.forward_train(&out)?
} else {
out
};
out + xs
}
}
pub struct FeatureFusionBlock {
res_conv_unit1: ResidualConvUnit,
res_conv_unit2: ResidualConvUnit,
output_conv: Conv2d,
target_patch_size: usize,
}
impl FeatureFusionBlock {
pub fn new(
conf: &DepthAnythingV2Config,
target_patch_size: usize,
activation: Activation,
vb: VarBuilder,
) -> Result<Self> {
const KERNEL_SIZE: usize = 1;
let conv_cfg = Conv2dConfig {
padding: 0,
stride: 1,
dilation: 1,
groups: 1,
};
let output_conv = conv2d(
conf.num_features,
conf.num_features,
KERNEL_SIZE,
conv_cfg,
vb.pp("out_conv"),
)?;
let res_conv_unit1 = ResidualConvUnit::new(conf, activation, vb.pp("resConfUnit1"))?;
let res_conv_unit2 = ResidualConvUnit::new(conf, activation, vb.pp("resConfUnit2"))?;
Ok(Self {
res_conv_unit1,
res_conv_unit2,
output_conv,
target_patch_size,
})
}
}
impl Module for FeatureFusionBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let out = self.res_conv_unit2.forward(xs)?;
let out = out.interpolate2d(self.target_patch_size, self.target_patch_size)?;
self.output_conv.forward(&out)
}
}
pub struct Scratch {
layer1_rn: Conv2d,
layer2_rn: Conv2d,
layer3_rn: Conv2d,
layer4_rn: Conv2d,
refine_net1: FeatureFusionBlock,
refine_net2: FeatureFusionBlock,
refine_net3: FeatureFusionBlock,
refine_net4: FeatureFusionBlock,
output_conv1: Conv2d,
output_conv2: Sequential,
}
impl Scratch {
pub fn new(conf: &DepthAnythingV2Config, vb: VarBuilder) -> Result<Self> {
const KERNEL_SIZE: usize = 3;
let conv_cfg = Conv2dConfig {
padding: 1,
stride: 1,
dilation: 1,
groups: 1,
};
let layer1_rn = conv2d_no_bias(
conf.out_channel_sizes[0],
conf.num_features,
KERNEL_SIZE,
conv_cfg,
vb.pp("layer1_rn"),
)?;
let layer2_rn = conv2d_no_bias(
conf.out_channel_sizes[1],
conf.num_features,
KERNEL_SIZE,
conv_cfg,
vb.pp("layer2_rn"),
)?;
let layer3_rn = conv2d_no_bias(
conf.out_channel_sizes[2],
conf.num_features,
KERNEL_SIZE,
conv_cfg,
vb.pp("layer3_rn"),
)?;
let layer4_rn = conv2d_no_bias(
conf.out_channel_sizes[3],
conf.num_features,
KERNEL_SIZE,
conv_cfg,
vb.pp("layer4_rn"),
)?;
let refine_net1 = FeatureFusionBlock::new(
conf,
conf.target_patch_size * 8,
Activation::Relu,
vb.pp("refinenet1"),
)?;
let refine_net2 = FeatureFusionBlock::new(
conf,
conf.target_patch_size * 4,
Activation::Relu,
vb.pp("refinenet2"),
)?;
let refine_net3 = FeatureFusionBlock::new(
conf,
conf.target_patch_size * 2,
Activation::Relu,
vb.pp("refinenet3"),
)?;
let refine_net4 = FeatureFusionBlock::new(
conf,
conf.target_patch_size,
Activation::Relu,
vb.pp("refinenet4"),
)?;
let conv_cfg = Conv2dConfig {
padding: 1,
stride: 1,
dilation: 1,
groups: 1,
};
let output_conv1 = conv2d(
conf.num_features,
conf.num_features / 2,
KERNEL_SIZE,
conv_cfg,
vb.pp("output_conv1"),
)?;
let output_conv2 = seq();
const HEAD_FEATURES_2: usize = 32;
const OUT_CHANNELS_2: usize = 1;
const KERNEL_SIZE_2: usize = 1;
let output_conv2 = output_conv2.add(conv2d(
conf.num_features / 2,
HEAD_FEATURES_2,
KERNEL_SIZE,
conv_cfg,
vb.pp("output_conv2").pp("0"),
)?);
let output_conv2 = output_conv2
.add(Activation::Relu)
.add(conv2d(
HEAD_FEATURES_2,
OUT_CHANNELS_2,
KERNEL_SIZE_2,
conv_cfg,
vb.pp("output_conv2").pp("2"),
)?)
.add(Activation::Relu);
Ok(Self {
layer1_rn,
layer2_rn,
layer3_rn,
layer4_rn,
refine_net1,
refine_net2,
refine_net3,
refine_net4,
output_conv1,
output_conv2,
})
}
}
const NUM_CHANNELS: usize = 4;
pub struct DPTHead<'a> {
conf: &'a DepthAnythingV2Config,
projections: Vec<Conv2d>,
resize_layers: Vec<Box<dyn Module>>,
readout_projections: Vec<Sequential>,
scratch: Scratch,
}
impl<'a> DPTHead<'a> {
pub fn new(conf: &'a DepthAnythingV2Config, vb: VarBuilder) -> Result<Self> {
let mut projections: Vec<Conv2d> = Vec::with_capacity(conf.out_channel_sizes.len());
for (conv_index, out_channel_size) in conf.out_channel_sizes.iter().enumerate() {
projections.push(conv2d(
conf.in_channel_size,
*out_channel_size,
1,
Default::default(),
vb.pp("projects").pp(conv_index.to_string()),
)?);
}
let resize_layers: Vec<Box<dyn Module>> = vec![
Box::new(conv_transpose2d(
conf.out_channel_sizes[0],
conf.out_channel_sizes[0],
4,
ConvTranspose2dConfig {
padding: 0,
stride: 4,
dilation: 1,
output_padding: 0,
},
vb.pp("resize_layers").pp("0"),
)?),
Box::new(conv_transpose2d(
conf.out_channel_sizes[1],
conf.out_channel_sizes[1],
2,
ConvTranspose2dConfig {
padding: 0,
stride: 2,
dilation: 1,
output_padding: 0,
},
vb.pp("resize_layers").pp("1"),
)?),
Box::new(Identity::new()),
Box::new(conv2d(
conf.out_channel_sizes[3],
conf.out_channel_sizes[3],
3,
Conv2dConfig {
padding: 1,
stride: 2,
dilation: 1,
groups: 1,
},
vb.pp("resize_layers").pp("3"),
)?),
];
let readout_projections = if conf.use_class_token {
// One readout projection (linear + GELU) per feature level, used to fold the class
// token back into the patch tokens.
let mut rop = Vec::with_capacity(NUM_CHANNELS);
for rop_index in 0..NUM_CHANNELS {
rop.push(
seq()
.add(linear(
2 * conf.in_channel_size,
conf.in_channel_size,
vb.pp("readout_projects").pp(rop_index.to_string()),
)?)
.add(Activation::Gelu),
);
}
rop
} else {
vec![]
};
let scratch = Scratch::new(conf, vb.pp("scratch"))?;
Ok(Self {
conf,
projections,
resize_layers,
readout_projections,
scratch,
})
}
}
impl Module for DPTHead<'_> {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut out: Vec<Tensor> = Vec::with_capacity(NUM_CHANNELS);
for i in 0..NUM_CHANNELS {
let x = if self.conf.use_class_token {
let x = xs.get(i)?.get(0)?;
let class_token = xs.get(i)?.get(1)?;
let readout = class_token.unsqueeze(1)?.expand(x.shape())?;
let to_cat = [x, readout];
let cat = Tensor::cat(&to_cat, Minus1)?;
self.readout_projections[i].forward(&cat)?
} else {
xs.get(i)?
};
let x_dims = x.dims();
let x = x.permute((0, 2, 1))?.reshape((
x_dims[0],
x_dims[x_dims.len() - 1],
self.conf.target_patch_size,
self.conf.target_patch_size,
))?;
let x = self.projections[i].forward(&x)?;
let x = self.resize_layers[i].forward(&x)?;
out.push(x);
}
let layer_1_rn = self.scratch.layer1_rn.forward(&out[0])?;
let layer_2_rn = self.scratch.layer2_rn.forward(&out[1])?;
let layer_3_rn = self.scratch.layer3_rn.forward(&out[2])?;
let layer_4_rn = self.scratch.layer4_rn.forward(&out[3])?;
let path4 = self.scratch.refine_net4.forward(&layer_4_rn)?;
let res3_out = self
.scratch
.refine_net3
.res_conv_unit1
.forward(&layer_3_rn)?;
let res3_out = path4.add(&res3_out)?;
let path3 = self.scratch.refine_net3.forward(&res3_out)?;
let res2_out = self
.scratch
.refine_net2
.res_conv_unit1
.forward(&layer_2_rn)?;
let res2_out = path3.add(&res2_out)?;
let path2 = self.scratch.refine_net2.forward(&res2_out)?;
let res1_out = self
.scratch
.refine_net1
.res_conv_unit1
.forward(&layer_1_rn)?;
let res1_out = path2.add(&res1_out)?;
let path1 = self.scratch.refine_net1.forward(&res1_out)?;
let out = self.scratch.output_conv1.forward(&path1)?;
let out = out.interpolate2d(self.conf.input_image_size, self.conf.input_image_size)?;
self.scratch.output_conv2.forward(&out)
}
}
pub struct DepthAnythingV2<'a> {
pretrained: &'a DinoVisionTransformer,
depth_head: DPTHead<'a>,
conf: &'a DepthAnythingV2Config,
}
impl<'a> DepthAnythingV2<'a> {
pub fn new(
pretrained: &'a DinoVisionTransformer,
conf: &'a DepthAnythingV2Config,
vb: VarBuilder,
) -> Result<Self> {
let depth_head = DPTHead::new(conf, vb.pp("depth_head"))?;
Ok(Self {
pretrained,
depth_head,
conf,
})
}
}
impl Module for DepthAnythingV2<'_> {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let features = self.pretrained.get_intermediate_layers(
xs,
&self.conf.layer_ids_vits,
false,
false,
true,
)?;
let depth = self.depth_head.forward(&features)?;
depth.relu()
}
}
// File: hf_public_repos/candle/candle-transformers/src/models/gemma2.rs
//! Gemma 2 LLM architecture (Google) inference implementation.
//!
//! See ["Gemma: Open Models Based on Gemini Technology"](https://blog.google/technology/developers/gemma-open-models/)
//!
//! Based on implementations from Google and OpenLLM
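//! Example (sketch):
//!
//! A minimal usage sketch, assuming `cfg: Config`, `vb: VarBuilder`, and `prompt_ids`
//! (a `(batch, seq_len)` tensor of token ids) are already prepared; sampling from the
//! logits is omitted.
//!
//! ```ignore
//! // `false` selects the pure-candle attention path instead of the optional
//! // flash-attn kernel (enabled with the `flash-attn` feature).
//! let mut model = Model::new(false, &cfg, vb)?;
//! let logits = model.forward(&prompt_ids, 0)?;
//! // Later calls pass the new tokens with the running offset; the per-layer KV cache
//! // can be reset between prompts.
//! model.clear_kv_cache();
//! ```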
use std::sync::Arc;
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{linear_b as linear, Activation, Linear, VarBuilder};
fn default_max_position_embeddings() -> usize {
4096
}
#[derive(serde::Deserialize, Debug, Clone)]
pub struct Config {
pub attention_bias: bool,
pub head_dim: usize,
pub hidden_activation: Activation,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_attention_heads: usize,
pub num_hidden_layers: usize,
pub num_key_value_heads: usize,
pub rms_norm_eps: f64,
pub rope_theta: f64,
pub vocab_size: usize,
pub final_logit_softcapping: Option<f64>,
pub attn_logit_softcapping: Option<f64>,
pub query_pre_attn_scalar: usize,
// TODO: Handle the sliding window in the attention mask.
pub sliding_window: Option<usize>,
#[serde(default = "default_max_position_embeddings")]
pub max_position_embeddings: usize,
}
#[derive(Debug, Clone)]
struct RmsNorm {
weight: Tensor,
eps: f64,
}
impl RmsNorm {
fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> {
let weight = vb.get(dim, "weight")?;
Ok(Self { weight, eps })
}
}
impl Module for RmsNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
x_normed
.to_dtype(x_dtype)?
.broadcast_mul(&(&self.weight + 1.0)?)
}
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.head_dim;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: candle_nn::Activation,
}
impl MLP {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let intermediate_sz = cfg.intermediate_size;
let gate_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("gate_proj"))?;
let up_proj = linear(hidden_sz, intermediate_sz, false, vb.pp("up_proj"))?;
let down_proj = linear(intermediate_sz, hidden_sz, false, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_activation,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
attn_logit_softcapping: Option<f64>,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
use_flash_attn: bool,
}
impl Attention {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = cfg.head_dim;
let bias = cfg.attention_bias;
let q_proj = linear(hidden_sz, num_heads * head_dim, bias, vb.pp("q_proj"))?;
let k_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("k_proj"))?;
let v_proj = linear(hidden_sz, num_kv_heads * head_dim, bias, vb.pp("v_proj"))?;
let o_proj = linear(num_heads * head_dim, hidden_sz, bias, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
attn_logit_softcapping: cfg.attn_logit_softcapping,
rotary_emb,
kv_cache: None,
use_flash_attn,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = if self.use_flash_attn {
// flash-attn expects (b_sz, seq_len, nheads, head_dim)
let q = query_states.transpose(1, 2)?;
let k = key_states.transpose(1, 2)?;
let v = value_states.transpose(1, 2)?;
let scale = 1f32 / (self.head_dim as f32).sqrt();
flash_attn(&q, &k, &v, scale, attention_mask.is_some())?.transpose(1, 2)?
} else {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
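// Gemma 2 soft-caps the attention logits: attn <- softcap * tanh(attn / softcap).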
let attn_weights = match self.attn_logit_softcapping {
None => attn_weights,
Some(sc) => ((attn_weights / sc)?.tanh()? * sc)?,
};
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, ()))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
#[cfg(feature = "flash-attn")]
fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal)
}
#[cfg(not(feature = "flash-attn"))]
fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> {
unimplemented!("compile with '--features flash-attn'")
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MLP,
input_layernorm: RmsNorm,
pre_feedforward_layernorm: RmsNorm,
post_feedforward_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(
rotary_emb: Arc<RotaryEmbedding>,
use_flash_attn: bool,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, use_flash_attn, cfg, vb.pp("self_attn"))?;
let mlp = MLP::new(cfg, vb.pp("mlp"))?;
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let pre_feedforward_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("pre_feedforward_layernorm"),
)?;
let post_feedforward_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_feedforward_layernorm"),
)?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
pre_feedforward_layernorm,
post_feedforward_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = xs.apply(&self.post_attention_layernorm)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.pre_feedforward_layernorm)?;
let xs = xs.apply(&self.mlp)?;
let xs = xs.apply(&self.post_feedforward_layernorm)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
final_logit_softcapping: Option<f64>,
device: Device,
dtype: DType,
hidden_size: usize,
sliding_window: Option<usize>,
}
impl Model {
pub fn new(use_flash_attn: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer =
DecoderLayer::new(rotary_emb.clone(), use_flash_attn, cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = Linear::new(embed_tokens.embeddings().clone(), None);
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
final_logit_softcapping: cfg.final_logit_softcapping,
device: vb.device().clone(),
dtype: vb.dtype(),
hidden_size: cfg.hidden_size,
sliding_window: cfg.sliding_window,
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
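// Causal mask; with a sliding window, position i can only attend to positions j
// with i - sliding_window <= j <= i.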
let mask: Vec<_> = match self.sliding_window {
None => (0..tgt_len)
.flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. }))
.collect(),
Some(sliding_window) => (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect(),
};
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let xs = self.embed_tokens.forward(input_ids)?;
let mut xs = (xs * (self.hidden_size as f64).sqrt())?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
let logits = xs
.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)?;
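        // When configured, soft-cap the logits: dividing by the cap, applying tanh and
        // rescaling keeps the values in (-cap, cap) without a hard clip.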
let logits = match self.final_logit_softcapping {
None => logits,
Some(sc) => ((logits / sc)?.tanh()? * sc)?,
};
Ok(logits)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| 4 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/qwen2_moe.rs | //! Qwen2 model implementation with Mixture of Experts support.
//!
//! Qwen2-MoE is a variant of the Qwen2 large language model that uses a sparse Mixture of Experts (MoE) architecture.
//! This implementation provides support for sparsely activated MoE layers.
//!
//! Key characteristics:
//! - Mixture of Experts architecture
//! - Sparse expert activation
//! - Shared expert routing mechanism
//! - Grouped query attention (GQA)
//! - RMSNorm for layer normalization
//! - Rotary positional embeddings (RoPE)
//!
//! References:
//! - [Qwen2 Paper](https://arxiv.org/abs/2401.08985)
//! - [Model Card](https://huggingface.co/Qwen/Qwen2-7B-beta)
//!
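//! Example (a minimal, hypothetical sketch; the config/weight file names, the dtype
//! and the token ids below are assumptions and must match the actual checkpoint):
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::qwen2_moe::{Config, Model};
//!
//! fn run() -> anyhow::Result<()> {
//!     let device = Device::Cpu;
//!     let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)?;
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
//!     };
//!     let mut model = Model::new(&config, vb)?;
//!     // (batch, seq_len) tensor of token ids; the values here are placeholders.
//!     let input_ids = Tensor::new(&[[1u32, 2, 3]], &device)?;
//!     // Logits for the last position, shape (batch, 1, vocab_size).
//!     let logits = model.forward(&input_ids, 0)?;
//!     println!("{:?}", logits.shape());
//!     Ok(())
//! }
//! ```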
use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm};
use candle::{DType, Device, Module, Result, Tensor, D};
use candle_nn::{Activation, VarBuilder};
use std::sync::Arc;
#[derive(Debug, Clone, PartialEq, serde::Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub intermediate_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub num_key_value_heads: usize,
pub max_position_embeddings: usize,
pub sliding_window: usize,
pub max_window_layers: usize,
pub tie_word_embeddings: bool,
pub rope_theta: f64,
pub rms_norm_eps: f64,
pub use_sliding_window: bool,
pub hidden_act: Activation,
pub decoder_sparse_step: usize,
pub moe_intermediate_size: usize,
pub shared_expert_intermediate_size: usize,
pub num_experts_per_tok: usize,
pub num_experts: usize,
pub norm_topk_prob: bool,
}
#[derive(Debug, Clone)]
struct RotaryEmbedding {
sin: Tensor,
cos: Tensor,
}
impl RotaryEmbedding {
fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> {
let dim = cfg.hidden_size / cfg.num_attention_heads;
let max_seq_len = cfg.max_position_embeddings;
let inv_freq: Vec<_> = (0..dim)
.step_by(2)
.map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32)
.collect();
let inv_freq_len = inv_freq.len();
let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?;
let t = Tensor::arange(0u32, max_seq_len as u32, dev)?
.to_dtype(dtype)?
.reshape((max_seq_len, 1))?;
let freqs = t.matmul(&inv_freq)?;
Ok(Self {
sin: freqs.sin()?,
cos: freqs.cos()?,
})
}
fn apply_rotary_emb_qkv(
&self,
q: &Tensor,
k: &Tensor,
seqlen_offset: usize,
) -> Result<(Tensor, Tensor)> {
let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?;
let cos = self.cos.narrow(0, seqlen_offset, seq_len)?;
let sin = self.sin.narrow(0, seqlen_offset, seq_len)?;
let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?;
let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?;
Ok((q_embed, k_embed))
}
}
#[derive(Debug, Clone)]
#[allow(clippy::upper_case_acronyms)]
struct MLP {
gate_proj: Linear,
up_proj: Linear,
down_proj: Linear,
act_fn: Activation,
}
impl MLP {
fn new(intermediate_sz: usize, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?;
let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?;
let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?;
Ok(Self {
gate_proj,
up_proj,
down_proj,
act_fn: cfg.hidden_act,
})
}
}
impl Module for MLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?;
let rhs = xs.apply(&self.up_proj)?;
(lhs * rhs)?.apply(&self.down_proj)
}
}
#[derive(Debug, Clone)]
struct Attention {
q_proj: Linear,
k_proj: Linear,
v_proj: Linear,
o_proj: Linear,
num_heads: usize,
num_kv_heads: usize,
num_kv_groups: usize,
head_dim: usize,
hidden_size: usize,
rotary_emb: Arc<RotaryEmbedding>,
kv_cache: Option<(Tensor, Tensor)>,
}
impl Attention {
fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> {
let hidden_sz = cfg.hidden_size;
let num_heads = cfg.num_attention_heads;
let num_kv_heads = cfg.num_key_value_heads;
let num_kv_groups = num_heads / num_kv_heads;
let head_dim = hidden_sz / num_heads;
let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?;
let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?;
let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?;
let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?;
Ok(Self {
q_proj,
k_proj,
v_proj,
o_proj,
num_heads,
num_kv_heads,
num_kv_groups,
head_dim,
hidden_size: hidden_sz,
rotary_emb,
kv_cache: None,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let (b_sz, q_len, _) = xs.dims3()?;
let query_states = self.q_proj.forward(xs)?;
let key_states = self.k_proj.forward(xs)?;
let value_states = self.v_proj.forward(xs)?;
let query_states = query_states
.reshape((b_sz, q_len, self.num_heads, self.head_dim))?
.transpose(1, 2)?;
let key_states = key_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let value_states = value_states
.reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))?
.transpose(1, 2)?;
let (query_states, key_states) =
self.rotary_emb
.apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?;
let (key_states, value_states) = match &self.kv_cache {
None => (key_states, value_states),
Some((prev_k, prev_v)) => {
let key_states = Tensor::cat(&[prev_k, &key_states], 2)?;
let value_states = Tensor::cat(&[prev_v, &value_states], 2)?;
(key_states, value_states)
}
};
self.kv_cache = Some((key_states.clone(), value_states.clone()));
let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?;
let value_states =
crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?;
let attn_output = {
let scale = 1f64 / f64::sqrt(self.head_dim as f64);
let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?;
let attn_weights = match attention_mask {
None => attn_weights,
Some(mask) => attn_weights.broadcast_add(mask)?,
};
let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?;
attn_weights.matmul(&value_states)?
};
attn_output
.transpose(1, 2)?
.reshape((b_sz, q_len, self.hidden_size))?
.apply(&self.o_proj)
}
fn clear_kv_cache(&mut self) {
self.kv_cache = None
}
}
// https://github.com/huggingface/transformers/blob/536ea2aca234fb48c5c69769431d643b0d93b233/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py#L800
#[derive(Debug, Clone)]
struct SparseMoeBlock {
gate: Linear,
experts: Vec<MLP>,
shared_expert: MLP,
shared_expert_gate: Linear,
norm_topk_prob: bool,
num_experts_per_tok: usize,
}
impl SparseMoeBlock {
fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let gate = linear_no_bias(cfg.hidden_size, cfg.num_experts, vb.pp("gate"))?;
let mut experts = Vec::with_capacity(cfg.num_experts);
let vb_e = vb.pp("experts");
for idx in 0..cfg.num_experts {
let expert = MLP::new(cfg.moe_intermediate_size, cfg, vb_e.pp(idx))?;
experts.push(expert)
}
let shared_expert = MLP::new(
cfg.shared_expert_intermediate_size,
cfg,
vb.pp("shared_expert"),
)?;
let shared_expert_gate = linear_no_bias(cfg.hidden_size, 1, vb.pp("shared_expert_gate"))?;
Ok(Self {
gate,
experts,
shared_expert,
shared_expert_gate,
norm_topk_prob: cfg.norm_topk_prob,
num_experts_per_tok: cfg.num_experts_per_tok,
})
}
}
impl Module for SparseMoeBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let (b_size, seq_len, hidden_dim) = xs.dims3()?;
let xs = xs.reshape(((), hidden_dim))?;
let router_logits = xs.apply(&self.gate)?;
let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?;
// In order to extract topk, we extract the data from the tensor and manipulate it
// directly. Maybe we will want to use some custom ops instead at some point.
let experts_per_tok = routing_weights
.arg_sort_last_dim(false)?
.narrow(D::Minus1, 0, self.num_experts_per_tok)?
.contiguous()?;
let routing_weights = routing_weights.gather(&experts_per_tok, D::Minus1)?;
// routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
// top_x contains the row indexes to evaluate for each expert.
let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?;
let experts_per_tok = experts_per_tok.to_vec2::<u32>()?;
let mut top_x = vec![vec![]; self.experts.len()];
let mut selected_experts = vec![vec![]; self.experts.len()];
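        // top_x[e] collects the token (row) indices routed to expert e, while
        // selected_experts[e] stores the matching routing weights, renormalized to
        // sum to one per token when `norm_topk_prob` is set.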
for (row_idx, (rw, expert_idxs)) in routing_weights
.iter()
.zip(experts_per_tok.iter())
.enumerate()
{
let sum_rw = rw.iter().sum::<f32>();
for (&rw, &expert_idx) in rw.iter().zip(expert_idxs.iter()) {
top_x[expert_idx as usize].push(row_idx as u32);
let rw = if self.norm_topk_prob { rw / sum_rw } else { rw };
selected_experts[expert_idx as usize].push(rw)
}
}
let mut ys = xs.zeros_like()?;
for (expert_idx, expert_layer) in self.experts.iter().enumerate() {
let top_x = &top_x[expert_idx];
if top_x.is_empty() {
continue;
}
let top_x = Tensor::new(top_x.as_slice(), xs.device())?;
let selected_experts =
Tensor::new(selected_experts[expert_idx].as_slice(), xs.device())?
.reshape(((), 1))?
.to_dtype(xs.dtype())?;
// Index the correct hidden states and compute the expert hidden state for
// the current expert. We need to make sure to multiply the output hidden
// states by `routing_weights` on the corresponding tokens (top-1 and top-2)
let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?;
// current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None])
            let current_hidden_states = expert_layer.forward(&current_state)?;
let current_hidden_states = current_hidden_states.broadcast_mul(&selected_experts)?;
            ys = ys.index_add(&top_x, &current_hidden_states, 0)?;
}
let shared_expert_output = xs.apply(&self.shared_expert)?;
let shared_expert_output = shared_expert_output.broadcast_mul(&candle_nn::ops::sigmoid(
&xs.apply(&self.shared_expert_gate)?,
)?)?;
let ys = (ys + shared_expert_output)?;
let ys = ys.reshape((b_size, seq_len, hidden_dim))?;
Ok(ys)
}
}
#[derive(Debug, Clone)]
enum MlpOrMoeBlock {
Mlp(MLP),
MoeBlock(SparseMoeBlock),
}
impl Module for MlpOrMoeBlock {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::MoeBlock(m) => m.forward(xs),
Self::Mlp(m) => m.forward(xs),
}
}
}
#[derive(Debug, Clone)]
struct DecoderLayer {
self_attn: Attention,
mlp: MlpOrMoeBlock,
input_layernorm: RmsNorm,
post_attention_layernorm: RmsNorm,
}
impl DecoderLayer {
fn new(
layer_idx: usize,
rotary_emb: Arc<RotaryEmbedding>,
cfg: &Config,
vb: VarBuilder,
) -> Result<Self> {
let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?;
let mlp = if cfg.num_experts > 0 && (layer_idx + 1) % cfg.decoder_sparse_step == 0 {
MlpOrMoeBlock::MoeBlock(SparseMoeBlock::new(cfg, vb.pp("mlp"))?)
} else {
MlpOrMoeBlock::Mlp(MLP::new(cfg.intermediate_size, cfg, vb.pp("mlp"))?)
};
let input_layernorm =
RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?;
let post_attention_layernorm = RmsNorm::new(
cfg.hidden_size,
cfg.rms_norm_eps,
vb.pp("post_attention_layernorm"),
)?;
Ok(Self {
self_attn,
mlp,
input_layernorm,
post_attention_layernorm,
})
}
fn forward(
&mut self,
xs: &Tensor,
attention_mask: Option<&Tensor>,
seqlen_offset: usize,
) -> Result<Tensor> {
let residual = xs;
let xs = self.input_layernorm.forward(xs)?;
let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?;
let xs = (xs + residual)?;
let residual = &xs;
let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?;
residual + xs
}
fn clear_kv_cache(&mut self) {
self.self_attn.clear_kv_cache()
}
}
#[derive(Debug, Clone)]
pub struct Model {
embed_tokens: candle_nn::Embedding,
layers: Vec<DecoderLayer>,
norm: RmsNorm,
lm_head: Linear,
sliding_window: usize,
device: Device,
dtype: DType,
}
impl Model {
pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> {
let vb_m = vb.pp("model");
let embed_tokens =
candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?;
let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?);
let mut layers = Vec::with_capacity(cfg.num_hidden_layers);
let vb_l = vb_m.pp("layers");
for layer_idx in 0..cfg.num_hidden_layers {
let layer = DecoderLayer::new(layer_idx, rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?;
layers.push(layer)
}
let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?;
let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?;
Ok(Self {
embed_tokens,
layers,
norm,
lm_head,
sliding_window: cfg.sliding_window,
device: vb.device().clone(),
dtype: vb.dtype(),
})
}
fn prepare_decoder_attention_mask(
&self,
b_size: usize,
tgt_len: usize,
seqlen_offset: usize,
) -> Result<Tensor> {
        // Causal mask restricted to a sliding window: position i can attend to j only
        // when j <= i and i - j <= sliding_window; everything else is set to -inf.
let mask: Vec<_> = (0..tgt_len)
.flat_map(|i| {
(0..tgt_len).map(move |j| {
if i < j || j + self.sliding_window < i {
f32::NEG_INFINITY
} else {
0.
}
})
})
.collect();
let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?;
let mask = if seqlen_offset > 0 {
let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?;
Tensor::cat(&[&mask0, &mask], D::Minus1)?
} else {
mask
};
mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))?
.to_dtype(self.dtype)
}
pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> {
let (b_size, seq_len) = input_ids.dims2()?;
let attention_mask = if seq_len <= 1 {
None
} else {
let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?;
Some(mask)
};
let mut xs = self.embed_tokens.forward(input_ids)?;
for layer in self.layers.iter_mut() {
xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)?
}
xs.narrow(1, seq_len - 1, 1)?
.apply(&self.norm)?
.apply(&self.lm_head)
}
pub fn clear_kv_cache(&mut self) {
for layer in self.layers.iter_mut() {
layer.clear_kv_cache()
}
}
}
| 5 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/jina_bert.rs | //! # JinaBERT inference implementation
//!
//! Based on the Hugging Face implementation of Jina BERT and its variants.
//!
//! See: [Jina Embeddings on HuggingFace](https://huggingface.co/jinaai/jina-embeddings-v2-base-en)
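//!
//! A rough usage sketch (the weight path and token ids below are placeholders; see the
//! `jina-bert` example for the full tokenization pipeline):
//!
//! ```ignore
//! use candle::{DType, Device, Module, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::jina_bert::{BertModel, Config};
//!
//! fn run() -> candle::Result<()> {
//!     let device = Device::Cpu;
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DType::F32, &device)?
//!     };
//!     let model = BertModel::new(vb, &Config::v2_base())?;
//!     // Placeholder token ids; real ids come from the matching tokenizer.
//!     let token_ids = Tensor::new(&[[101u32, 2023, 2003, 102]], &device)?;
//!     // Token-level embeddings of shape (batch, seq_len, hidden_size).
//!     let embeddings = model.forward(&token_ids)?;
//!     println!("{:?}", embeddings.shape());
//!     Ok(())
//! }
//! ```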
use super::with_tracing::{linear, linear_no_bias, Embedding, Linear};
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{layer_norm, LayerNorm, Module, VarBuilder};
use serde::Deserialize;
pub const DTYPE: DType = DType::F32;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PositionEmbeddingType {
Absolute,
Alibi,
}
// https://huggingface.co/jinaai/jina-bert-implementation/blob/main/configuration_bert.py
#[derive(Debug, Clone, PartialEq, Deserialize)]
pub struct Config {
pub vocab_size: usize,
pub hidden_size: usize,
pub num_hidden_layers: usize,
pub num_attention_heads: usize,
pub intermediate_size: usize,
pub hidden_act: candle_nn::Activation,
pub max_position_embeddings: usize,
pub type_vocab_size: usize,
pub initializer_range: f64,
pub layer_norm_eps: f64,
pub pad_token_id: usize,
pub position_embedding_type: PositionEmbeddingType,
}
impl Config {
pub fn v2_base() -> Self {
// https://huggingface.co/jinaai/jina-embeddings-v2-base-en/blob/main/config.json
Self {
vocab_size: 30528,
hidden_size: 768,
num_hidden_layers: 12,
num_attention_heads: 12,
intermediate_size: 3072,
hidden_act: candle_nn::Activation::Gelu,
max_position_embeddings: 8192,
type_vocab_size: 2,
initializer_range: 0.02,
layer_norm_eps: 1e-12,
pad_token_id: 0,
position_embedding_type: PositionEmbeddingType::Alibi,
}
}
#[allow(clippy::too_many_arguments)]
pub fn new(
vocab_size: usize,
hidden_size: usize,
num_hidden_layers: usize,
num_attention_heads: usize,
intermediate_size: usize,
hidden_act: candle_nn::Activation,
max_position_embeddings: usize,
type_vocab_size: usize,
initializer_range: f64,
layer_norm_eps: f64,
pad_token_id: usize,
position_embedding_type: PositionEmbeddingType,
) -> Self {
Config {
vocab_size,
hidden_size,
num_hidden_layers,
num_attention_heads,
intermediate_size,
hidden_act,
max_position_embeddings,
type_vocab_size,
initializer_range,
layer_norm_eps,
pad_token_id,
position_embedding_type,
}
}
}
#[derive(Clone, Debug)]
struct BertEmbeddings {
word_embeddings: Embedding,
// no position_embeddings as we only support alibi.
token_type_embeddings: Embedding,
layer_norm: LayerNorm,
span: tracing::Span,
}
impl BertEmbeddings {
fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let word_embeddings =
Embedding::new(cfg.vocab_size, cfg.hidden_size, vb.pp("word_embeddings"))?;
let token_type_embeddings = Embedding::new(
cfg.type_vocab_size,
cfg.hidden_size,
vb.pp("token_type_embeddings"),
)?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self {
word_embeddings,
token_type_embeddings,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "embeddings"),
})
}
}
impl Module for BertEmbeddings {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let (b_size, seq_len) = input_ids.dims2()?;
let input_embeddings = self.word_embeddings.forward(input_ids)?;
let token_type_embeddings = Tensor::zeros(seq_len, DType::U32, input_ids.device())?
.broadcast_left(b_size)?
.apply(&self.token_type_embeddings)?;
let embeddings = (&input_embeddings + token_type_embeddings)?;
let embeddings = self.layer_norm.forward(&embeddings)?;
Ok(embeddings)
}
}
#[derive(Clone, Debug)]
struct BertSelfAttention {
query: Linear,
key: Linear,
value: Linear,
num_attention_heads: usize,
attention_head_size: usize,
span: tracing::Span,
span_softmax: tracing::Span,
}
impl BertSelfAttention {
fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let attention_head_size = cfg.hidden_size / cfg.num_attention_heads;
let all_head_size = cfg.num_attention_heads * attention_head_size;
let hidden_size = cfg.hidden_size;
let query = linear(hidden_size, all_head_size, vb.pp("query"))?;
let value = linear(hidden_size, all_head_size, vb.pp("value"))?;
let key = linear(hidden_size, all_head_size, vb.pp("key"))?;
Ok(Self {
query,
key,
value,
num_attention_heads: cfg.num_attention_heads,
attention_head_size,
span: tracing::span!(tracing::Level::TRACE, "self-attn"),
span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"),
})
}
fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> {
let mut x_shape = xs.dims().to_vec();
x_shape.pop();
x_shape.push(self.num_attention_heads);
x_shape.push(self.attention_head_size);
xs.reshape(x_shape)?.transpose(1, 2)?.contiguous()
}
fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let query_layer = self.query.forward(xs)?;
let key_layer = self.key.forward(xs)?;
let value_layer = self.value.forward(xs)?;
let query_layer = self.transpose_for_scores(&query_layer)?;
let key_layer = self.transpose_for_scores(&key_layer)?;
let value_layer = self.transpose_for_scores(&value_layer)?;
let attention_scores = query_layer.matmul(&key_layer.t()?)?;
let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?;
let attention_scores = attention_scores.broadcast_add(bias)?;
let attention_probs = {
let _enter_sm = self.span_softmax.enter();
candle_nn::ops::softmax_last_dim(&attention_scores)?
};
let context_layer = attention_probs.matmul(&value_layer)?;
let context_layer = context_layer.transpose(1, 2)?.contiguous()?;
let context_layer = context_layer.flatten_from(D::Minus2)?;
Ok(context_layer)
}
}
#[derive(Clone, Debug)]
struct BertSelfOutput {
dense: Linear,
layer_norm: LayerNorm,
span: tracing::Span,
}
impl BertSelfOutput {
fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let dense = linear(cfg.hidden_size, cfg.hidden_size, vb.pp("dense"))?;
let layer_norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("LayerNorm"))?;
Ok(Self {
dense,
layer_norm,
span: tracing::span!(tracing::Level::TRACE, "self-out"),
})
}
fn forward(&self, xs: &Tensor, input_tensor: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let xs = self.dense.forward(xs)?;
self.layer_norm.forward(&(xs + input_tensor)?)
}
}
#[derive(Clone, Debug)]
struct BertAttention {
self_attention: BertSelfAttention,
self_output: BertSelfOutput,
span: tracing::Span,
}
impl BertAttention {
fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let self_attention = BertSelfAttention::new(vb.pp("self"), cfg)?;
let self_output = BertSelfOutput::new(vb.pp("output"), cfg)?;
Ok(Self {
self_attention,
self_output,
span: tracing::span!(tracing::Level::TRACE, "attn"),
})
}
fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let self_outputs = self.self_attention.forward(xs, bias)?;
let attention_output = self.self_output.forward(&self_outputs, xs)?;
Ok(attention_output)
}
}
#[derive(Clone, Debug)]
struct BertGLUMLP {
gated_layers: Linear,
act: candle_nn::Activation,
wo: Linear,
layernorm: LayerNorm,
intermediate_size: usize,
}
impl BertGLUMLP {
fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let gated_layers = linear_no_bias(
cfg.hidden_size,
cfg.intermediate_size * 2,
vb.pp("gated_layers"),
)?;
let act = candle_nn::Activation::Gelu; // geglu
let wo = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("wo"))?;
let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("layernorm"))?;
Ok(Self {
gated_layers,
act,
wo,
layernorm,
intermediate_size: cfg.intermediate_size,
})
}
}
impl Module for BertGLUMLP {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let residual = xs;
let xs = xs.apply(&self.gated_layers)?;
let gated = xs.narrow(D::Minus1, 0, self.intermediate_size)?;
let non_gated = xs.narrow(D::Minus1, self.intermediate_size, self.intermediate_size)?;
let xs = (gated.apply(&self.act) * non_gated)?.apply(&self.wo);
(xs + residual)?.apply(&self.layernorm)
}
}
#[derive(Clone, Debug)]
struct BertLayer {
attention: BertAttention,
mlp: BertGLUMLP,
span: tracing::Span,
}
impl BertLayer {
fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let attention = BertAttention::new(vb.pp("attention"), cfg)?;
let mlp = BertGLUMLP::new(vb.pp("mlp"), cfg)?;
Ok(Self {
attention,
mlp,
span: tracing::span!(tracing::Level::TRACE, "layer"),
})
}
fn forward(&self, xs: &Tensor, bias: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
self.attention.forward(xs, bias)?.apply(&self.mlp)
}
}
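// Builds the ALiBi attention bias of shape (1, n_heads, max_len, max_len): entries are
// -|i - j| scaled per head by a slope of 2^(-8h / H), where h is the 1-based head index
// and H is the head count rounded up to a power of two. Distant positions thus receive a
// larger negative bias before the softmax; non power-of-two head counts interleave slopes
// from the next power of two, following the ALiBi paper.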
fn build_alibi_bias(cfg: &Config) -> Result<Tensor> {
let n_heads = cfg.num_attention_heads;
let seq_len = cfg.max_position_embeddings;
let alibi_bias = Tensor::arange(0, seq_len as i64, &Device::Cpu)?.to_dtype(DType::F32)?;
let alibi_bias = {
let a1 = alibi_bias.reshape((1, seq_len))?;
let a2 = alibi_bias.reshape((seq_len, 1))?;
a1.broadcast_sub(&a2)?.abs()?.broadcast_left(n_heads)?
};
let mut n_heads2 = 1;
while n_heads2 < n_heads {
n_heads2 *= 2
}
let slopes = (1..=n_heads2)
.map(|v| -1f32 / 2f32.powf((v * 8) as f32 / n_heads2 as f32))
.collect::<Vec<_>>();
let slopes = if n_heads2 == n_heads {
slopes
} else {
slopes
.iter()
.skip(1)
.step_by(2)
.chain(slopes.iter().step_by(2))
.take(n_heads)
.cloned()
.collect::<Vec<f32>>()
};
let slopes = Tensor::new(slopes, &Device::Cpu)?.reshape((1, (), 1, 1))?;
alibi_bias.to_dtype(DType::F32)?.broadcast_mul(&slopes)
}
#[derive(Clone, Debug)]
struct BertEncoder {
alibi: Tensor,
layers: Vec<BertLayer>,
span: tracing::Span,
}
impl BertEncoder {
fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
if cfg.position_embedding_type != PositionEmbeddingType::Alibi {
candle::bail!("only alibi is supported as a position-embedding-type")
}
let layers = (0..cfg.num_hidden_layers)
.map(|index| BertLayer::new(vb.pp(format!("layer.{index}")), cfg))
.collect::<Result<Vec<_>>>()?;
let span = tracing::span!(tracing::Level::TRACE, "encoder");
let alibi = build_alibi_bias(cfg)?.to_device(vb.device())?;
Ok(Self {
alibi,
layers,
span,
})
}
}
impl Module for BertEncoder {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let seq_len = xs.dim(1)?;
let alibi_bias = self.alibi.i((.., .., ..seq_len, ..seq_len))?;
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs, &alibi_bias)?
}
Ok(xs)
}
}
#[derive(Clone, Debug)]
pub struct BertModel {
embeddings: BertEmbeddings,
encoder: BertEncoder,
pub device: Device,
span: tracing::Span,
}
impl BertModel {
pub fn new(vb: VarBuilder, cfg: &Config) -> Result<Self> {
let embeddings = BertEmbeddings::new(vb.pp("embeddings"), cfg)?;
let encoder = BertEncoder::new(vb.pp("encoder"), cfg)?;
Ok(Self {
embeddings,
encoder,
device: vb.device().clone(),
span: tracing::span!(tracing::Level::TRACE, "model"),
})
}
}
impl Module for BertModel {
fn forward(&self, input_ids: &Tensor) -> Result<Tensor> {
let _enter = self.span.enter();
let embedding_output = self.embeddings.forward(input_ids)?;
let sequence_output = self.encoder.forward(&embedding_output)?;
Ok(sequence_output)
}
}
| 6 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/hiera.rs | //! Hiera inference implementation based on timm.
//!
//!
//! - 💻 [Hiera](https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/hiera.py)
//! - 📝 [Paper](https://arxiv.org/abs/2306.00989). Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles
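//!
//! A minimal classification sketch (the weight file name and the 1000-class head are
//! assumptions; input preprocessing such as resizing and normalization is omitted):
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::hiera;
//!
//! fn run() -> candle::Result<()> {
//!     let device = Device::Cpu;
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["hiera_tiny.safetensors"], DType::F32, &device)?
//!     };
//!     let model = hiera::hiera(&hiera::Config::tiny(), 1000, vb)?;
//!     // Dummy batch; real inputs are resized/normalized 224x224 images.
//!     let image = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?;
//!     let logits = image.apply(&model)?; // (1, 1000)
//!     println!("{:?}", logits.shape());
//!     Ok(())
//! }
//! ```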
use candle::{Result, D};
use candle_nn::{conv2d, layer_norm, linear, ops::softmax, Conv2dConfig, Func, VarBuilder};
#[derive(Debug, Clone, serde::Deserialize)]
pub struct Config {
channels: usize,
heads: usize,
stages: [usize; 4],
}
impl Config {
pub fn tiny() -> Self {
Self {
channels: 96,
heads: 1,
stages: [1, 2, 7, 2],
}
}
pub fn small() -> Self {
Self {
channels: 96,
heads: 1,
stages: [1, 2, 11, 2],
}
}
pub fn base() -> Self {
Self {
channels: 96,
heads: 1,
stages: [2, 3, 16, 3],
}
}
pub fn base_plus() -> Self {
Self {
channels: 112,
heads: 2,
stages: [2, 3, 16, 3],
}
}
pub fn large() -> Self {
Self {
channels: 144,
heads: 2,
stages: [2, 6, 36, 4],
}
}
pub fn huge() -> Self {
Self {
channels: 256,
heads: 4,
stages: [2, 6, 36, 4],
}
}
}
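// The stride-4 patch embedding below turns a standard 224x224 input into a 56x56 token
// grid, hence 56 * 56 positional embeddings.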
const NUM_TOKENS: usize = 56 * 56;
fn hiera_embeddings(channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let conv_cfg = Conv2dConfig {
stride: 4,
padding: 3,
..Default::default()
};
let proj = conv2d(3, channels, 7, conv_cfg, vb.pp("patch_embed.proj"))?;
let pos_embed = vb.get((1, NUM_TOKENS, channels), "pos_embed")?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&proj)?;
let (b, c, _, _) = xs.dims4()?;
let xs = xs.reshape((b, c, ()))?.transpose(1, 2)?;
let xs = xs.broadcast_add(&pos_embed)?;
Ok(xs)
}))
}
fn hiera_unroll() -> Result<Func<'static>> {
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let (mut b, _, c) = xs.dims3()?;
let mut size = 56;
xs = xs.reshape((b, size, size, c))?;
for _ in 0..3 {
size /= 2;
let new_shape = &[b, size, 2, size, 2, c];
xs = xs.reshape(new_shape)?;
xs = xs.permute((0, 2, 4, 1, 3, 5))?;
xs = xs.flatten(0, 2)?;
b *= 4;
}
xs = xs.reshape(((), NUM_TOKENS, c))?;
Ok(xs)
}))
}
fn hiera_mlp(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> {
let fc1 = linear(in_channels, out_channels, vb.pp("fc1"))?;
let fc2 = linear(out_channels, in_channels, vb.pp("fc2"))?;
Ok(Func::new(move |xs| {
let xs = xs.apply(&fc1)?.gelu()?.apply(&fc2)?;
Ok(xs)
}))
}
fn hiera_attention(
in_channels: usize,
out_channels: usize,
heads: usize,
q_stride: usize,
window_size: usize,
use_mask_attention: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let head_dim = out_channels / heads;
let scale = (head_dim as f64).powf(-0.5);
let proj = linear(out_channels, out_channels, vb.pp("proj"))?;
let qkv = linear(in_channels, out_channels * 3, vb.pp("qkv"))?;
Ok(Func::new(move |xs| {
let (b, n, _) = xs.dims3()?;
let num_windows = if use_mask_attention {
n / (q_stride * window_size)
} else {
1
};
let qkv = xs.apply(&qkv)?;
let ec = qkv.elem_count();
let s = ec / (b * num_windows * 3 * heads * head_dim);
let qkv = qkv
.reshape((b, s, num_windows, 3, heads, head_dim))?
.permute((3, 0, 4, 2, 1, 5))?;
let mut q = qkv.get(0)?;
let k = qkv.get(1)?;
let v = qkv.get(2)?;
if q_stride > 1 {
let ec = q.elem_count();
let s = ec / (b * num_windows * q_stride * heads * head_dim);
q = q
.reshape((b, heads, num_windows, q_stride, s, head_dim))?
.max(3)?;
}
let q = (q * scale)?;
// Q, K and V are 6 dimensional with the first dimension being 1.
// Squeeze them for the attention calculation since 6 dimensional matmuls are not supported.
let att = q
.squeeze(0)?
.matmul(&k.squeeze(0)?.transpose(D::Minus2, D::Minus1)?)?;
let att = softmax(&att, D::Minus1)?;
let xs = att.matmul(&v.squeeze(0)?)?.unsqueeze(0)?;
let xs = xs.transpose(1, 3)?.reshape((b, (), out_channels))?;
let xs = xs.apply(&proj)?;
Ok(xs)
}))
}
fn hiera_block(
heads: usize,
in_channels: usize,
out_channels: usize,
q_stride: usize,
window_size: usize,
use_mask_attention: bool,
vb: VarBuilder,
) -> Result<Func<'static>> {
let norm1 = layer_norm(in_channels, 1e-6, vb.pp("norm1"))?;
let norm2 = layer_norm(out_channels, 1e-6, vb.pp("norm2"))?;
let proj = linear(in_channels, out_channels, vb.pp("proj"));
let stride = 4;
let mlp = hiera_mlp(out_channels, out_channels * 4, vb.pp("mlp"))?;
let attn = hiera_attention(
in_channels,
out_channels,
heads,
q_stride,
window_size,
use_mask_attention,
vb.pp("attn"),
)?;
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
let xs_norm = xs.apply_t(&norm1, false)?;
if let Ok(p) = &proj {
xs = xs_norm.apply(p)?;
let (a, _, d) = xs.dims3()?;
xs = xs.reshape((a, stride, (), d))?.max(1)?;
}
let xs = (xs + &xs_norm.apply(&attn)?)?;
let xs = (&xs + &xs.apply_t(&norm2, false)?.apply(&mlp)?)?;
Ok(xs)
}))
}
fn hiera_blocks(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
let nblocks = cfg.stages.iter().sum();
let mut blocks = Vec::with_capacity(nblocks);
let mut out_channels = cfg.channels;
let mut in_channels = out_channels;
let mut heads = cfg.heads;
let mut b = 0;
let mut q_stride = 1;
let mut window_size = 64;
for s in 0..4 {
let use_mask_attention = s < 2;
for _ in 0..cfg.stages[s] {
blocks.push(hiera_block(
heads,
in_channels,
out_channels,
q_stride,
window_size,
use_mask_attention,
vb.pp(b),
)?);
b += 1;
in_channels = out_channels;
q_stride = 1;
}
q_stride = 4;
out_channels *= 2;
heads *= 2;
window_size /= 4;
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for block in blocks.iter() {
xs = xs.apply(block)?
}
Ok(xs)
}))
}
fn hiera_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
let norm = layer_norm(outputs, 1e-6, vb.pp("norm"))?;
let linear = linear(outputs, nclasses, vb.pp("fc"))?;
Ok(Func::new(move |xs| {
xs.apply_t(&norm, false)?.apply(&linear)
}))
}
// Build a hiera model for a given configuration.
fn hiera_model(cfg: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> {
let cls = match nclasses {
None => None,
Some(nclasses) => {
let outputs = cfg.channels * 8;
let head = hiera_head(outputs, nclasses, vb.pp("head"))?;
Some(head)
}
};
let embeddings = hiera_embeddings(cfg.channels, vb.clone())?;
let unroll = hiera_unroll()?;
let blocks = hiera_blocks(cfg, vb.pp("blocks"))?;
Ok(Func::new(move |xs| {
let xs = xs
.apply(&embeddings)?
.apply(&unroll)?
.apply(&blocks)?
.mean(1)?;
match &cls {
None => Ok(xs),
Some(cls) => xs.apply(cls),
}
}))
}
pub fn hiera(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> {
hiera_model(cfg, Some(nclasses), vb)
}
pub fn hiera_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> {
hiera_model(cfg, None, vb)
}
| 7 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/resnet.rs | //! # ResNet Implementation
//!
//! Implementation of ResNet architectures as described in the paper:
//!
//! ## Reference
//!
//! [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385)
//! He et al. (2015)
//!
//! This paper introduced ResNet, a deep neural network architecture that utilizes
//! skip connections ("residual connections") to enable training of very deep networks.
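//!
//! A short usage sketch (the weight file name is an assumption and image preprocessing
//! is omitted):
//!
//! ```ignore
//! use candle::{DType, Device, Tensor};
//! use candle_nn::VarBuilder;
//! use candle_transformers::models::resnet;
//!
//! fn run() -> candle::Result<()> {
//!     let device = Device::Cpu;
//!     let vb = unsafe {
//!         VarBuilder::from_mmaped_safetensors(&["resnet18.safetensors"], DType::F32, &device)?
//!     };
//!     let model = resnet::resnet18(1000, vb)?;
//!     let image = Tensor::zeros((1, 3, 224, 224), DType::F32, &device)?;
//!     let logits = image.apply(&model)?; // (1, 1000)
//!     println!("{:?}", logits.shape());
//!     Ok(())
//! }
//! ```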
use candle::{Result, D};
use candle_nn::{batch_norm, Conv2d, Func, VarBuilder};
fn conv2d(
c_in: usize,
c_out: usize,
ksize: usize,
padding: usize,
stride: usize,
vb: VarBuilder,
) -> Result<Conv2d> {
let conv2d_cfg = candle_nn::Conv2dConfig {
stride,
padding,
..Default::default()
};
candle_nn::conv2d_no_bias(c_in, c_out, ksize, conv2d_cfg, vb)
}
fn downsample(c_in: usize, c_out: usize, stride: usize, vb: VarBuilder) -> Result<Func> {
if stride != 1 || c_in != c_out {
let conv = conv2d(c_in, c_out, 1, 0, stride, vb.pp(0))?;
let bn = batch_norm(c_out, 1e-5, vb.pp(1))?;
Ok(Func::new(move |xs| xs.apply(&conv)?.apply_t(&bn, false)))
} else {
Ok(Func::new(|xs| Ok(xs.clone())))
}
}
fn basic_block(c_in: usize, c_out: usize, stride: usize, vb: VarBuilder) -> Result<Func> {
let conv1 = conv2d(c_in, c_out, 3, 1, stride, vb.pp("conv1"))?;
let bn1 = batch_norm(c_out, 1e-5, vb.pp("bn1"))?;
let conv2 = conv2d(c_out, c_out, 3, 1, 1, vb.pp("conv2"))?;
let bn2 = batch_norm(c_out, 1e-5, vb.pp("bn2"))?;
let downsample = downsample(c_in, c_out, stride, vb.pp("downsample"))?;
Ok(Func::new(move |xs| {
let ys = xs
.apply(&conv1)?
.apply_t(&bn1, false)?
.relu()?
.apply(&conv2)?
.apply_t(&bn2, false)?;
(xs.apply(&downsample)? + ys)?.relu()
}))
}
fn basic_layer(
c_in: usize,
c_out: usize,
stride: usize,
cnt: usize,
vb: VarBuilder,
) -> Result<Func> {
let mut layers = Vec::with_capacity(cnt);
for index in 0..cnt {
let l_in = if index == 0 { c_in } else { c_out };
let stride = if index == 0 { stride } else { 1 };
layers.push(basic_block(l_in, c_out, stride, vb.pp(index))?)
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for layer in layers.iter() {
xs = xs.apply(layer)?
}
Ok(xs)
}))
}
fn resnet(
nclasses: Option<usize>,
c1: usize,
c2: usize,
c3: usize,
c4: usize,
vb: VarBuilder,
) -> Result<Func> {
let conv1 = conv2d(3, 64, 7, 3, 2, vb.pp("conv1"))?;
let bn1 = batch_norm(64, 1e-5, vb.pp("bn1"))?;
let layer1 = basic_layer(64, 64, 1, c1, vb.pp("layer1"))?;
let layer2 = basic_layer(64, 128, 2, c2, vb.pp("layer2"))?;
let layer3 = basic_layer(128, 256, 2, c3, vb.pp("layer3"))?;
let layer4 = basic_layer(256, 512, 2, c4, vb.pp("layer4"))?;
let fc = match nclasses {
None => None,
Some(nclasses) => {
let linear = candle_nn::linear(512, nclasses, vb.pp("fc"))?;
Some(linear)
}
};
Ok(Func::new(move |xs| {
let xs = xs
.apply(&conv1)?
.apply_t(&bn1, false)?
.relu()?
.pad_with_same(D::Minus1, 1, 1)?
.pad_with_same(D::Minus2, 1, 1)?
.max_pool2d_with_stride(3, 2)?
.apply(&layer1)?
.apply(&layer2)?
.apply(&layer3)?
.apply(&layer4)?
.mean(D::Minus1)?
.mean(D::Minus1)?;
match &fc {
None => Ok(xs),
Some(fc) => xs.apply(fc),
}
}))
}
/// Creates a ResNet-18 model.
pub fn resnet18(num_classes: usize, vb: VarBuilder) -> Result<Func> {
resnet(Some(num_classes), 2, 2, 2, 2, vb)
}
pub fn resnet18_no_final_layer(vb: VarBuilder) -> Result<Func> {
resnet(None, 2, 2, 2, 2, vb)
}
/// Creates a ResNet-34 model.
pub fn resnet34(num_classes: usize, vb: VarBuilder) -> Result<Func> {
resnet(Some(num_classes), 3, 4, 6, 3, vb)
}
pub fn resnet34_no_final_layer(vb: VarBuilder) -> Result<Func> {
resnet(None, 3, 4, 6, 3, vb)
}
// Bottleneck versions for ResNet 50, 101, and 152.
fn bottleneck_block(
c_in: usize,
c_out: usize,
stride: usize,
e: usize,
vb: VarBuilder,
) -> Result<Func> {
let e_dim = e * c_out;
let conv1 = conv2d(c_in, c_out, 1, 0, 1, vb.pp("conv1"))?;
let bn1 = batch_norm(c_out, 1e-5, vb.pp("bn1"))?;
let conv2 = conv2d(c_out, c_out, 3, 1, stride, vb.pp("conv2"))?;
let bn2 = batch_norm(c_out, 1e-5, vb.pp("bn2"))?;
let conv3 = conv2d(c_out, e_dim, 1, 0, 1, vb.pp("conv3"))?;
let bn3 = batch_norm(e_dim, 1e-5, vb.pp("bn3"))?;
let downsample = downsample(c_in, e_dim, stride, vb.pp("downsample"))?;
Ok(Func::new(move |xs| {
let ys = xs
.apply(&conv1)?
.apply_t(&bn1, false)?
.relu()?
.apply(&conv2)?
.apply_t(&bn2, false)?
.relu()?
.apply(&conv3)?
.apply_t(&bn3, false)?;
(xs.apply(&downsample)? + ys)?.relu()
}))
}
fn bottleneck_layer(
c_in: usize,
c_out: usize,
stride: usize,
cnt: usize,
vb: VarBuilder,
) -> Result<Func> {
let mut layers = Vec::with_capacity(cnt);
for index in 0..cnt {
let l_in = if index == 0 { c_in } else { 4 * c_out };
let stride = if index == 0 { stride } else { 1 };
layers.push(bottleneck_block(l_in, c_out, stride, 4, vb.pp(index))?)
}
Ok(Func::new(move |xs| {
let mut xs = xs.clone();
for layer in layers.iter() {
xs = xs.apply(layer)?
}
Ok(xs)
}))
}
fn bottleneck_resnet(
nclasses: Option<usize>,
c1: usize,
c2: usize,
c3: usize,
c4: usize,
vb: VarBuilder,
) -> Result<Func> {
let conv1 = conv2d(3, 64, 7, 3, 2, vb.pp("conv1"))?;
let bn1 = batch_norm(64, 1e-5, vb.pp("bn1"))?;
let layer1 = bottleneck_layer(64, 64, 1, c1, vb.pp("layer1"))?;
let layer2 = bottleneck_layer(4 * 64, 128, 2, c2, vb.pp("layer2"))?;
let layer3 = bottleneck_layer(4 * 128, 256, 2, c3, vb.pp("layer3"))?;
let layer4 = bottleneck_layer(4 * 256, 512, 2, c4, vb.pp("layer4"))?;
let fc = match nclasses {
None => None,
Some(nclasses) => {
let linear = candle_nn::linear(4 * 512, nclasses, vb.pp("fc"))?;
Some(linear)
}
};
Ok(Func::new(move |xs| {
let xs = xs
.apply(&conv1)?
.apply_t(&bn1, false)?
.relu()?
.pad_with_same(D::Minus1, 1, 1)?
.pad_with_same(D::Minus2, 1, 1)?
.max_pool2d_with_stride(3, 2)?
.apply(&layer1)?
.apply(&layer2)?
.apply(&layer3)?
.apply(&layer4)?
.mean(D::Minus1)?
.mean(D::Minus1)?;
match &fc {
None => Ok(xs),
Some(fc) => xs.apply(fc),
}
}))
}
pub fn resnet50(num_classes: usize, vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(Some(num_classes), 3, 4, 6, 3, vb)
}
pub fn resnet50_no_final_layer(vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(None, 3, 4, 6, 3, vb)
}
pub fn resnet101(num_classes: usize, vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(Some(num_classes), 3, 4, 23, 3, vb)
}
pub fn resnet101_no_final_layer(vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(None, 3, 4, 23, 3, vb)
}
pub fn resnet152(num_classes: usize, vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(Some(num_classes), 3, 8, 36, 3, vb)
}
pub fn resnet152_no_final_layer(vb: VarBuilder) -> Result<Func> {
bottleneck_resnet(None, 3, 8, 36, 3, vb)
}
| 8 |
0 | hf_public_repos/candle/candle-transformers/src | hf_public_repos/candle/candle-transformers/src/models/colpali.rs | //! Colpali Model for text/image similarity scoring.
//!
//! Colpali pairs the PaliGemma vision-language model with a lightweight projection head
//! to produce late-interaction embeddings for content retrieval.
//!
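//! A rough retrieval sketch; constructing the `paligemma::Config`, the image tensors and
//! the tokenized inputs is assumed to happen elsewhere (see the colpali example for the
//! full pipeline):
//!
//! ```ignore
//! use candle::{Result, Tensor};
//! use candle_transformers::models::colpali::Model;
//!
//! fn token_similarities(
//!     model: &mut Model,
//!     pixel_values: &Tensor,
//!     image_input_ids: &Tensor,
//!     query_ids: &Tensor,
//! ) -> Result<Tensor> {
//!     let image_embs = model.forward_images(pixel_values, image_input_ids)?; // (b, n_img_tokens, 128)
//!     let query_embs = model.forward_text(query_ids)?; // (b, n_query_tokens, 128)
//!     // Token-level similarity matrix used for late-interaction (MaxSim-style) scoring.
//!     query_embs.matmul(&image_embs.transpose(1, 2)?)
//! }
//! ```
//!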
use candle::{Module, Result, Tensor};
use candle_nn::VarBuilder;
use super::paligemma;
use candle_nn::{linear, Linear};
pub struct Model {
pub model: paligemma::Model,
pub custom_text_projection: Linear,
}
impl Model {
pub fn new(config: &paligemma::Config, vb: VarBuilder) -> Result<Self> {
let model = paligemma::Model::new(config, vb.pp("model"))?;
let custom_text_projection = linear(
config.text_config.hidden_size,
128,
vb.pp("custom_text_proj"),
)?;
Ok(Self {
model,
custom_text_projection,
})
}
pub fn forward_images(&mut self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<Tensor> {
let outputs = self
.model
.setup_without_projection(pixel_values, input_ids)?;
let outputs = self.custom_text_projection.forward(&outputs)?;
let outputs = outputs.broadcast_div(&outputs.sqr()?.sum_keepdim(2)?.sqrt()?)?;
Ok(outputs)
}
pub fn forward_text(&mut self, input_ids: &Tensor) -> Result<Tensor> {
let outputs = self.model.forward_without_projection(input_ids)?;
let outputs = self.custom_text_projection.forward(&outputs)?;
let outputs = outputs.broadcast_div(&outputs.sqr()?.sum_keepdim(2)?.sqrt()?)?;
Ok(outputs)
}
}
| 9 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/jina-bert/main.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle_transformers::models::jina_bert::{BertModel, Config, PositionEmbeddingType};
use anyhow::Error as E;
use candle::{DType, Module, Tensor};
use candle_nn::VarBuilder;
use clap::Parser;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// When set, compute embeddings for this prompt.
#[arg(long)]
prompt: Option<String>,
/// The number of times to run the prompt.
#[arg(long, default_value = "1")]
n: usize,
/// L2 normalization for embeddings.
#[arg(long, default_value = "true")]
normalize_embeddings: bool,
#[arg(long)]
tokenizer: Option<String>,
#[arg(long)]
model: Option<String>,
#[arg(long)]
model_file: Option<String>,
}
impl Args {
fn build_model_and_tokenizer(&self) -> anyhow::Result<(BertModel, tokenizers::Tokenizer)> {
use hf_hub::{api::sync::Api, Repo, RepoType};
let model_name = match self.model.as_ref() {
Some(model) => model.to_string(),
None => "jinaai/jina-embeddings-v2-base-en".to_string(),
};
let model = match &self.model_file {
Some(model_file) => std::path::PathBuf::from(model_file),
None => Api::new()?
.repo(Repo::new(model_name.to_string(), RepoType::Model))
.get("model.safetensors")?,
};
let tokenizer = match &self.tokenizer {
Some(file) => std::path::PathBuf::from(file),
None => Api::new()?
.repo(Repo::new(model_name.to_string(), RepoType::Model))
.get("tokenizer.json")?,
};
let device = candle_examples::device(self.cpu)?;
let tokenizer = tokenizers::Tokenizer::from_file(tokenizer).map_err(E::msg)?;
let config = Config::new(
tokenizer.get_vocab_size(true),
768,
12,
12,
3072,
candle_nn::Activation::Gelu,
8192,
2,
0.02,
1e-12,
0,
PositionEmbeddingType::Alibi,
);
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? };
let model = BertModel::new(vb, &config)?;
Ok((model, tokenizer))
}
}
fn main() -> anyhow::Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
println!("tracing...");
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let start = std::time::Instant::now();
let (model, mut tokenizer) = args.build_model_and_tokenizer()?;
let device = &model.device;
if let Some(prompt) = args.prompt {
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
let tokens = tokenizer
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?;
println!("Loaded and encoded {:?}", start.elapsed());
let start = std::time::Instant::now();
let embeddings = model.forward(&token_ids)?;
let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?;
let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?;
println!("pooled_embeddigns: {embeddings}");
let embeddings = if args.normalize_embeddings {
normalize_l2(&embeddings)?
} else {
embeddings
};
if args.normalize_embeddings {
println!("normalized_embeddings: {embeddings}");
}
println!("Took {:?}", start.elapsed());
} else {
let sentences = [
"The cat sits outside",
"A man is playing guitar",
"I love pasta",
"The new movie is awesome",
"The cat plays in the garden",
"A woman watches TV",
"The new movie is so great",
"Do you like pizza?",
];
let n_sentences = sentences.len();
if let Some(pp) = tokenizer.get_padding_mut() {
pp.strategy = tokenizers::PaddingStrategy::BatchLongest
} else {
let pp = tokenizers::PaddingParams {
strategy: tokenizers::PaddingStrategy::BatchLongest,
..Default::default()
};
tokenizer.with_padding(Some(pp));
}
let tokens = tokenizer
.encode_batch(sentences.to_vec(), true)
.map_err(E::msg)?;
let token_ids = tokens
.iter()
.map(|tokens| {
let tokens = tokens.get_ids().to_vec();
Tensor::new(tokens.as_slice(), device)
})
.collect::<candle::Result<Vec<_>>>()?;
let token_ids = Tensor::stack(&token_ids, 0)?;
println!("running inference on batch {:?}", token_ids.shape());
let embeddings = model.forward(&token_ids)?;
println!("generated embeddings {:?}", embeddings.shape());
// Apply some avg-pooling by taking the mean embedding value for all tokens (including padding)
let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?;
let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?;
let embeddings = if args.normalize_embeddings {
normalize_l2(&embeddings)?
} else {
embeddings
};
println!("pooled embeddings {:?}", embeddings.shape());
let mut similarities = vec![];
for i in 0..n_sentences {
let e_i = embeddings.get(i)?;
for j in (i + 1)..n_sentences {
let e_j = embeddings.get(j)?;
let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?;
let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?;
let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?;
let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt();
similarities.push((cosine_similarity, i, j))
}
}
similarities.sort_by(|u, v| v.0.total_cmp(&u.0));
for &(score, i, j) in similarities[..5].iter() {
println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j])
}
}
Ok(())
}
pub fn normalize_l2(v: &Tensor) -> candle::Result<Tensor> {
v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)
}
| 0 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/jina-bert/README.md | # candle-jina-bert
Jina-Bert is a general-purpose text embedding model with a context size of 8192, [model
card](https://huggingface.co/jinaai/jina-embeddings-v2-base-en). In this example
it can be used for two different tasks:
- Compute sentence embeddings for a prompt.
- Compute similarities between a set of sentences.
## Sentence embeddings
Jina-Bert is used to compute the sentence embeddings for a prompt. The model weights
are downloaded from the hub on the first run.
```bash
cargo run --example jina-bert --release -- --prompt "Here is a test sentence"
> [[[ 0.1595, -0.9885, 0.6494, ..., 0.3003, -0.6901, -1.2355],
> [ 0.0374, -0.1798, 1.3359, ..., 0.6731, 0.2133, -1.6807],
> [ 0.1700, -0.8534, 0.8924, ..., -0.1785, -0.0727, -1.5087],
> ...
> [-0.3113, -1.3665, 0.2027, ..., -0.2519, 0.1711, -1.5811],
> [ 0.0907, -1.0492, 0.5382, ..., 0.0242, -0.7077, -1.0830],
> [ 0.0369, -0.6343, 0.6105, ..., 0.0671, 0.3778, -1.1505]]]
> Tensor[[1, 7, 768], f32]
```
## Similarities
In this example, Jina-Bert is used to compute the sentence embeddings for a set of
sentences (hardcoded in the examples). Then cosine similarities are computed for
each sentence pair and they are reported by decreasing values, hence the first
reported pair contains the two sentences that have the highest similarity score.
The sentence embeddings are computed using average pooling through all the
sentence tokens, including some potential padding.
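The pooling and scoring logic boils down to the following sketch, which mirrors the
example code (`embeddings` is the `(n_sentences, n_tokens, hidden_size)` output of the
model):
```rust
use candle::{Result, Tensor};

// Mean-pool token embeddings and compute the cosine similarity of sentences i and j.
fn cosine_similarity(embeddings: &Tensor, i: usize, j: usize) -> Result<f32> {
    let (_n, n_tokens, _h) = embeddings.dims3()?;
    // Average pooling over the token dimension.
    let pooled = (embeddings.sum(1)? / (n_tokens as f64))?;
    let (e_i, e_j) = (pooled.get(i)?, pooled.get(j)?);
    let dot = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?;
    let norm_i = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?;
    let norm_j = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?;
    Ok(dot / (norm_i * norm_j).sqrt())
}
```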
```bash
cargo run --example jina-bert --release
> score: 0.94 'The new movie is awesome' 'The new movie is so great'
> score: 0.81 'The cat sits outside' 'The cat plays in the garden'
> score: 0.78 'I love pasta' 'Do you like pizza?'
> score: 0.68 'I love pasta' 'The new movie is awesome'
> score: 0.67 'A man is playing guitar' 'A woman watches TV'
```
| 1 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/distilbert/main.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use candle_transformers::models::distilbert::{Config, DistilBertModel, DTYPE};
use anyhow::{Error as E, Result};
use candle::{Device, Tensor};
use candle_nn::VarBuilder;
use clap::Parser;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
/// The model to use, check out available models: https://huggingface.co/models?library=sentence-transformers&sort=trending
#[arg(long)]
model_id: Option<String>,
#[arg(long)]
revision: Option<String>,
/// When set, compute embeddings for this prompt.
#[arg(long)]
prompt: String,
/// Use the pytorch weights rather than the safetensors ones
#[arg(long)]
use_pth: bool,
/// The number of times to run the prompt.
#[arg(long, default_value = "1")]
n: usize,
/// L2 normalization for embeddings.
#[arg(long, default_value = "true")]
normalize_embeddings: bool,
}
impl Args {
fn build_model_and_tokenizer(&self) -> Result<(DistilBertModel, Tokenizer)> {
let device = candle_examples::device(self.cpu)?;
let default_model = "distilbert-base-uncased".to_string();
let default_revision = "main".to_string();
let (model_id, revision) = match (self.model_id.to_owned(), self.revision.to_owned()) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, default_revision),
};
let repo = Repo::with_revision(model_id, RepoType::Model, revision);
let (config_filename, tokenizer_filename, weights_filename) = {
let api = Api::new()?;
let api = api.repo(repo);
let config = api.get("config.json")?;
let tokenizer = api.get("tokenizer.json")?;
let weights = if self.use_pth {
api.get("pytorch_model.bin")?
} else {
api.get("model.safetensors")?
};
(config, tokenizer, weights)
};
let config = std::fs::read_to_string(config_filename)?;
let config: Config = serde_json::from_str(&config)?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let vb = if self.use_pth {
VarBuilder::from_pth(&weights_filename, DTYPE, &device)?
} else {
unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], DTYPE, &device)? }
};
let model = DistilBertModel::load(vb, &config)?;
Ok((model, tokenizer))
}
}
fn get_mask(size: usize, device: &Device) -> Tensor {
let mask: Vec<_> = (0..size)
.flat_map(|i| (0..size).map(move |j| u8::from(j > i)))
.collect();
Tensor::from_slice(&mask, (size, size), device).unwrap()
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
println!("tracing...");
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let (model, mut tokenizer) = args.build_model_and_tokenizer()?;
let device = &model.device;
let tokenizer = tokenizer
.with_padding(None)
.with_truncation(None)
.map_err(E::msg)?;
let tokens = tokenizer
.encode(args.prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?;
let mask = get_mask(tokens.len(), device);
println!("token_ids: {:?}", token_ids.to_vec2::<u32>());
println!("mask: {:?}", mask.to_vec2::<u8>());
let ys = model.forward(&token_ids, &mask)?;
println!("{ys}");
Ok(())
}
pub fn normalize_l2(v: &Tensor) -> Result<Tensor> {
Ok(v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?)?)
}
| 2 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/distilbert/README.md | # candle-distilbert
DistilBert is a distilled version of the Bert model.
## Sentence embeddings
DistilBert is used to compute the sentence embeddings for a prompt. The model weights
are downloaded from the hub on the first run.
```bash
cargo run --example distilbert --release -- --prompt "Here is a test sentence"
> [[[ 0.5109, 0.1280, -0.2635, ..., 0.3462, -1.0434, 0.1441],
> [ 0.1735, 0.0818, -0.5549, ..., 0.3472, -0.8264, -0.0244],
> [ 0.0702, -0.1311, -0.4914, ..., 0.3483, -0.6194, 0.1829],
> ...
> [ 0.2993, -0.0106, -0.4640, ..., 0.2844, -0.6732, 0.0042],
> [ 0.1066, -0.0081, -0.4299, ..., 0.3435, -0.7729, 0.0190],
> [ 0.8903, 0.2055, -0.2541, ..., 0.3208, -0.6585, 0.0586]]]
> Tensor[[1, 7, 768], f32]
```
| 3 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/mixtral/main.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::{Error as E, Result};
use clap::Parser;
use candle_transformers::models::mixtral::{Config, Model};
use candle::{DType, Device, Tensor};
use candle_examples::token_output_stream::TokenOutputStream;
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;
struct TextGeneration {
model: Model,
device: Device,
tokenizer: TokenOutputStream,
logits_processor: LogitsProcessor,
repeat_penalty: f32,
repeat_last_n: usize,
}
impl TextGeneration {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
seed: u64,
temp: Option<f64>,
top_p: Option<f64>,
repeat_penalty: f32,
repeat_last_n: usize,
device: &Device,
) -> Self {
let logits_processor = LogitsProcessor::new(seed, temp, top_p);
Self {
model,
tokenizer: TokenOutputStream::new(tokenizer),
logits_processor,
repeat_penalty,
repeat_last_n,
device: device.clone(),
}
}
fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
use std::io::Write;
self.tokenizer.clear();
let mut tokens = self
.tokenizer
.tokenizer()
.encode(prompt, true)
.map_err(E::msg)?
.get_ids()
.to_vec();
for &t in tokens.iter() {
if let Some(t) = self.tokenizer.next_token(t)? {
print!("{t}")
}
}
std::io::stdout().flush()?;
let mut generated_tokens = 0usize;
let eos_token = match self.tokenizer.get_token("</s>") {
Some(token) => token,
None => anyhow::bail!("cannot find the </s> token"),
};
let start_gen = std::time::Instant::now();
for index in 0..sample_len {
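            // After the first step the KV cache already holds the prompt context, so only
            // the most recently generated token needs to be fed to the model.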
let context_size = if index > 0 { 1 } else { tokens.len() };
let start_pos = tokens.len().saturating_sub(context_size);
let ctxt = &tokens[start_pos..];
let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
let logits = self.model.forward(&input, start_pos)?;
let logits = logits.squeeze(0)?.squeeze(0)?.to_dtype(DType::F32)?;
let logits = if self.repeat_penalty == 1. {
logits
} else {
let start_at = tokens.len().saturating_sub(self.repeat_last_n);
candle_transformers::utils::apply_repeat_penalty(
&logits,
self.repeat_penalty,
&tokens[start_at..],
)?
};
let next_token = self.logits_processor.sample(&logits)?;
tokens.push(next_token);
generated_tokens += 1;
if next_token == eos_token {
break;
}
if let Some(t) = self.tokenizer.next_token(next_token)? {
print!("{t}");
std::io::stdout().flush()?;
}
}
let dt = start_gen.elapsed();
if let Some(rest) = self.tokenizer.decode_rest().map_err(E::msg)? {
print!("{rest}");
}
std::io::stdout().flush()?;
println!(
"\n{generated_tokens} tokens generated ({:.2} token/s)",
generated_tokens as f64 / dt.as_secs_f64(),
);
Ok(())
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
use_flash_attn: bool,
#[arg(long)]
prompt: String,
/// The temperature used to generate samples.
#[arg(long)]
temperature: Option<f64>,
/// Nucleus sampling probability cutoff.
#[arg(long)]
top_p: Option<f64>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// The length of the sample to generate (in tokens).
#[arg(long, short = 'n', default_value_t = 10000)]
sample_len: usize,
#[arg(long, default_value = "mistralai/Mixtral-8x7B-v0.1")]
model_id: String,
#[arg(long, default_value = "main")]
revision: String,
#[arg(long)]
tokenizer_file: Option<String>,
#[arg(long)]
weight_files: Option<String>,
/// Penalty to be applied for repeating tokens, 1. means no penalty.
#[arg(long, default_value_t = 1.1)]
repeat_penalty: f32,
/// The context size to consider for the repeat penalty.
#[arg(long, default_value_t = 64)]
repeat_last_n: usize,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
println!(
"avx: {}, neon: {}, simd128: {}, f16c: {}",
candle::utils::with_avx(),
candle::utils::with_neon(),
candle::utils::with_simd128(),
candle::utils::with_f16c()
);
println!(
"temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}",
args.temperature.unwrap_or(0.),
args.repeat_penalty,
args.repeat_last_n
);
let start = std::time::Instant::now();
let api = Api::new()?;
let repo = api.repo(Repo::with_revision(
args.model_id,
RepoType::Model,
args.revision,
));
let tokenizer_filename = match args.tokenizer_file {
Some(file) => std::path::PathBuf::from(file),
None => repo.get("tokenizer.json")?,
};
let filenames = match args.weight_files {
Some(files) => files
.split(',')
.map(std::path::PathBuf::from)
.collect::<Vec<_>>(),
None => candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?,
};
println!("retrieved the files in {:?}", start.elapsed());
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let start = std::time::Instant::now();
let config = Config::v0_1_8x7b(args.use_flash_attn);
let device = candle_examples::device(args.cpu)?;
let dtype = device.bf16_default_to_f32();
let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? };
let model = Model::new(&config, vb)?;
println!("loaded the model in {:?}", start.elapsed());
let mut pipeline = TextGeneration::new(
model,
tokenizer,
args.seed,
args.temperature,
args.top_p,
args.repeat_penalty,
args.repeat_last_n,
&device,
);
pipeline.run(&args.prompt, args.sample_len)?;
Ok(())
}
| 4 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/mixtral/README.md | # candle-mixtral: 8x7b LLM using a sparse mixture of experts.
Mixtral-8x7B-v0.1 is a pretrained generative sparse mixture-of-experts LLM with 46.7B total parameters (roughly 12.9B active per token).
- [Blog post](https://mistral.ai/news/mixtral-of-experts/) from Mistral announcing the model release.
- [Model card](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) on the HuggingFace Hub.
## Running the example
```bash
$ cargo run --example mixtral --release -- --prompt "def print_prime(n): "
def print_prime(n): # n is the number of prime numbers to be printed
i = 2
count = 0
while (count < n):
if (isPrime(i)):
print(i)
count += 1
i += 1
def isPrime(n):
for x in range(2, int(n**0.5)+1):
if (n % x == 0):
...
```
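The example also accepts the usual generation flags defined in its `Args` struct (temperature, sample length, CPU fallback, ...). The values below are only illustrative:
```bash
$ cargo run --example mixtral --release -- \
    --prompt "def print_prime(n): " --temperature 0.8 --sample-len 150
```
Add `--cpu` to force CPU execution when no GPU is available.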
| 5 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/whisper/main.rs | // https://github.com/openai/whisper/blob/main/whisper/model.py
// TODO:
// - Batch size greater than 1.
// - More token filters (SuppressBlanks, ApplyTimestampRules).
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::{Error as E, Result};
use candle::{Device, IndexOp, Tensor};
use candle_nn::{ops::softmax, VarBuilder};
use clap::{Parser, ValueEnum};
use hf_hub::{api::sync::Api, Repo, RepoType};
use rand::{distributions::Distribution, SeedableRng};
use tokenizers::Tokenizer;
mod multilingual;
mod pcm_decode;
use candle_transformers::models::whisper::{self as m, audio, Config};
pub enum Model {
Normal(m::model::Whisper),
Quantized(m::quantized_model::Whisper),
}
// Maybe we should use some traits rather than doing the dispatch for all these.
impl Model {
pub fn config(&self) -> &Config {
match self {
Self::Normal(m) => &m.config,
Self::Quantized(m) => &m.config,
}
}
pub fn encoder_forward(&mut self, x: &Tensor, flush: bool) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.encoder.forward(x, flush),
Self::Quantized(m) => m.encoder.forward(x, flush),
}
}
pub fn decoder_forward(
&mut self,
x: &Tensor,
xa: &Tensor,
flush: bool,
) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.decoder.forward(x, xa, flush),
Self::Quantized(m) => m.decoder.forward(x, xa, flush),
}
}
pub fn decoder_final_linear(&self, x: &Tensor) -> candle::Result<Tensor> {
match self {
Self::Normal(m) => m.decoder.final_linear(x),
Self::Quantized(m) => m.decoder.final_linear(x),
}
}
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct DecodingResult {
tokens: Vec<u32>,
text: String,
avg_logprob: f64,
no_speech_prob: f64,
temperature: f64,
compression_ratio: f64,
}
#[allow(dead_code)]
#[derive(Debug, Clone)]
struct Segment {
start: f64,
duration: f64,
dr: DecodingResult,
}
struct Decoder {
model: Model,
rng: rand::rngs::StdRng,
task: Option<Task>,
timestamps: bool,
verbose: bool,
tokenizer: Tokenizer,
suppress_tokens: Tensor,
sot_token: u32,
transcribe_token: u32,
translate_token: u32,
eot_token: u32,
no_speech_token: u32,
no_timestamps_token: u32,
language_token: Option<u32>,
}
impl Decoder {
#[allow(clippy::too_many_arguments)]
fn new(
model: Model,
tokenizer: Tokenizer,
seed: u64,
device: &Device,
language_token: Option<u32>,
task: Option<Task>,
timestamps: bool,
verbose: bool,
) -> Result<Self> {
let no_timestamps_token = token_id(&tokenizer, m::NO_TIMESTAMPS_TOKEN)?;
// Suppress the notimestamps token when in timestamps mode.
// https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L452
let suppress_tokens: Vec<f32> = (0..model.config().vocab_size as u32)
.map(|i| {
if model.config().suppress_tokens.contains(&i)
|| timestamps && i == no_timestamps_token
{
f32::NEG_INFINITY
} else {
0f32
}
})
.collect();
let suppress_tokens = Tensor::new(suppress_tokens.as_slice(), device)?;
let sot_token = token_id(&tokenizer, m::SOT_TOKEN)?;
let transcribe_token = token_id(&tokenizer, m::TRANSCRIBE_TOKEN)?;
let translate_token = token_id(&tokenizer, m::TRANSLATE_TOKEN)?;
let eot_token = token_id(&tokenizer, m::EOT_TOKEN)?;
let no_speech_token = m::NO_SPEECH_TOKENS
.iter()
.find_map(|token| token_id(&tokenizer, token).ok());
let no_speech_token = match no_speech_token {
None => anyhow::bail!("unable to find any non-speech token"),
Some(n) => n,
};
Ok(Self {
model,
rng: rand::rngs::StdRng::seed_from_u64(seed),
tokenizer,
task,
timestamps,
verbose,
suppress_tokens,
sot_token,
transcribe_token,
translate_token,
eot_token,
no_speech_token,
language_token,
no_timestamps_token,
})
}
fn decode(&mut self, mel: &Tensor, t: f64) -> Result<DecodingResult> {
let model = &mut self.model;
let audio_features = model.encoder_forward(mel, true)?;
if self.verbose {
println!("audio features: {:?}", audio_features.dims());
}
let sample_len = model.config().max_target_positions / 2;
let mut sum_logprob = 0f64;
let mut no_speech_prob = f64::NAN;
let mut tokens = vec![self.sot_token];
if let Some(language_token) = self.language_token {
tokens.push(language_token);
}
match self.task {
None | Some(Task::Transcribe) => tokens.push(self.transcribe_token),
Some(Task::Translate) => tokens.push(self.translate_token),
}
if !self.timestamps {
tokens.push(self.no_timestamps_token);
}
for i in 0..sample_len {
let tokens_t = Tensor::new(tokens.as_slice(), mel.device())?;
// The model expects a batch dim but this inference loop does not handle
// it so we add it at this point.
let tokens_t = tokens_t.unsqueeze(0)?;
let ys = model.decoder_forward(&tokens_t, &audio_features, i == 0)?;
// Extract the no speech probability on the first iteration by looking at the first
            // token logits and the probability of the no-speech token.
if i == 0 {
let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?;
no_speech_prob = softmax(&logits, 0)?
.i(self.no_speech_token as usize)?
.to_scalar::<f32>()? as f64;
}
let (_, seq_len, _) = ys.dims3()?;
let logits = model
.decoder_final_linear(&ys.i((..1, seq_len - 1..))?)?
.i(0)?
.i(0)?;
// TODO: Besides suppress tokens, we should apply the heuristics from
// ApplyTimestampRules, i.e.:
// - Timestamps come in pairs, except before EOT.
// - Timestamps should be non-decreasing.
// - If the sum of the probabilities of timestamps is higher than any other tokens,
// only consider timestamps when sampling.
// https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L439
let logits = logits.broadcast_add(&self.suppress_tokens)?;
let next_token = if t > 0f64 {
let prs = softmax(&(&logits / t)?, 0)?;
let logits_v: Vec<f32> = prs.to_vec1()?;
let distr = rand::distributions::WeightedIndex::new(&logits_v)?;
distr.sample(&mut self.rng) as u32
} else {
let logits_v: Vec<f32> = logits.to_vec1()?;
logits_v
.iter()
.enumerate()
.max_by(|(_, u), (_, v)| u.total_cmp(v))
.map(|(i, _)| i as u32)
.unwrap()
};
tokens.push(next_token);
let prob = softmax(&logits, candle::D::Minus1)?
.i(next_token as usize)?
.to_scalar::<f32>()? as f64;
if next_token == self.eot_token || tokens.len() > model.config().max_target_positions {
break;
}
sum_logprob += prob.ln();
}
let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?;
let avg_logprob = sum_logprob / tokens.len() as f64;
Ok(DecodingResult {
tokens,
text,
avg_logprob,
no_speech_prob,
temperature: t,
compression_ratio: f64::NAN,
})
}
fn decode_with_fallback(&mut self, segment: &Tensor) -> Result<DecodingResult> {
for (i, &t) in m::TEMPERATURES.iter().enumerate() {
let dr: Result<DecodingResult> = self.decode(segment, t);
if i == m::TEMPERATURES.len() - 1 {
return dr;
}
// On errors, we try again with a different temperature.
match dr {
Ok(dr) => {
let needs_fallback = dr.compression_ratio > m::COMPRESSION_RATIO_THRESHOLD
|| dr.avg_logprob < m::LOGPROB_THRESHOLD;
if !needs_fallback || dr.no_speech_prob > m::NO_SPEECH_THRESHOLD {
return Ok(dr);
}
}
Err(err) => {
println!("Error running at {t}: {err}")
}
}
}
unreachable!()
}
fn run(&mut self, mel: &Tensor) -> Result<Vec<Segment>> {
let (_, _, content_frames) = mel.dims3()?;
let mut seek = 0;
let mut segments = vec![];
while seek < content_frames {
let start = std::time::Instant::now();
let time_offset = (seek * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64;
let segment_size = usize::min(content_frames - seek, m::N_FRAMES);
let mel_segment = mel.narrow(2, seek, segment_size)?;
let segment_duration = (segment_size * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64;
let dr = self.decode_with_fallback(&mel_segment)?;
seek += segment_size;
if dr.no_speech_prob > m::NO_SPEECH_THRESHOLD && dr.avg_logprob < m::LOGPROB_THRESHOLD {
println!("no speech detected, skipping {seek} {dr:?}");
continue;
}
let segment = Segment {
start: time_offset,
duration: segment_duration,
dr,
};
if self.timestamps {
println!(
"{:.1}s -- {:.1}s",
segment.start,
segment.start + segment.duration,
);
let mut tokens_to_decode = vec![];
let mut prev_timestamp_s = 0f32;
for &token in segment.dr.tokens.iter() {
if token == self.sot_token || token == self.eot_token {
continue;
}
// The no_timestamp_token is the last before the timestamp ones.
if token > self.no_timestamps_token {
let timestamp_s = (token - self.no_timestamps_token + 1) as f32 / 50.;
if !tokens_to_decode.is_empty() {
let text = self
.tokenizer
.decode(&tokens_to_decode, true)
.map_err(E::msg)?;
println!(" {:.1}s-{:.1}s: {}", prev_timestamp_s, timestamp_s, text);
tokens_to_decode.clear()
}
prev_timestamp_s = timestamp_s;
} else {
tokens_to_decode.push(token)
}
}
if !tokens_to_decode.is_empty() {
let text = self
.tokenizer
.decode(&tokens_to_decode, true)
.map_err(E::msg)?;
if !text.is_empty() {
println!(" {:.1}s-...: {}", prev_timestamp_s, text);
}
tokens_to_decode.clear()
}
} else {
println!(
"{:.1}s -- {:.1}s: {}",
segment.start,
segment.start + segment.duration,
segment.dr.text,
)
}
if self.verbose {
println!("{seek}: {segment:?}, in {:?}", start.elapsed());
}
segments.push(segment)
}
Ok(segments)
}
}
pub fn token_id(tokenizer: &Tokenizer, token: &str) -> candle::Result<u32> {
match tokenizer.token_to_id(token) {
None => candle::bail!("no token-id for {token}"),
Some(id) => Ok(id),
}
}
#[derive(Clone, Copy, Debug, ValueEnum)]
enum Task {
Transcribe,
Translate,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, ValueEnum)]
enum WhichModel {
Tiny,
#[value(name = "tiny.en")]
TinyEn,
Base,
#[value(name = "base.en")]
BaseEn,
Small,
#[value(name = "small.en")]
SmallEn,
Medium,
#[value(name = "medium.en")]
MediumEn,
Large,
LargeV2,
LargeV3,
LargeV3Turbo,
#[value(name = "distil-medium.en")]
DistilMediumEn,
#[value(name = "distil-large-v2")]
DistilLargeV2,
#[value(name = "distil-large-v3")]
DistilLargeV3,
}
impl WhichModel {
fn is_multilingual(&self) -> bool {
match self {
Self::Tiny
| Self::Base
| Self::Small
| Self::Medium
| Self::Large
| Self::LargeV2
| Self::LargeV3
| Self::LargeV3Turbo
| Self::DistilLargeV2
| Self::DistilLargeV3 => true,
Self::TinyEn | Self::BaseEn | Self::SmallEn | Self::MediumEn | Self::DistilMediumEn => {
false
}
}
}
fn model_and_revision(&self) -> (&'static str, &'static str) {
match self {
Self::Tiny => ("openai/whisper-tiny", "main"),
Self::TinyEn => ("openai/whisper-tiny.en", "refs/pr/15"),
Self::Base => ("openai/whisper-base", "refs/pr/22"),
Self::BaseEn => ("openai/whisper-base.en", "refs/pr/13"),
Self::Small => ("openai/whisper-small", "main"),
Self::SmallEn => ("openai/whisper-small.en", "refs/pr/10"),
Self::Medium => ("openai/whisper-medium", "main"),
Self::MediumEn => ("openai/whisper-medium.en", "main"),
Self::Large => ("openai/whisper-large", "refs/pr/36"),
Self::LargeV2 => ("openai/whisper-large-v2", "refs/pr/57"),
Self::LargeV3 => ("openai/whisper-large-v3", "main"),
Self::LargeV3Turbo => ("openai/whisper-large-v3-turbo", "main"),
Self::DistilMediumEn => ("distil-whisper/distil-medium.en", "main"),
Self::DistilLargeV2 => ("distil-whisper/distil-large-v2", "main"),
Self::DistilLargeV3 => ("distil-whisper/distil-large-v3", "main"),
}
}
}
#[derive(Parser, Debug)]
#[command(author, version, about, long_about = None)]
struct Args {
/// Run on CPU rather than on GPU.
#[arg(long)]
cpu: bool,
    /// The model to use, check out available models:
    /// https://huggingface.co/models?search=whisper
    #[arg(long)]
    model_id: Option<String>,
    /// The model revision to use.
    #[arg(long)]
    revision: Option<String>,
/// The model to be used, can be tiny, small, medium.
#[arg(long, default_value = "tiny.en")]
model: WhichModel,
/// The input to be processed, in wav format, will default to `jfk.wav`. Alternatively
/// this can be set to sample:jfk, sample:gb1, ... to fetch a sample from the following
/// repo: https://huggingface.co/datasets/Narsil/candle_demo/
#[arg(long)]
input: Option<String>,
/// The seed to use when generating random samples.
#[arg(long, default_value_t = 299792458)]
seed: u64,
/// Enable tracing (generates a trace-timestamp.json file).
#[arg(long)]
tracing: bool,
#[arg(long)]
quantized: bool,
/// Language.
#[arg(long)]
language: Option<String>,
    /// Task to perform; when no task is specified, the input tokens contain only the sot token,
    /// which can improve results in no-timestamp mode.
#[arg(long)]
task: Option<Task>,
/// Timestamps mode, this is not fully implemented yet.
#[arg(long)]
timestamps: bool,
/// Print the full DecodingResult structure rather than just the text.
#[arg(long)]
verbose: bool,
}
fn main() -> Result<()> {
use tracing_chrome::ChromeLayerBuilder;
use tracing_subscriber::prelude::*;
let args = Args::parse();
let _guard = if args.tracing {
let (chrome_layer, guard) = ChromeLayerBuilder::new().build();
tracing_subscriber::registry().with(chrome_layer).init();
Some(guard)
} else {
None
};
let device = candle_examples::device(args.cpu)?;
let (default_model, default_revision) = if args.quantized {
("lmz/candle-whisper", "main")
} else {
args.model.model_and_revision()
};
let default_model = default_model.to_string();
let default_revision = default_revision.to_string();
let (model_id, revision) = match (args.model_id, args.revision) {
(Some(model_id), Some(revision)) => (model_id, revision),
(Some(model_id), None) => (model_id, "main".to_string()),
(None, Some(revision)) => (default_model, revision),
(None, None) => (default_model, default_revision),
};
let (config_filename, tokenizer_filename, weights_filename, input) = {
let api = Api::new()?;
let dataset = api.dataset("Narsil/candle-examples".to_string());
let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision));
let sample = if let Some(input) = args.input {
if let Some(sample) = input.strip_prefix("sample:") {
dataset.get(&format!("samples_{sample}.wav"))?
} else {
std::path::PathBuf::from(input)
}
} else {
println!("No audio file submitted: Downloading https://huggingface.co/datasets/Narsil/candle_demo/blob/main/samples_jfk.wav");
dataset.get("samples_jfk.wav")?
};
let (config, tokenizer, model) = if args.quantized {
let ext = match args.model {
WhichModel::TinyEn => "tiny-en",
WhichModel::Tiny => "tiny",
_ => unimplemented!("no quantized support for {:?}", args.model),
};
(
repo.get(&format!("config-{ext}.json"))?,
repo.get(&format!("tokenizer-{ext}.json"))?,
repo.get(&format!("model-{ext}-q80.gguf"))?,
)
} else {
let config = repo.get("config.json")?;
let tokenizer = repo.get("tokenizer.json")?;
let model = repo.get("model.safetensors")?;
(config, tokenizer, model)
};
(config, tokenizer, model, sample)
};
let config: Config = serde_json::from_str(&std::fs::read_to_string(config_filename)?)?;
let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?;
let mel_bytes = match config.num_mel_bins {
80 => include_bytes!("melfilters.bytes").as_slice(),
128 => include_bytes!("melfilters128.bytes").as_slice(),
nmel => anyhow::bail!("unexpected num_mel_bins {nmel}"),
};
let mut mel_filters = vec![0f32; mel_bytes.len() / 4];
<byteorder::LittleEndian as byteorder::ByteOrder>::read_f32_into(mel_bytes, &mut mel_filters);
let (pcm_data, sample_rate) = pcm_decode::pcm_decode(input)?;
if sample_rate != m::SAMPLE_RATE as u32 {
anyhow::bail!("input file must have a {} sampling rate", m::SAMPLE_RATE)
}
println!("pcm data loaded {}", pcm_data.len());
let mel = audio::pcm_to_mel(&config, &pcm_data, &mel_filters);
let mel_len = mel.len();
let mel = Tensor::from_vec(
mel,
(1, config.num_mel_bins, mel_len / config.num_mel_bins),
&device,
)?;
println!("loaded mel: {:?}", mel.dims());
let mut model = if args.quantized {
let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf(
&weights_filename,
&device,
)?;
Model::Quantized(m::quantized_model::Whisper::load(&vb, config)?)
} else {
let vb =
unsafe { VarBuilder::from_mmaped_safetensors(&[weights_filename], m::DTYPE, &device)? };
Model::Normal(m::model::Whisper::load(&vb, config)?)
};
let language_token = match (args.model.is_multilingual(), args.language) {
(true, None) => Some(multilingual::detect_language(&mut model, &tokenizer, &mel)?),
(false, None) => None,
(true, Some(language)) => match token_id(&tokenizer, &format!("<|{language}|>")) {
Ok(token_id) => Some(token_id),
Err(_) => anyhow::bail!("language {language} is not supported"),
},
(false, Some(_)) => {
anyhow::bail!("a language cannot be set for non-multilingual models")
}
};
let mut dc = Decoder::new(
model,
tokenizer,
args.seed,
&device,
language_token,
args.task,
args.timestamps,
args.verbose,
)?;
dc.run(&mel)?;
Ok(())
}
| 6 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/whisper/multilingual.rs | use candle::{IndexOp, Result, Tensor, D};
use tokenizers::Tokenizer;
const LANGUAGES: [(&str, &str); 99] = [
("en", "english"),
("zh", "chinese"),
("de", "german"),
("es", "spanish"),
("ru", "russian"),
("ko", "korean"),
("fr", "french"),
("ja", "japanese"),
("pt", "portuguese"),
("tr", "turkish"),
("pl", "polish"),
("ca", "catalan"),
("nl", "dutch"),
("ar", "arabic"),
("sv", "swedish"),
("it", "italian"),
("id", "indonesian"),
("hi", "hindi"),
("fi", "finnish"),
("vi", "vietnamese"),
("he", "hebrew"),
("uk", "ukrainian"),
("el", "greek"),
("ms", "malay"),
("cs", "czech"),
("ro", "romanian"),
("da", "danish"),
("hu", "hungarian"),
("ta", "tamil"),
("no", "norwegian"),
("th", "thai"),
("ur", "urdu"),
("hr", "croatian"),
("bg", "bulgarian"),
("lt", "lithuanian"),
("la", "latin"),
("mi", "maori"),
("ml", "malayalam"),
("cy", "welsh"),
("sk", "slovak"),
("te", "telugu"),
("fa", "persian"),
("lv", "latvian"),
("bn", "bengali"),
("sr", "serbian"),
("az", "azerbaijani"),
("sl", "slovenian"),
("kn", "kannada"),
("et", "estonian"),
("mk", "macedonian"),
("br", "breton"),
("eu", "basque"),
("is", "icelandic"),
("hy", "armenian"),
("ne", "nepali"),
("mn", "mongolian"),
("bs", "bosnian"),
("kk", "kazakh"),
("sq", "albanian"),
("sw", "swahili"),
("gl", "galician"),
("mr", "marathi"),
("pa", "punjabi"),
("si", "sinhala"),
("km", "khmer"),
("sn", "shona"),
("yo", "yoruba"),
("so", "somali"),
("af", "afrikaans"),
("oc", "occitan"),
("ka", "georgian"),
("be", "belarusian"),
("tg", "tajik"),
("sd", "sindhi"),
("gu", "gujarati"),
("am", "amharic"),
("yi", "yiddish"),
("lo", "lao"),
("uz", "uzbek"),
("fo", "faroese"),
("ht", "haitian creole"),
("ps", "pashto"),
("tk", "turkmen"),
("nn", "nynorsk"),
("mt", "maltese"),
("sa", "sanskrit"),
("lb", "luxembourgish"),
("my", "myanmar"),
("bo", "tibetan"),
("tl", "tagalog"),
("mg", "malagasy"),
("as", "assamese"),
("tt", "tatar"),
("haw", "hawaiian"),
("ln", "lingala"),
("ha", "hausa"),
("ba", "bashkir"),
("jw", "javanese"),
("su", "sundanese"),
];
/// Returns the token id for the selected language.
pub fn detect_language(
model: &mut super::Model,
tokenizer: &Tokenizer,
mel: &Tensor,
) -> Result<u32> {
let (_bsize, _, seq_len) = mel.dims3()?;
let mel = mel.narrow(
2,
0,
usize::min(seq_len, model.config().max_source_positions),
)?;
let device = mel.device();
let language_token_ids = LANGUAGES
.iter()
.map(|(t, _)| crate::token_id(tokenizer, &format!("<|{t}|>")))
.collect::<Result<Vec<_>>>()?;
let sot_token = crate::token_id(tokenizer, crate::m::SOT_TOKEN)?;
let audio_features = model.encoder_forward(&mel, true)?;
let tokens = Tensor::new(&[[sot_token]], device)?;
let language_token_ids = Tensor::new(language_token_ids.as_slice(), device)?;
let ys = model.decoder_forward(&tokens, &audio_features, true)?;
let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?;
let logits = logits.index_select(&language_token_ids, 0)?;
let probs = candle_nn::ops::softmax(&logits, D::Minus1)?;
let probs = probs.to_vec1::<f32>()?;
let mut probs = LANGUAGES.iter().zip(probs.iter()).collect::<Vec<_>>();
probs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
for ((_, language), p) in probs.iter().take(5) {
println!("{language}: {p}")
}
let language = crate::token_id(tokenizer, &format!("<|{}|>", probs[0].0 .0))?;
Ok(language)
}
| 7 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/whisper/extract_weights.py | # Get the checkpoint from
# https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt
import torch
from safetensors.torch import save_file
data = torch.load("tiny.en.pt")
weights = {}
for k, v in data["model_state_dict"].items():
weights[k] = v.contiguous()
print(k, v.shape, v.dtype)
save_file(weights, "tiny.en.safetensors")
print(data["dims"])
| 8 |
0 | hf_public_repos/candle/candle-examples/examples | hf_public_repos/candle/candle-examples/examples/whisper/pcm_decode.rs | use symphonia::core::audio::{AudioBufferRef, Signal};
use symphonia::core::codecs::{DecoderOptions, CODEC_TYPE_NULL};
use symphonia::core::conv::FromSample;
fn conv<T>(samples: &mut Vec<f32>, data: std::borrow::Cow<symphonia::core::audio::AudioBuffer<T>>)
where
T: symphonia::core::sample::Sample,
f32: symphonia::core::conv::FromSample<T>,
{
samples.extend(data.chan(0).iter().map(|v| f32::from_sample(*v)))
}
pub(crate) fn pcm_decode<P: AsRef<std::path::Path>>(path: P) -> anyhow::Result<(Vec<f32>, u32)> {
// Open the media source.
let src = std::fs::File::open(path)?;
// Create the media source stream.
let mss = symphonia::core::io::MediaSourceStream::new(Box::new(src), Default::default());
// Create a probe hint using the file's extension. [Optional]
let hint = symphonia::core::probe::Hint::new();
// Use the default options for metadata and format readers.
let meta_opts: symphonia::core::meta::MetadataOptions = Default::default();
let fmt_opts: symphonia::core::formats::FormatOptions = Default::default();
// Probe the media source.
let probed = symphonia::default::get_probe().format(&hint, mss, &fmt_opts, &meta_opts)?;
// Get the instantiated format reader.
let mut format = probed.format;
// Find the first audio track with a known (decodeable) codec.
let track = format
.tracks()
.iter()
.find(|t| t.codec_params.codec != CODEC_TYPE_NULL)
.expect("no supported audio tracks");
// Use the default options for the decoder.
let dec_opts: DecoderOptions = Default::default();
// Create a decoder for the track.
let mut decoder = symphonia::default::get_codecs()
.make(&track.codec_params, &dec_opts)
.expect("unsupported codec");
let track_id = track.id;
let sample_rate = track.codec_params.sample_rate.unwrap_or(0);
let mut pcm_data = Vec::new();
// The decode loop.
while let Ok(packet) = format.next_packet() {
// Consume any new metadata that has been read since the last packet.
while !format.metadata().is_latest() {
format.metadata().pop();
}
// If the packet does not belong to the selected track, skip over it.
if packet.track_id() != track_id {
continue;
}
match decoder.decode(&packet)? {
AudioBufferRef::F32(buf) => pcm_data.extend(buf.chan(0)),
AudioBufferRef::U8(data) => conv(&mut pcm_data, data),
AudioBufferRef::U16(data) => conv(&mut pcm_data, data),
AudioBufferRef::U24(data) => conv(&mut pcm_data, data),
AudioBufferRef::U32(data) => conv(&mut pcm_data, data),
AudioBufferRef::S8(data) => conv(&mut pcm_data, data),
AudioBufferRef::S16(data) => conv(&mut pcm_data, data),
AudioBufferRef::S24(data) => conv(&mut pcm_data, data),
AudioBufferRef::S32(data) => conv(&mut pcm_data, data),
AudioBufferRef::F64(data) => conv(&mut pcm_data, data),
}
}
Ok((pcm_data, sample_rate))
}
| 9 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/batch_norm.rs | //! Batch Normalization.
//!
//! This layer applies Batch Normalization over a mini-batch of inputs as described in [`Batch
//! Normalization`]. The input is expected to have at least three dimensions.
//!
//! Note that the running statistics are only tracked when the layer is run in training mode
//! via `ModuleT::forward_t`; in evaluation mode the stored statistics are used as-is.
//!
//! [`Batch Normalization`]: https://arxiv.org/abs/1502.03167
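//!
//! A minimal inference-mode sketch (the shapes, statistics and epsilon below are illustrative
//! assumptions, not values taken from a real checkpoint):
//!
//! ```rust
//! use candle::{DType, Device, ModuleT, Tensor};
//! use candle_nn::BatchNorm;
//! // Four features with identity statistics: mean 0, variance 1, weight 1, bias 0.
//! let running_mean = Tensor::new(&[0f32, 0., 0., 0.], &Device::Cpu)?;
//! let running_var = Tensor::new(&[1f32, 1., 1., 1.], &Device::Cpu)?;
//! let weight = Tensor::new(&[1f32, 1., 1., 1.], &Device::Cpu)?;
//! let bias = Tensor::new(&[0f32, 0., 0., 0.], &Device::Cpu)?;
//! let bn = BatchNorm::new(4, running_mean, running_var, weight, bias, 1e-5)?;
//! // Input of shape (batch, features, length); eval mode normalizes with the running stats.
//! let xs = Tensor::zeros((1, 4, 2), DType::F32, &Device::Cpu)?;
//! let ys = bn.forward_t(&xs, false)?;
//! assert_eq!(ys.dims(), &[1, 4, 2]);
//! # Ok::<(), candle::Error>(())
//! ```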
use candle::{DType, Result, Tensor, Var};
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct BatchNormConfig {
pub eps: f64,
pub remove_mean: bool,
/// The meaning of affine here is different from LayerNorm: when false there is no learnable
    /// parameter at all: 1 is used for gamma and 0 for beta.
pub affine: bool,
/// Controls exponential moving average of running stats. Defaults to 0.1
///
/// `running_stat * (1.0 - momentum) + stat * momentum`.
pub momentum: f64,
}
impl Default for BatchNormConfig {
fn default() -> Self {
Self {
eps: 1e-5,
remove_mean: true,
affine: true,
momentum: 0.1,
}
}
}
impl From<f64> for BatchNormConfig {
fn from(eps: f64) -> Self {
Self {
eps,
..Default::default()
}
}
}
#[derive(Clone, Debug)]
pub struct BatchNorm {
running_mean: Var,
running_var: Var,
weight_and_bias: Option<(Tensor, Tensor)>,
remove_mean: bool,
eps: f64,
momentum: f64,
}
impl BatchNorm {
fn check_validity(&self, num_features: usize) -> Result<()> {
if self.eps < 0. {
candle::bail!("batch-norm eps cannot be negative {}", self.eps)
}
if !(0.0..=1.0).contains(&self.momentum) {
candle::bail!(
"batch-norm momentum must be between 0 and 1, is {}",
self.momentum
)
}
if self.running_mean.dims() != [num_features] {
candle::bail!(
"batch-norm running mean has unexpected shape {:?} should have shape [{num_features}]",
self.running_mean.shape(),
)
}
if self.running_var.dims() != [num_features] {
candle::bail!(
"batch-norm running variance has unexpected shape {:?} should have shape [{num_features}]",
self.running_var.shape(),
)
}
if let Some((ref weight, ref bias)) = self.weight_and_bias.as_ref() {
if weight.dims() != [num_features] {
candle::bail!(
"batch-norm weight has unexpected shape {:?} should have shape [{num_features}]",
weight.shape(),
)
}
            if bias.dims() != [num_features] {
                candle::bail!(
                    "batch-norm bias has unexpected shape {:?} should have shape [{num_features}]",
                    bias.shape(),
                )
}
}
Ok(())
}
pub fn new(
num_features: usize,
running_mean: Tensor,
running_var: Tensor,
weight: Tensor,
bias: Tensor,
eps: f64,
) -> Result<Self> {
let out = Self {
running_mean: Var::from_tensor(&running_mean)?,
running_var: Var::from_tensor(&running_var)?,
weight_and_bias: Some((weight, bias)),
remove_mean: true,
eps,
momentum: 0.1,
};
out.check_validity(num_features)?;
Ok(out)
}
pub fn new_no_bias(
num_features: usize,
running_mean: Tensor,
running_var: Tensor,
eps: f64,
) -> Result<Self> {
let out = Self {
running_mean: Var::from_tensor(&running_mean)?,
running_var: Var::from_tensor(&running_var)?,
weight_and_bias: None,
remove_mean: true,
eps,
momentum: 0.1,
};
out.check_validity(num_features)?;
Ok(out)
}
pub fn new_with_momentum(
num_features: usize,
running_mean: Tensor,
running_var: Tensor,
weight: Tensor,
bias: Tensor,
eps: f64,
momentum: f64,
) -> Result<Self> {
let out = Self {
running_mean: Var::from_tensor(&running_mean)?,
running_var: Var::from_tensor(&running_var)?,
weight_and_bias: Some((weight, bias)),
remove_mean: true,
eps,
momentum,
};
out.check_validity(num_features)?;
Ok(out)
}
pub fn new_no_bias_with_momentum(
num_features: usize,
running_mean: Tensor,
running_var: Tensor,
eps: f64,
momentum: f64,
) -> Result<Self> {
let out = Self {
running_mean: Var::from_tensor(&running_mean)?,
running_var: Var::from_tensor(&running_var)?,
weight_and_bias: None,
remove_mean: true,
eps,
momentum,
};
out.check_validity(num_features)?;
Ok(out)
}
pub fn running_mean(&self) -> &Tensor {
self.running_mean.as_tensor()
}
pub fn running_var(&self) -> &Tensor {
self.running_var.as_tensor()
}
pub fn eps(&self) -> f64 {
self.eps
}
pub fn weight_and_bias(&self) -> Option<(&Tensor, &Tensor)> {
self.weight_and_bias.as_ref().map(|v| (&v.0, &v.1))
}
pub fn momentum(&self) -> f64 {
self.momentum
}
pub fn forward_train(&self, x: &Tensor) -> Result<Tensor> {
let num_features = self.running_mean.as_tensor().dim(0)?;
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
if x.rank() < 2 {
candle::bail!(
"batch-norm input tensor must have at least two dimensions ({:?})",
x.shape()
)
}
if x.dim(1)? != num_features {
candle::bail!(
"batch-norm input doesn't have the expected number of features ({:?} <> {})",
x.shape(),
num_features
)
}
let x = x.to_dtype(internal_dtype)?;
let x = x.transpose(0, 1)?;
let x_dims_post_transpose = x.dims();
        // Flatten all the dimensions except the channel one as this performs a Spatial Batch
// Normalization.
let x = x.flatten_from(1)?.contiguous()?;
let x = if self.remove_mean {
// The mean is taken over dim 1 as this is the batch dim after the transpose(0, 1) above.
let mean_x = x.mean_keepdim(1)?;
let updated_running_mean = ((self.running_mean.as_tensor() * (1.0 - self.momentum))?
+ (mean_x.flatten_all()? * self.momentum)?)?;
self.running_mean.set(&updated_running_mean)?;
x.broadcast_sub(&mean_x)?
} else {
x
};
// The mean is taken over dim 1 as this is the batch dim after the transpose(0, 1) above.
let norm_x = x.sqr()?.mean_keepdim(1)?;
let updated_running_var = {
let batch_size = x.dim(1)? as f64;
let running_var_weight = 1.0 - self.momentum;
let norm_x_weight = self.momentum * batch_size / (batch_size - 1.0);
((self.running_var.as_tensor() * running_var_weight)?
+ (&norm_x.flatten_all()? * norm_x_weight)?)?
};
self.running_var.set(&updated_running_var)?;
let x = x
.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?
.to_dtype(x_dtype)?;
let x = match &self.weight_and_bias {
None => x,
Some((weight, bias)) => {
let weight = weight.reshape(((), 1))?;
let bias = bias.reshape(((), 1))?;
x.broadcast_mul(&weight)?.broadcast_add(&bias)?
}
};
x.reshape(x_dims_post_transpose)?.transpose(0, 1)
}
fn forward_eval(&self, x: &Tensor) -> Result<Tensor> {
let target_shape: Vec<usize> = x
.dims()
.iter()
.enumerate()
.map(|(idx, v)| if idx == 1 { *v } else { 1 })
.collect();
let target_shape = target_shape.as_slice();
let x = x
.broadcast_sub(
&self
.running_mean
.as_detached_tensor()
.reshape(target_shape)?,
)?
.broadcast_div(
&(self
.running_var
.as_detached_tensor()
.reshape(target_shape)?
+ self.eps)?
.sqrt()?,
)?;
match &self.weight_and_bias {
None => Ok(x),
Some((weight, bias)) => {
let weight = weight.reshape(target_shape)?;
let bias = bias.reshape(target_shape)?;
x.broadcast_mul(&weight)?.broadcast_add(&bias)
}
}
}
}
impl crate::ModuleT for BatchNorm {
fn forward_t(&self, x: &Tensor, train: bool) -> Result<Tensor> {
if train {
self.forward_train(x)
} else {
self.forward_eval(x)
}
}
}
pub fn batch_norm<C: Into<BatchNormConfig>>(
num_features: usize,
config: C,
vb: crate::VarBuilder,
) -> Result<BatchNorm> {
use crate::Init;
let config = config.into();
if config.eps < 0. {
candle::bail!("batch-norm eps cannot be negative {}", config.eps)
}
let running_mean = vb.get_with_hints(num_features, "running_mean", Init::Const(0.))?;
let running_var = vb.get_with_hints(num_features, "running_var", Init::Const(1.))?;
let weight_and_bias = if config.affine {
let weight = vb.get_with_hints(num_features, "weight", Init::Const(1.))?;
let bias = vb.get_with_hints(num_features, "bias", Init::Const(0.))?;
Some((weight, bias))
} else {
None
};
Ok(BatchNorm {
running_mean: Var::from_tensor(&running_mean)?,
running_var: Var::from_tensor(&running_var)?,
weight_and_bias,
remove_mean: config.remove_mean,
eps: config.eps,
momentum: config.momentum,
})
}
| 0 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/ops.rs | //! Tensor ops.
//!
use candle::{CpuStorage, DType, Layout, Module, Result, Shape, Tensor, D};
use rayon::prelude::*;
/// Applies the softmax function to the input tensor, rescaling it so that elements on
/// a slice of fixed index on dimension `dim` are between 0 and 1 and sum to 1.
///
/// ```rust
/// use candle::{Tensor, Device, test_utils::to_vec2_round};
/// let a = Tensor::new(&[[0f32, 1., 0., 1.], [-2., 2., 3., -3.]], &Device::Cpu)?;
/// let a = candle_nn::ops::softmax(&a, 1)?;
/// assert_eq!(
/// to_vec2_round(&a, 4)?,
/// &[
/// [0.1345, 0.3655, 0.1345, 0.3655],
/// [0.0049, 0.2671, 0.7262, 0.0018]
/// ]);
/// # Ok::<(), candle::Error>(())
/// ```
pub fn softmax<D: candle::shape::Dim>(xs: &Tensor, dim: D) -> Result<Tensor> {
let dim = dim.to_index(xs.shape(), "softmax")?;
let max = xs.max_keepdim(dim)?;
let diff = xs.broadcast_sub(&max)?;
let num = diff.exp()?;
let den = num.sum_keepdim(dim)?;
num.broadcast_div(&den)
}
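/// Applies the log-softmax function to the input tensor along dimension `d`.
///
/// A minimal sketch (the input values are illustrative); the result should match taking the
/// log of [`softmax`] up to floating point rounding:
///
/// ```rust
/// use candle::{Tensor, Device, test_utils::to_vec2_round};
/// let a = Tensor::new(&[[0f32, 1., 2.]], &Device::Cpu)?;
/// let log_sm = candle_nn::ops::log_softmax(&a, 1)?;
/// let reference = candle_nn::ops::softmax(&a, 1)?.log()?;
/// assert_eq!(to_vec2_round(&log_sm, 4)?, to_vec2_round(&reference, 4)?);
/// # Ok::<(), candle::Error>(())
/// ```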
pub fn log_softmax<D: candle::shape::Dim>(xs: &Tensor, d: D) -> Result<Tensor> {
let d = d.to_index(xs.shape(), "log-softmax")?;
let max = xs.max_keepdim(d)?;
let diff = xs.broadcast_sub(&max)?;
let sum_exp = diff.exp()?.sum_keepdim(d)?;
let log_sm = diff.broadcast_sub(&sum_exp.log()?)?;
Ok(log_sm)
}
pub fn silu(xs: &Tensor) -> Result<Tensor> {
xs.silu()
}
pub fn swiglu(xs: &Tensor) -> Result<Tensor> {
let xs = xs.chunk(2, D::Minus1)?;
&xs[0].silu()? * &xs[1]
}
struct Sigmoid;
impl candle::CustomOp1 for Sigmoid {
fn name(&self) -> &'static str {
"sigmoid"
}
fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
use candle::backend::BackendStorage;
fn fwd<T: num_traits::Float>(v: T) -> T {
(v.neg().exp() + T::one()).recip()
}
// FIXME: using `candle::map_dtype` causes compilation errors.
let storage = match storage {
CpuStorage::BF16(slice) => {
CpuStorage::BF16(candle::cpu_backend::unary_map(slice, layout, fwd))
}
CpuStorage::F16(slice) => {
CpuStorage::F16(candle::cpu_backend::unary_map(slice, layout, fwd))
}
CpuStorage::F32(slice) => {
CpuStorage::F32(candle::cpu_backend::unary_map(slice, layout, fwd))
}
CpuStorage::F64(slice) => {
CpuStorage::F64(candle::cpu_backend::unary_map(slice, layout, fwd))
}
_ => Err(candle::Error::UnsupportedDTypeForOp(
storage.dtype(),
self.name(),
))?,
};
Ok((storage, layout.shape().clone()))
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
storage: &candle::CudaStorage,
layout: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
use candle::backend::BackendStorage;
use candle::cuda_backend::cudarc::driver::{
CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, ValidAsZeroBits,
};
use candle::cuda_backend::SlicePtrOrNull;
use candle::cuda_backend::{kernel_name, kernels, Map1, WrapErr};
use candle::{CudaDevice, WithDType};
struct S;
impl Map1 for S {
fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>(
&self,
src: &CudaSlice<T>,
dev: &CudaDevice,
layout: &Layout,
) -> Result<CudaSlice<T>> {
let shape = layout.shape();
let dims = shape.dims();
let el_count = shape.elem_count();
let cfg = LaunchConfig::for_num_elems(el_count as u32);
let ds = SlicePtrOrNull::params_from_layout(dev, layout)?;
let src = &src.slice(layout.start_offset()..);
let func = dev.get_or_load_func(&kernel_name::<T>("usigmoid"), kernels::UNARY)?;
// SAFETY: Set later by running the kernel.
let out = unsafe { dev.alloc::<T>(el_count) }.w()?;
let params = (el_count, dims.len(), &ds, src, &out);
// SAFETY: ffi.
unsafe { func.launch(cfg, params) }.w()?;
Ok(out)
}
}
let dev = storage.device();
let slice = S.map(&storage.slice, dev, layout)?;
let dst = candle::CudaStorage {
slice,
device: dev.clone(),
};
Ok((dst, layout.shape().clone()))
}
#[cfg(feature = "metal")]
fn metal_fwd(
&self,
storage: &candle::MetalStorage,
layout: &Layout,
) -> Result<(candle::MetalStorage, Shape)> {
use candle::backend::BackendStorage;
use candle::MetalError;
let device = storage.device();
let dtype = storage.dtype();
let shape = layout.shape();
let el_count = shape.elem_count();
let buffer = device.new_buffer(el_count, dtype, "sigmoid")?;
let command_buffer = device.command_buffer()?;
command_buffer.set_label("sigmoid");
let src = candle_metal_kernels::BufferOffset {
buffer: storage.buffer(),
offset_in_bytes: layout.start_offset() * storage.dtype().size_in_bytes(),
};
match (el_count % 2, dtype, layout.is_contiguous()) {
(0, DType::BF16 | DType::F16, true) => {
use candle_metal_kernels::unary::contiguous_tiled;
let kernel_name = match dtype {
DType::F16 => contiguous_tiled::sigmoid::HALF,
DType::F32 => contiguous_tiled::sigmoid::FLOAT,
DType::BF16 => contiguous_tiled::sigmoid::BFLOAT,
dtype => {
candle::bail!(
"Metal contiguous_tiled unary sigmoid {dtype:?} not implemented"
)
}
};
candle_metal_kernels::call_unary_contiguous_tiled(
device.metal_device(),
&command_buffer,
device.kernels(),
kernel_name,
el_count,
src,
&buffer,
)
.map_err(MetalError::from)?;
}
(_, _, true) => {
use candle_metal_kernels::unary::contiguous;
let kernel_name = match dtype {
DType::F16 => contiguous::sigmoid::HALF,
DType::F32 => contiguous::sigmoid::FLOAT,
DType::BF16 => contiguous::sigmoid::BFLOAT,
dtype => {
candle::bail!("Metal contiguous unary sigmoid {dtype:?} not implemented")
}
};
candle_metal_kernels::call_unary_contiguous(
device.metal_device(),
&command_buffer,
device.kernels(),
kernel_name,
el_count,
src,
&buffer,
)
.map_err(MetalError::from)?;
}
(_, _, false) => {
use candle_metal_kernels::unary::strided;
let kernel_name = match dtype {
DType::F16 => strided::sigmoid::HALF,
DType::F32 => strided::sigmoid::FLOAT,
DType::BF16 => strided::sigmoid::BFLOAT,
dtype => {
candle::bail!("Metal strided unary sigmoid {dtype:?} not implemented")
}
};
let dst = candle_metal_kernels::BufferOffset::zero_offset(&buffer);
candle_metal_kernels::call_unary_strided(
device.metal_device(),
&command_buffer,
device.kernels(),
kernel_name,
layout.dims(),
src,
layout.stride(),
dst,
)
.map_err(MetalError::from)?;
}
}
let new_storage = candle::MetalStorage::new(buffer, device.clone(), el_count, dtype);
Ok((new_storage, layout.shape().clone()))
}
fn bwd(&self, _arg: &Tensor, res: &Tensor, grad_res: &Tensor) -> Result<Option<Tensor>> {
// d/dx sigmoid(x) = (1 - sigmoid(x)) * sigmoid(x)
let d_dx_sigmoid = res.ones_like()?.sub(res)?.mul(res)?;
Ok(Some(grad_res.mul(&d_dx_sigmoid)?))
}
}
pub fn sigmoid(xs: &Tensor) -> Result<Tensor> {
xs.apply_op1(Sigmoid)
}
pub fn hard_sigmoid(xs: &Tensor) -> Result<Tensor> {
// TODO: Should we have a specialized op for this?
((xs + 3.0)? / 6.0)?.clamp(0f32, 1f32)
}
pub fn leaky_relu(xs: &Tensor, negative_slope: f64) -> Result<Tensor> {
let zeros = xs.zeros_like()?;
xs.maximum(&zeros)? + xs.minimum(&zeros)? * negative_slope
}
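/// Applies dropout to the input: each element is zeroed with probability `drop_p` and the
/// surviving elements are scaled by `1 / (1 - drop_p)`.
///
/// A minimal sketch (shape and probability are illustrative); the output is random but keeps
/// the input shape:
///
/// ```rust
/// use candle::{DType, Device, Tensor};
/// let xs = Tensor::ones((2, 3), DType::F32, &Device::Cpu)?;
/// let ys = candle_nn::ops::dropout(&xs, 0.5)?;
/// assert_eq!(ys.dims(), &[2, 3]);
/// # Ok::<(), candle::Error>(())
/// ```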
pub fn dropout(xs: &Tensor, drop_p: f32) -> Result<Tensor> {
// This implementation is inefficient as it stores the full mask for the backward pass.
// Instead we could just store the seed and have a specialized kernel that would both
// generate the random mask and apply it.
    // Another, easier optimization would be to generate a boolean mask using just a bit of
// entropy per element rather than generating a full float per element.
if !(0. ..1.).contains(&drop_p) {
candle::bail!("dropout probability has to be in [0, 1), got {drop_p}")
}
let rand = Tensor::rand(0f32, 1f32, xs.shape(), xs.device())?;
let scale = 1.0 / (1.0 - drop_p as f64);
let drop_p = Tensor::new(drop_p, xs.device())?.broadcast_as(xs.shape())?;
let mask = (rand.ge(&drop_p)?.to_dtype(xs.dtype())? * scale)?;
xs * mask
}
#[derive(Clone, Debug)]
pub struct Dropout {
drop_p: f32,
}
impl Dropout {
pub fn new(drop_p: f32) -> Dropout {
Self { drop_p }
}
pub fn forward(&self, xs: &Tensor, train: bool) -> Result<Tensor> {
if train {
dropout(xs, self.drop_p)
} else {
Ok(xs.clone())
}
}
}
impl candle::ModuleT for Dropout {
fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> {
self.forward(xs, train)
}
}
struct SoftmaxLastDim;
impl candle::CustomOp1 for SoftmaxLastDim {
fn name(&self) -> &'static str {
"softmax-last-dim"
}
fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
fn softmax<T: candle::WithDType + num_traits::Float>(
src: &[T],
layout: &Layout,
) -> Result<(CpuStorage, Shape)> {
let src = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => &src[o1..o2],
};
let el_count = layout.shape().elem_count();
let dims = layout.shape().dims();
let dim_m1 = dims[dims.len() - 1];
let mut dst = vec![T::zero(); el_count];
src.par_chunks(dim_m1)
.zip(dst.par_chunks_mut(dim_m1))
.for_each(|(src, dst)| {
let mut max = T::neg_infinity();
unsafe { T::vec_reduce_max(src.as_ptr(), &mut max, dim_m1) };
for (s, d) in src.iter().zip(dst.iter_mut()) {
*d = (*s - max).exp();
}
let mut sum_exp = T::zero();
unsafe { T::vec_reduce_sum(dst.as_ptr(), &mut sum_exp, dim_m1) };
for d in dst.iter_mut() {
*d /= sum_exp
}
});
let storage = candle::WithDType::to_cpu_storage_owned(dst);
Ok((storage, Shape::from_dims(dims)))
}
match storage {
CpuStorage::BF16(slice) => softmax::<half::bf16>(slice, layout),
CpuStorage::F16(slice) => softmax::<half::f16>(slice, layout),
CpuStorage::F32(slice) => softmax::<f32>(slice, layout),
CpuStorage::F64(slice) => softmax::<f64>(slice, layout),
_ => candle::bail!("unsupported dtype for softmax {:?}", storage),
}
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
storage: &candle::CudaStorage,
layout: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
use candle::cuda_backend::cudarc::driver::{
CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig,
};
use candle::cuda_backend::{kernel_name, kernels, Map1, WrapErr};
use candle::{CudaDevice, WithDType};
struct S;
impl Map1 for S {
fn f<T: DeviceRepr + WithDType>(
&self,
src: &CudaSlice<T>,
dev: &CudaDevice,
layout: &Layout,
) -> Result<CudaSlice<T>> {
let src = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => src.slice(o1..o2),
};
let el = layout.shape().elem_count();
let dims = layout.shape().dims();
let dim_m1 = dims[dims.len() - 1];
let (n_rows, n_cols) = (el / dim_m1, dim_m1);
let cfg = LaunchConfig {
grid_dim: (n_rows as u32, 1, 1),
block_dim: (1, 32, 1),
shared_mem_bytes: 0,
};
let func = dev.get_or_load_func(&kernel_name::<T>("softmax"), kernels::REDUCE)?;
// SAFETY: Set later by running the kernel.
let dst = unsafe { dev.alloc::<T>(el) }.w()?;
let params = (&src, &dst, n_cols as i32);
// SAFETY: ffi.
unsafe { func.launch(cfg, params) }.w()?;
Ok(dst)
}
}
use candle::backend::BackendStorage;
let dev = storage.device();
let slice = S.map(&storage.slice, dev, layout)?;
let dst = candle::cuda_backend::CudaStorage {
slice,
device: dev.clone(),
};
Ok((dst, layout.shape().clone()))
}
#[cfg(feature = "metal")]
fn metal_fwd(
&self,
storage: &candle::MetalStorage,
layout: &Layout,
) -> Result<(candle::MetalStorage, Shape)> {
use candle::backend::BackendStorage;
let device = storage.device();
let command_buffer = device.command_buffer()?;
let kernels = device.kernels();
let name = match storage.dtype() {
DType::F32 => "softmax_f32",
DType::F16 => "softmax_f16",
DType::BF16 => "softmax_bf16",
dtype => candle::bail!("softmax-last-dim is not implemented for {dtype:?}"),
};
let n = layout.stride().len();
if !(layout.is_contiguous() && layout.stride()[n - 1] == 1) {
candle::bail!("Non contiguous softmax-last-dim is not implemented");
}
let last_dim = layout.dims()[layout.shape().rank() - 1];
let elem_count = layout.shape().elem_count();
let output = device.new_buffer(elem_count, storage.dtype(), "softmax")?;
candle_metal_kernels::call_last_softmax(
device.metal_device(),
&command_buffer,
kernels,
name,
elem_count,
last_dim,
storage.buffer(),
layout.start_offset() * storage.dtype().size_in_bytes(),
&output,
)
.map_err(candle::Error::wrap)?;
let newstorage =
candle::MetalStorage::new(output, device.clone(), elem_count, storage.dtype());
Ok((newstorage, layout.shape().clone()))
}
}
pub fn softmax_last_dim(xs: &Tensor) -> Result<Tensor> {
xs.apply_op1_no_bwd(&SoftmaxLastDim)
}
#[derive(Debug, Clone)]
struct RmsNorm {
eps: f32,
}
impl candle::CustomOp2 for RmsNorm {
fn name(&self) -> &'static str {
"rms-norm"
}
fn cpu_fwd(
&self,
s1: &CpuStorage,
l1: &Layout,
s2: &CpuStorage,
l2: &Layout,
) -> Result<(CpuStorage, Shape)> {
use candle::backend::BackendStorage;
let eps = self.eps;
fn inner<
T: candle::WithDType
+ num_traits::Float
+ num_traits::AsPrimitive<f32>
+ num_traits::FromPrimitive,
>(
src: &[T],
layout: &Layout,
alpha: &[T],
alpha_layout: &Layout,
eps: f32,
) -> Result<(CpuStorage, Shape)> {
let src = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => &src[o1..o2],
};
let alpha = match alpha_layout.contiguous_offsets() {
None => candle::bail!("alpha has to be contiguous"),
Some((o1, o2)) => &alpha[o1..o2],
};
let el_count = layout.shape().elem_count();
let dims = layout.shape().dims();
let dim_m1 = dims[dims.len() - 1];
let mut dst = vec![T::zero(); el_count];
src.par_chunks(dim_m1)
.zip(dst.par_chunks_mut(dim_m1))
.for_each(|(src, dst)| {
let sum2 = src
.iter()
.map(|&v| {
let v = v.as_();
v * v
})
.sum::<f32>();
let m = (sum2 / dim_m1 as f32 + eps).sqrt();
let m = T::from_f32(m).unwrap_or_else(T::nan);
for ((d, s), alpha) in dst.iter_mut().zip(src.iter()).zip(alpha) {
*d = *s / m * *alpha
}
});
let storage = candle::WithDType::to_cpu_storage_owned(dst);
Ok((storage, Shape::from_dims(dims)))
}
use CpuStorage as C;
match (s1, s2) {
(C::BF16(s1), C::BF16(s2)) => inner::<half::bf16>(s1, l1, s2, l2, eps),
(C::F16(s1), C::F16(s2)) => inner::<half::f16>(s1, l1, s2, l2, eps),
(C::F32(s1), C::F32(s2)) => inner::<f32>(s1, l1, s2, l2, eps),
_ => candle::bail!("unsupported dtype for rmsnorm {:?}", s1.dtype()),
}
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
s1: &candle::CudaStorage,
l1: &Layout,
s2: &candle::CudaStorage,
l2: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
use candle::cuda_backend::cudarc::driver::{
CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig,
};
use candle::cuda_backend::{kernel_name, kernels, Map2, WrapErr};
use candle::{CudaDevice, WithDType};
struct S {
eps: f32,
}
impl Map2 for S {
fn f<T: DeviceRepr + WithDType>(
&self,
src: &CudaSlice<T>,
layout: &Layout,
alpha: &CudaSlice<T>,
alpha_layout: &Layout,
dev: &CudaDevice,
) -> Result<CudaSlice<T>> {
let src = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => src.slice(o1..o2),
};
let alpha = match alpha_layout.contiguous_offsets() {
None => candle::bail!("alpha has to be contiguous"),
Some((o1, o2)) => alpha.slice(o1..o2),
};
let el = layout.shape().elem_count();
let dims = layout.shape().dims();
let dim_m1 = dims[dims.len() - 1];
let (n_rows, n_cols) = (el / dim_m1, dim_m1);
let block_size = if n_cols < 1024 { 32 } else { 1024 };
let cfg = LaunchConfig {
grid_dim: (n_rows as u32, 1, 1),
block_dim: (block_size, 1, 1),
shared_mem_bytes: 0,
};
let func = dev.get_or_load_func(&kernel_name::<T>("rmsnorm"), kernels::REDUCE)?;
// SAFETY: Set later by running the kernel.
let dst = unsafe { dev.alloc::<T>(el) }.w()?;
let params = (
&src,
&dst,
&alpha,
n_cols as i32,
block_size as i32,
self.eps,
);
// SAFETY: ffi.
unsafe { func.launch(cfg, params) }.w()?;
Ok(dst)
}
}
use candle::backend::BackendStorage;
let dev = s1.device();
let slice = S { eps: self.eps }.map(&s1.slice, l1, &s2.slice, l2, dev)?;
let dst = candle::cuda_backend::CudaStorage {
slice,
device: dev.clone(),
};
Ok((dst, l1.shape().clone()))
}
#[cfg(feature = "metal")]
fn metal_fwd(
&self,
s1: &candle::MetalStorage,
l1: &Layout,
s2: &candle::MetalStorage,
l2: &Layout,
) -> Result<(candle::MetalStorage, Shape)> {
use candle::backend::BackendStorage;
let device = s1.device();
let command_buffer = device.command_buffer()?;
let kernels = device.kernels();
let name = match (s1.dtype(), s2.dtype()) {
(DType::F32, DType::F32) => "rmsnorm_f32",
(DType::F16, DType::F16) => "rmsnorm_f16",
(DType::BF16, DType::BF16) => "rmsnorm_bf16",
(dt1, dt2) => candle::bail!("rmsnorm is not implemented for {dt1:?} {dt2:?}"),
};
if !(l1.is_contiguous() && l2.is_contiguous()) {
candle::bail!("Non contiguous rmsnorm is not implemented");
}
let last_dim = l1.dims()[l1.shape().rank() - 1];
let elem_count = l1.shape().elem_count();
let output = device.new_buffer(elem_count, s1.dtype(), "rmsnorm")?;
candle_metal_kernels::call_rms_norm(
device.metal_device(),
&command_buffer,
kernels,
name,
elem_count,
last_dim,
self.eps,
s1.buffer(),
l1.start_offset() * s1.dtype().size_in_bytes(),
s2.buffer(),
l2.start_offset() * s2.dtype().size_in_bytes(),
&output,
)
.map_err(candle::Error::wrap)?;
let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, s1.dtype());
Ok((newstorage, l1.shape().clone()))
}
}
pub fn rms_norm_slow(x: &Tensor, alpha: &Tensor, eps: f32) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + eps as f64)?.sqrt()?)?;
x_normed.to_dtype(x_dtype)?.broadcast_mul(alpha)
}
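/// RMS normalization over the last dimension: the input is scaled by
/// `1 / sqrt(mean(x^2) + eps)` and multiplied element-wise by `alpha`.
///
/// A minimal sketch (the values are illustrative); the custom op should agree with
/// [`rms_norm_slow`] up to floating point rounding:
///
/// ```rust
/// use candle::{Tensor, Device, test_utils::to_vec2_round};
/// let xs = Tensor::new(&[[1f32, 2., 3.]], &Device::Cpu)?;
/// let alpha = Tensor::new(&[1f32, 1., 1.], &Device::Cpu)?;
/// let fast = candle_nn::ops::rms_norm(&xs, &alpha, 1e-5)?;
/// let slow = candle_nn::ops::rms_norm_slow(&xs, &alpha, 1e-5)?;
/// assert_eq!(to_vec2_round(&fast, 4)?, to_vec2_round(&slow, 4)?);
/// # Ok::<(), candle::Error>(())
/// ```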
pub fn rms_norm(xs: &Tensor, alpha: &Tensor, eps: f32) -> Result<Tensor> {
let hidden_size_xs = xs.dim(D::Minus1)?;
let hidden_size_alpha = alpha.dims1()?;
if hidden_size_xs != hidden_size_alpha {
candle::bail!(
"shape mismatch in rms-norm {:?} {:?}",
xs.shape(),
alpha.shape()
)
}
xs.apply_op2_no_bwd(alpha, &RmsNorm { eps })
}
#[derive(Debug, Clone)]
struct LayerNorm {
eps: f32,
}
impl candle::CustomOp3 for LayerNorm {
fn name(&self) -> &'static str {
"layer-norm"
}
fn cpu_fwd(
&self,
s1: &CpuStorage,
l1: &Layout,
s2: &CpuStorage,
l2: &Layout,
s3: &CpuStorage,
l3: &Layout,
) -> Result<(CpuStorage, Shape)> {
use candle::backend::BackendStorage;
let eps = self.eps;
fn inner<
T: candle::WithDType
+ num_traits::Float
+ num_traits::AsPrimitive<f32>
+ num_traits::FromPrimitive,
>(
src: &[T],
layout: &Layout,
alpha: &[T],
alpha_layout: &Layout,
beta: &[T],
beta_layout: &Layout,
eps: f32,
) -> Result<(CpuStorage, Shape)> {
let src = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => &src[o1..o2],
};
let alpha = match alpha_layout.contiguous_offsets() {
None => candle::bail!("alpha has to be contiguous"),
Some((o1, o2)) => &alpha[o1..o2],
};
let beta = match beta_layout.contiguous_offsets() {
None => candle::bail!("beta has to be contiguous"),
Some((o1, o2)) => &beta[o1..o2],
};
let el_count = layout.shape().elem_count();
let dims = layout.shape().dims();
let dim_m1 = dims[dims.len() - 1];
let mut dst = vec![T::zero(); el_count];
src.par_chunks(dim_m1)
.zip(dst.par_chunks_mut(dim_m1))
.for_each(|(src, dst)| {
let mut sum = 0f32;
let mut sum2 = 0f32;
for v in src {
let v = v.as_();
sum += v;
sum2 += v * v;
}
let mean = sum / dim_m1 as f32;
let var = sum2 / dim_m1 as f32 - mean * mean;
let inv_std = (var + eps).sqrt().recip();
for ((d, s), (alpha, beta)) in
dst.iter_mut().zip(src.iter()).zip(alpha.iter().zip(beta))
{
let alpha = alpha.as_();
let beta = beta.as_();
let d_ = (s.as_() - mean) * inv_std * alpha + beta;
*d = T::from_f32(d_).unwrap_or_else(T::nan);
}
});
let storage = candle::WithDType::to_cpu_storage_owned(dst);
Ok((storage, Shape::from_dims(dims)))
}
use CpuStorage as C;
match (s1, s2, s3) {
(C::BF16(s1), C::BF16(s2), C::BF16(s3)) => {
inner::<half::bf16>(s1, l1, s2, l2, s3, l3, eps)
}
(C::F16(s1), C::F16(s2), C::F16(s3)) => inner::<half::f16>(s1, l1, s2, l2, s3, l3, eps),
(C::F32(s1), C::F32(s2), C::F32(s3)) => inner::<f32>(s1, l1, s2, l2, s3, l3, eps),
            _ => candle::bail!("unsupported dtype for layernorm {:?}", s1.dtype()),
}
}
#[cfg(feature = "cuda")]
fn cuda_fwd(
&self,
s1: &candle::CudaStorage,
l1: &Layout,
s2: &candle::CudaStorage,
l2: &Layout,
s3: &candle::CudaStorage,
l3: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
use candle::cuda_backend::cudarc::driver::{
CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig,
};
use candle::cuda_backend::{kernel_name, kernels, Map3, WrapErr};
use candle::{CudaDevice, WithDType};
struct S {
eps: f32,
}
impl Map3 for S {
fn f<T: DeviceRepr + WithDType>(
&self,
src: &CudaSlice<T>,
layout: &Layout,
alpha: &CudaSlice<T>,
alpha_layout: &Layout,
beta: &CudaSlice<T>,
beta_layout: &Layout,
dev: &CudaDevice,
) -> Result<CudaSlice<T>> {
let src = match layout.contiguous_offsets() {
None => candle::bail!("input has to be contiguous"),
Some((o1, o2)) => src.slice(o1..o2),
};
let alpha = match alpha_layout.contiguous_offsets() {
None => candle::bail!("alpha has to be contiguous"),
Some((o1, o2)) => alpha.slice(o1..o2),
};
let beta = match beta_layout.contiguous_offsets() {
None => candle::bail!("beta has to be contiguous"),
Some((o1, o2)) => beta.slice(o1..o2),
};
let el = layout.shape().elem_count();
let dims = layout.shape().dims();
let dim_m1 = dims[dims.len() - 1];
let (n_rows, n_cols) = (el / dim_m1, dim_m1);
let block_size = if n_cols < 1024 { 32 } else { 1024 };
let cfg = LaunchConfig {
grid_dim: (n_rows as u32, 1, 1),
block_dim: (block_size, 1, 1),
shared_mem_bytes: 0,
};
let func = dev.get_or_load_func(&kernel_name::<T>("layernorm"), kernels::REDUCE)?;
// SAFETY: Set later by running the kernel.
let dst = unsafe { dev.alloc::<T>(el) }.w()?;
let params = (
&src,
&dst,
&alpha,
&beta,
n_cols as i32,
block_size as i32,
self.eps,
);
// SAFETY: ffi.
unsafe { func.launch(cfg, params) }.w()?;
Ok(dst)
}
}
use candle::backend::BackendStorage;
let dev = s1.device();
let slice = S { eps: self.eps }.map(&s1.slice, l1, &s2.slice, l2, &s3.slice, l3, dev)?;
let dst = candle::cuda_backend::CudaStorage {
slice,
device: dev.clone(),
};
Ok((dst, l1.shape().clone()))
}
#[cfg(feature = "metal")]
fn metal_fwd(
&self,
s1: &candle::MetalStorage,
l1: &Layout,
s2: &candle::MetalStorage,
l2: &Layout,
s3: &candle::MetalStorage,
l3: &Layout,
) -> Result<(candle::MetalStorage, Shape)> {
use candle::backend::BackendStorage;
let device = s1.device();
let command_buffer = device.command_buffer()?;
let kernels = device.kernels();
let name = match (s1.dtype(), s2.dtype(), s3.dtype()) {
(DType::F32, DType::F32, DType::F32) => "layernorm_f32",
(DType::F16, DType::F16, DType::F16) => "layernorm_f16",
(DType::BF16, DType::BF16, DType::BF16) => "layernorm_bf16",
(dt1, dt2, dt3) => {
candle::bail!("layernorm is not implemented for {dt1:?} {dt2:?} {dt3:?}")
}
};
if !(l1.is_contiguous() && l2.is_contiguous() && l3.is_contiguous()) {
candle::bail!("Non contiguous layernorm is not implemented");
}
let last_dim = l1.dims()[l1.shape().rank() - 1];
let elem_count = l1.shape().elem_count();
let output = device.new_buffer(elem_count, s1.dtype(), "layernorm")?;
candle_metal_kernels::call_layer_norm(
device.metal_device(),
&command_buffer,
kernels,
name,
elem_count,
last_dim,
self.eps,
s1.buffer(),
l1.start_offset() * s1.dtype().size_in_bytes(),
s2.buffer(),
l2.start_offset() * s2.dtype().size_in_bytes(),
s3.buffer(),
l3.start_offset() * s3.dtype().size_in_bytes(),
&output,
)
.map_err(candle::Error::wrap)?;
let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, s1.dtype());
Ok((newstorage, l1.shape().clone()))
}
}
pub fn layer_norm_slow(x: &Tensor, alpha: &Tensor, beta: &Tensor, eps: f32) -> Result<Tensor> {
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let x = {
let mean_x = (x.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
x.broadcast_sub(&mean_x)?
};
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + eps as f64)?.sqrt()?)?;
x_normed
.to_dtype(x_dtype)?
.broadcast_mul(alpha)?
.broadcast_add(beta)
}
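/// Layer normalization over the last dimension of `xs`, where `alpha` and `beta` are the
/// per-element scale and shift (1d tensors whose size matches that last dimension). This
/// variant dispatches to a dedicated kernel and expects contiguous inputs; see
/// [`layer_norm_slow`] for a plain tensor-op fallback.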
pub fn layer_norm(xs: &Tensor, alpha: &Tensor, beta: &Tensor, eps: f32) -> Result<Tensor> {
let hidden_size_xs = xs.dim(D::Minus1)?;
let hidden_size_alpha = alpha.dims1()?;
let hidden_size_beta = beta.dims1()?;
if hidden_size_xs != hidden_size_alpha || hidden_size_xs != hidden_size_beta {
candle::bail!(
"shape mismatch in layer-norm src: {:?} alpha: {:?} beta: {:?}",
xs.shape(),
alpha.shape(),
beta.shape()
)
}
xs.apply_op3_no_bwd(alpha, beta, &LayerNorm { eps })
}
// https://pytorch.org/docs/stable/generated/torch.nn.PixelShuffle.html
pub fn pixel_shuffle(xs: &Tensor, upscale_factor: usize) -> Result<Tensor> {
let (b_size, c, h, w) = xs.dims4()?;
let out_c = c / upscale_factor / upscale_factor;
xs.reshape((b_size, out_c, upscale_factor, upscale_factor, h, w))?
.permute((0, 1, 4, 2, 5, 3))?
.reshape((b_size, out_c, h * upscale_factor, w * upscale_factor))
}
pub fn pixel_unshuffle(xs: &Tensor, downscale_factor: usize) -> Result<Tensor> {
let (b_size, c, h, w) = xs.dims4()?;
let out_c = c * downscale_factor * downscale_factor;
xs.reshape((
b_size,
c,
h / downscale_factor,
downscale_factor,
w / downscale_factor,
downscale_factor,
))?
.permute((0, 1, 3, 5, 2, 4))?
.reshape((b_size, out_c, h / downscale_factor, w / downscale_factor))
}
// https://pytorch.org/docs/stable/generated/torch.nn.ReplicationPad2d.html
pub fn replication_pad2d(xs: &Tensor, pad: usize) -> Result<Tensor> {
match pad {
0 => Ok(xs.clone()),
1 => {
let (_b_size, _c, h, w) = xs.dims4()?;
let (first, last) = (xs.narrow(3, 0, 1)?, xs.narrow(3, w - 1, 1)?);
let xs = Tensor::cat(&[&first, xs, &last], 3)?;
let (first, last) = (xs.narrow(2, 0, 1)?, xs.narrow(2, h - 1, 1)?);
Tensor::cat(&[&first, &xs, &last], 2)
}
n => candle::bail!("replication-pad with a size of {n} is not supported"),
}
}
#[derive(Clone, Debug)]
pub struct Identity;
impl Identity {
pub fn new() -> Identity {
Self
}
}
impl Default for Identity {
fn default() -> Self {
Self
}
}
impl Module for Identity {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
Ok(xs.clone())
}
}
#[allow(dead_code)]
struct Sdpa {
scale: f32,
softcapping: f32,
}
impl candle::CustomOp3 for Sdpa {
fn name(&self) -> &'static str {
"metal-sdpa"
}
fn cpu_fwd(
&self,
_s1: &CpuStorage,
_l1: &Layout,
_s2: &CpuStorage,
_l2: &Layout,
_s3: &CpuStorage,
_l3: &Layout,
) -> Result<(CpuStorage, Shape)> {
candle::bail!("SDPA has no cpu impl")
}
#[cfg(feature = "metal")]
fn metal_fwd(
&self,
q: &candle::MetalStorage,
q_l: &Layout,
k: &candle::MetalStorage,
k_l: &Layout,
v: &candle::MetalStorage,
v_l: &Layout,
) -> Result<(candle::MetalStorage, Shape)> {
use candle::backend::BackendStorage;
use candle_metal_kernels::SdpaDType;
let device = q.device();
let out_dims = vec![q_l.dim(0)?, q_l.dim(1)?, q_l.dim(2)?, v_l.dim(3)?];
let elem_count: usize = out_dims.iter().product();
let output = device.new_buffer(elem_count, q.dtype(), "sdpa_o")?;
// q,k must have matching emb dim
if q_l.dim(D::Minus1)? != k_l.dim(D::Minus1)? {
candle::bail!("`q` and `k` last dims must match");
}
// k,v must have matching n kv heads
if v_l.dim(D::Minus(3))? != k_l.dim(D::Minus(3))? {
candle::bail!("`k` and `v` head dims must match");
}
// n_heads % n_kv_heads == 0; n_heads >= 1, n_kv_heads >= 1.
if q_l.dim(D::Minus(3))? % k_l.dim(D::Minus(3))? != 0 {
candle::bail!("query `n_heads` must be a multiple of `n_kv_heads`");
}
let k_head = k_l.dim(D::Minus1)?;
let q_head = q_l.dim(D::Minus1)?;
let q_seq = q_l.dim(2)?;
let mut implementation_supports_use_case = q_head == k_head;
let supported_head_dim =
q_head == 32 || q_head == 64 || q_head == 96 || q_head == 128 || q_head == 256;
const SDPA_FULL_THRESHOLD: usize = 2;
let supports_sdpa_full =
q_seq >= SDPA_FULL_THRESHOLD && supported_head_dim && q_head == k_head;
let supports_sdpa_vector = q_seq == 1 && supported_head_dim;
implementation_supports_use_case &= supports_sdpa_full || supports_sdpa_vector;
if !supported_head_dim {
candle::bail!(
"Meta SDPA does not support q head dim {q_head}: q dims {:?}, k dims {:?}, v dims {:?}.",
q_l.dims(),
k_l.dims(),
v_l.dims()
);
}
if !implementation_supports_use_case {
candle::bail!(
"Meta SDPA does not support q dims {:?}, k dims {:?}, v dims {:?}.",
q_l.dims(),
k_l.dims(),
v_l.dims()
);
}
for t in [k.dtype(), v.dtype()] {
if q.dtype() != t {
candle::bail!("all q, k, v dtypes must match.");
}
}
let itype = match q.dtype() {
DType::BF16 => SdpaDType::BF16,
DType::F16 => SdpaDType::F16,
DType::F32 => SdpaDType::F32,
other => candle::bail!("unsupported sdpa type {other:?}"),
};
let command_buffer = q.device().command_buffer()?;
if supports_sdpa_vector {
command_buffer.set_label("vector_attention");
candle_metal_kernels::call_sdpa_vector(
q.device().device(),
&command_buffer,
q.device().kernels(),
q_l.start_offset(),
q_l.dims(),
q.buffer(),
k_l.start_offset(),
k_l.dims(),
k_l.stride(),
k.buffer(),
v_l.start_offset(),
v_l.stride(),
v.buffer(),
&output,
self.scale,
self.softcapping,
itype,
)
.map_err(candle::Error::wrap)?;
} else if supports_sdpa_full {
if q_l.dim(2)? != k_l.dim(2)? {
candle::bail!(
"query and key sequence length must be equal if using full metal sdpa"
)
}
command_buffer.set_label("full_attention");
candle_metal_kernels::call_sdpa_full(
q.device().device(),
&command_buffer,
q.device().kernels(),
q_l.start_offset(),
q_l.dims(),
q.buffer(),
k_l.start_offset(),
k.buffer(),
v_l.start_offset(),
v.buffer(),
&output,
self.scale,
self.softcapping,
itype,
)
.map_err(candle::Error::wrap)?;
} else {
candle::bail!("must be vector or full sdpa kernel");
}
let newstorage = candle::MetalStorage::new(output, device.clone(), elem_count, q.dtype());
Ok((newstorage, Shape::from_dims(&out_dims)))
}
}
/// Scaled dot product attention with a fused kernel.
///
/// Computes softmax(qk^T*scale)v.
///
/// **Inputs shapes:**
/// - `q`: (bs, qhead, seq, hidden)
/// - `k`: (bs, kv_head, kv_seq, hidden)
/// - `v`: (bs, kv_head, kv_seq, v_hidden)
/// - `scale` is applied before softmax.
/// - If `softcapping` != 1.0:
/// - Computation is: softmax(tanh(qk^T*scale/cap)*cap)v
///
/// **Output shape:** (bs, qhead, seq, v_hidden)
///
/// **Supported head dims:** 32, 64, 96, 128, 256.
///
/// ## On Metal:
/// - If `seq` == 1:
/// - Use a vectorized kernel
/// - Supports `seq` != `kv_seq` (cross attn. support)
/// - Supports GQA when `qhead` is a multiple of `kv_head`
/// - Otherwise:
/// - Use an alternate kernel
/// - Requires `seq` == `kv_seq`
/// - GQA is not supported (requires `qhead` == `kv_head`)
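/// # Example
///
/// A minimal illustrative sketch (the shapes are arbitrary; this only runs on a Metal device
/// since there is no CPU implementation):
///
/// ```no_run
/// use candle::{DType, Device, Tensor};
/// # fn main() -> candle::Result<()> {
/// let device = Device::new_metal(0)?;
/// let q = Tensor::zeros((1, 8, 1, 64), DType::F32, &device)?;
/// let k = Tensor::zeros((1, 8, 16, 64), DType::F32, &device)?;
/// let v = Tensor::zeros((1, 8, 16, 64), DType::F32, &device)?;
/// let scale = 1f32 / (64f32).sqrt();
/// // A softcapping value of 1.0 disables the capping.
/// let out = candle_nn::ops::sdpa(&q, &k, &v, scale, 1.0)?;
/// assert_eq!(out.dims(), &[1, 8, 1, 64]);
/// # Ok(()) }
/// ```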
pub fn sdpa(q: &Tensor, k: &Tensor, v: &Tensor, scale: f32, softcapping: f32) -> Result<Tensor> {
q.apply_op3_no_bwd(k, v, &Sdpa { scale, softcapping })
}
| 1 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/loss.rs | //! Loss Calculations
//!
use candle::{Result, Tensor};
/// The negative log likelihood loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain log probabilities.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N`.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn nll(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
let b_sz = match target.dims() {
&[b_sz] => b_sz,
dims => candle::bail!("the target tensor should have a single dimension ({dims:?})"),
};
match inp.dims() {
&[inp_b_sz, _] => {
if inp_b_sz != b_sz {
candle::bail!("batch size mismatch between inp ({inp_b_sz}) and target ({b_sz})")
}
}
dims => candle::bail!("the target tensor should have two dimensions ({dims:?})"),
}
inp.gather(&target.unsqueeze(1)?, 1)?
.sum_all()?
.affine(-1f64 / b_sz as f64, 0.)
}
/// The cross-entropy loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain raw logits.
/// * [target]: The ground truth labels as a tensor of u32 of dimension `N`.
///
/// The resulting tensor is a scalar containing the average value over the batch.
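///
/// # Example
///
/// A small illustrative sketch (the logits and labels below are arbitrary):
///
/// ```rust
/// use candle::{Tensor, Device::Cpu};
/// # fn main() -> candle::Result<()> {
/// // Two samples with three classes each, given as raw logits.
/// let inp = Tensor::new(&[[1f32, 2., 3.], [3., 1., 2.]], &Cpu)?;
/// let target = Tensor::new(&[2u32, 0], &Cpu)?;
/// let loss = candle_nn::loss::cross_entropy(&inp, &target)?;
/// assert_eq!(loss.rank(), 0); // a scalar, averaged over the batch
/// # Ok(()) }
/// ```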
pub fn cross_entropy(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
if inp.rank() != 2 {
candle::bail!("cross_entropy expects an input tensor of rank 2")
}
let inp = crate::ops::log_softmax(inp, 1)?;
nll(&inp, target)
}
/// The mean squared error loss.
pub fn mse(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
(inp - target)?.sqr()?.mean_all()
}
/// The binary cross-entropy with logit loss.
///
/// Arguments
///
/// * [inp]: The input tensor of dimensions `N, C` where `N` is the batch size and `C` the number
/// of categories. This is expected to contain raw logits.
/// * [target]: The ground truth labels as a tensor with the same float dtype as `inp`, of dimension
/// `N, C` where `N` is the batch size and `C` the number of categories, typically filled with 0s and 1s.
///
/// The resulting tensor is a scalar containing the average value over the batch.
pub fn binary_cross_entropy_with_logit(inp: &Tensor, target: &Tensor) -> Result<Tensor> {
let inp = crate::ops::sigmoid(inp)?;
let left_side = target * inp.log()?;
let right_side = (target.affine(-1., 1.))? * inp.affine(-1., 1.)?.log()?;
let loss = left_side? + right_side?;
let loss = loss?.neg()?.mean_all()?;
Ok(loss)
}
| 2 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/linear.rs | //! Linear layer
//!
//! This layer applies a linear transformation to the incoming data, `y = x@w.t() + b`.
//! The bias is optional. The `forward` method can be used to apply the layer, it supports input
//! with a batch dimension (so of shape `(b_sz, in_c)`) or without (of shape `(in_c,)`), the
//! output has shape `(b_sz, out_c)` and `(out_c,)` respectively.
//!
//! ```rust
//! use candle::{Tensor, Device::Cpu};
//! use candle_nn::{Linear, Module};
//! # fn main() -> candle::Result<()> {
//!
//! let w = Tensor::new(&[[1f32, 2.], [3., 4.], [5., 6.]], &Cpu)?;
//! let layer = Linear::new(w, None); // Use no bias.
//! let xs = Tensor::new(&[[10f32, 100.]], &Cpu)?;
//! let ys = layer.forward(&xs)?;
//! assert_eq!(ys.to_vec2::<f32>()?, &[[210.0, 430.0, 650.0]]);
//! # Ok(()) }
//! ```
use candle::{Result, Tensor};
#[derive(Clone, Debug)]
pub struct Linear {
weight: Tensor,
bias: Option<Tensor>,
}
impl Linear {
pub fn new(weight: Tensor, bias: Option<Tensor>) -> Self {
Self { weight, bias }
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
}
impl super::Module for Linear {
fn forward(&self, x: &Tensor) -> candle::Result<Tensor> {
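        // Transpose the weight and broadcast it over the leading batch dimensions (if any)
        // so that the shapes line up for `matmul`.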
let w = match *x.dims() {
[b1, b2, _, _] => self.weight.broadcast_left((b1, b2))?.t()?,
[bsize, _, _] => self.weight.broadcast_left(bsize)?.t()?,
_ => self.weight.t()?,
};
let x = x.matmul(&w)?;
match &self.bias {
None => Ok(x),
Some(bias) => x.broadcast_add(bias),
}
}
}
/// Create or initialize a new linear layer.
///
/// This uses some default names for weights and biases, namely `"weight"` and `"bias"`.
pub fn linear(in_dim: usize, out_dim: usize, vb: crate::VarBuilder) -> Result<Linear> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints((out_dim, in_dim), "weight", init_ws)?;
let bound = 1. / (in_dim as f64).sqrt();
let init_bs = crate::Init::Uniform {
lo: -bound,
up: bound,
};
let bs = vb.get_with_hints(out_dim, "bias", init_bs)?;
Ok(Linear::new(ws, Some(bs)))
}
/// Create or initialize a new linear layer without biases.
pub fn linear_no_bias(in_dim: usize, out_dim: usize, vb: crate::VarBuilder) -> Result<Linear> {
let init_ws = crate::init::DEFAULT_KAIMING_NORMAL;
let ws = vb.get_with_hints((out_dim, in_dim), "weight", init_ws)?;
Ok(Linear::new(ws, None))
}
pub fn linear_b(
in_dim: usize,
out_dim: usize,
bias: bool,
vb: crate::VarBuilder,
) -> Result<Linear> {
if bias {
linear(in_dim, out_dim, vb)
} else {
linear_no_bias(in_dim, out_dim, vb)
}
}
| 3 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/var_map.rs | //! A `VarMap` is a store that holds named variables.
//!
use candle::{DType, Device, Result, Shape, Tensor, Var};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
/// A `VarMap` is a store that holds named variables. Variables can be retrieved from the stores
/// and new variables can be added by providing some initialization config in case they are
/// missing.
/// `VarMap` structures can be serialized in the safetensors format.
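///
/// # Example
///
/// A minimal usage sketch, pairing a `VarMap` with a `VarBuilder`:
///
/// ```rust
/// use candle::{DType, Device::Cpu};
/// # fn main() -> candle::Result<()> {
/// let varmap = candle_nn::VarMap::new();
/// let vb = candle_nn::VarBuilder::from_varmap(&varmap, DType::F32, &Cpu);
/// // Building a layer through the builder registers its variables in the map.
/// let _layer = candle_nn::linear(4, 2, vb.pp("lin"))?;
/// assert_eq!(varmap.all_vars().len(), 2); // weight + bias
/// # Ok(()) }
/// ```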
#[derive(Clone)]
pub struct VarMap {
data: Arc<Mutex<HashMap<String, Var>>>,
}
impl VarMap {
/// Create a new empty `VarMap`.
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
let data = Arc::new(Mutex::new(HashMap::new()));
Self { data }
}
/// Retrieve all the variables currently stored in the map.
pub fn all_vars(&self) -> Vec<Var> {
let tensor_data = self.data.lock().unwrap();
#[allow(clippy::map_clone)]
tensor_data.values().map(|c| c.clone()).collect::<Vec<_>>()
}
/// Save the map in the safetensors format.
pub fn save<P: AsRef<std::path::Path>>(&self, path: P) -> Result<()> {
let tensor_data = self.data.lock().unwrap();
let data = tensor_data.iter().map(|(k, v)| (k, v.as_tensor()));
safetensors::tensor::serialize_to_file(data, &None, path.as_ref())?;
Ok(())
}
/// Load some values from a safetensors file and modify the existing variables to have these
/// values.
///
/// Note that values for variables that are currently not in the map are not kept.
pub fn load<P: AsRef<std::path::Path>>(&mut self, path: P) -> Result<()> {
let path = path.as_ref();
let data = unsafe { candle::safetensors::MmapedSafetensors::new(path)? };
let mut tensor_data = self.data.lock().unwrap();
for (name, var) in tensor_data.iter_mut() {
let data = data.load(name, var.device())?;
if let Err(err) = var.set(&data) {
candle::bail!("error setting {name} using data from {path:?}: {err}",)
}
}
Ok(())
}
/// Set a named variable to some value.
pub fn set_one<K: AsRef<str>, V: AsRef<Tensor>>(&mut self, name: K, value: V) -> Result<()> {
let tensor_data = self.data.lock().unwrap();
let name = name.as_ref();
match tensor_data.get(name) {
None => candle::bail!("cannot find {name} in VarMap"),
Some(var) => {
if let Err(err) = var.set(value.as_ref()) {
candle::bail!("error setting {name}: {err}",)
}
}
}
Ok(())
}
/// Set some named variables to some values.
///
/// If an error is returned, some of the variables might have already been set to their new
/// values.
pub fn set<I: Iterator<Item = (K, V)>, K: AsRef<str>, V: AsRef<Tensor>>(
&mut self,
iter: I,
) -> Result<()> {
let tensor_data = self.data.lock().unwrap();
for (name, value) in iter {
let name = name.as_ref();
match tensor_data.get(name) {
None => candle::bail!("cannot find {name} in VarMap"),
Some(var) => {
if let Err(err) = var.set(value.as_ref()) {
candle::bail!("error setting {name}: {err}",)
}
}
}
}
Ok(())
}
/// Retrieve or add a new variable.
pub fn get<S: Into<Shape>>(
&self,
shape: S,
path: &str,
init: crate::Init,
dtype: DType,
device: &Device,
) -> Result<Tensor> {
let shape = shape.into();
let mut tensor_data = self.data.lock().unwrap();
if let Some(tensor) = tensor_data.get(path) {
let tensor_shape = tensor.shape();
if &shape != tensor_shape {
candle::bail!("shape mismatch on {path}: {shape:?} <> {tensor_shape:?}")
}
return Ok(tensor.as_tensor().clone());
}
let var = init.var(shape, dtype, device)?;
let tensor = var.as_tensor().clone();
tensor_data.insert(path.to_string(), var);
Ok(tensor)
}
pub fn data(&self) -> &Mutex<HashMap<String, Var>> {
&self.data
}
}
| 4 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/sequential.rs | //! Sequential Layer
//!
//! A sequential layer used to chain multiple layers and closures.
use candle::{Module, Result, Tensor};
/// A sequential layer combining multiple other layers.
pub struct Sequential {
layers: Vec<Box<dyn Module>>,
}
/// Creates a new empty sequential layer.
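///
/// # Example
///
/// A small illustrative sketch chaining two closures:
///
/// ```rust
/// use candle::{Module, Tensor, Device::Cpu};
/// # fn main() -> candle::Result<()> {
/// let model = candle_nn::seq()
///     .add_fn(|xs| xs * 2.0)
///     .add_fn(|xs| xs + 1.0);
/// let xs = Tensor::new(&[1f32, 2., 3.], &Cpu)?;
/// let ys = model.forward(&xs)?;
/// assert_eq!(ys.to_vec1::<f32>()?, &[3.0, 5.0, 7.0]);
/// # Ok(()) }
/// ```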
pub fn seq() -> Sequential {
Sequential { layers: vec![] }
}
impl Sequential {
/// The number of sub-layers embedded in this layer.
pub fn len(&self) -> i64 {
self.layers.len() as i64
}
/// Returns true if this layer does not have any sub-layer.
pub fn is_empty(&self) -> bool {
self.layers.is_empty()
}
}
impl Module for Sequential {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs)?
}
Ok(xs)
}
}
impl Sequential {
/// Appends a layer after all the current layers.
#[allow(clippy::should_implement_trait)]
pub fn add<M: Module + 'static>(mut self, layer: M) -> Self {
self.layers.push(Box::new(layer));
self
}
/// Appends a closure after all the current layers.
pub fn add_fn<F>(self, f: F) -> Self
where
F: 'static + Fn(&Tensor) -> Result<Tensor> + Send + Sync,
{
self.add(super::func(f))
}
/// Applies the forward pass and returns the output for each layer.
pub fn forward_all(&self, xs: &Tensor) -> Result<Vec<Tensor>> {
let mut vec = Vec::with_capacity(self.layers.len());
let mut xs = xs.clone();
for layer in self.layers.iter() {
xs = layer.forward(&xs)?;
vec.push(xs.clone())
}
Ok(vec)
}
}
| 5 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/layer_norm.rs | //! Layer Normalization.
//!
//! This layer applies Layer Normalization over a mini-batch of inputs as described in [`Layer
//! Normalization`]. The input is expected to have three dimensions: a batch dimension, a length,
//! and a hidden size, the normalization is applied over the last dimension.
//!
//! # Example
//!
//! ```rust
//! use candle::{Tensor, Device::Cpu, test_utils::to_vec3_round};
//! use candle_nn::{LayerNorm, Module};
//! # fn main() -> candle::Result<()> {
//!
//! let w = Tensor::new(&[1f32, 1f32, 1f32], &Cpu)?;
//! let b = Tensor::new(&[0f32, 0f32, 0f32], &Cpu)?;
//! let layer = LayerNorm::new(w, b, 1e-5);
//!
//! let xs = Tensor::new(
//! &[[[1f32, 2., 3.], [4., 5., 6.], [9., 8., 7.]]],
//! &Cpu)?;
//! let ys = layer.forward(&xs)?;
//! assert_eq!(
//! to_vec3_round(&ys, 4)?,
//! &[[[-1.2247, 0.0, 1.2247],
//! [-1.2247, 0.0, 1.2247],
//! [ 1.2247, 0.0, -1.2247]]]);
//! # Ok(()) }
//! ```
//!
//! [`Layer Normalization`]: https://arxiv.org/abs/1607.06450
use candle::{DType, Module, Result, Tensor, D};
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct LayerNormConfig {
pub eps: f64,
/// Whether to remove the mean or not, the default is true and when set to false, this turns
/// this layer into RmsNorm.
pub remove_mean: bool,
pub affine: bool,
}
impl Default for LayerNormConfig {
fn default() -> Self {
Self {
eps: 1e-5,
remove_mean: true,
affine: true,
}
}
}
impl From<f64> for LayerNormConfig {
fn from(eps: f64) -> Self {
Self {
eps,
remove_mean: true,
affine: true,
}
}
}
// This layer norm version handles a weight, an optional bias, and can optionally skip the mean
// removal, in which case it behaves like RmsNorm.
#[derive(Clone, Debug)]
pub struct LayerNorm {
weight: Tensor,
bias: Option<Tensor>,
remove_mean: bool,
eps: f64,
}
impl LayerNorm {
pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self {
Self {
weight,
bias: Some(bias),
remove_mean: true,
eps,
}
}
pub fn new_no_bias(weight: Tensor, eps: f64) -> Self {
Self {
weight,
bias: None,
remove_mean: true,
eps,
}
}
pub fn rms_norm(weight: Tensor, eps: f64) -> Self {
Self {
weight,
bias: None,
remove_mean: false,
eps,
}
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn bias(&self) -> Option<&Tensor> {
self.bias.as_ref()
}
}
impl Module for LayerNorm {
fn forward(&self, x: &Tensor) -> Result<Tensor> {
if x.is_contiguous() && self.remove_mean {
if let Some(bias) = self.bias.as_ref() {
return crate::ops::layer_norm(x, &self.weight, bias, self.eps as f32);
}
}
let x_dtype = x.dtype();
let internal_dtype = match x_dtype {
DType::F16 | DType::BF16 => DType::F32,
d => d,
};
let hidden_size = x.dim(D::Minus1)?;
let x = x.to_dtype(internal_dtype)?;
let x = if self.remove_mean {
let mean_x = (x.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
x.broadcast_sub(&mean_x)?
} else {
x
};
let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?;
let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?;
let x = x_normed.to_dtype(x_dtype)?.broadcast_mul(&self.weight)?;
match &self.bias {
None => Ok(x),
Some(bias) => x.broadcast_add(bias),
}
}
}
pub fn layer_norm<C: Into<LayerNormConfig>>(
size: usize,
config: C,
vb: crate::VarBuilder,
) -> Result<LayerNorm> {
let config = config.into();
let weight = vb.get_with_hints(size, "weight", crate::Init::Const(1.))?;
let bias = if config.affine {
Some(vb.get_with_hints(size, "bias", crate::Init::Const(0.))?)
} else {
None
};
Ok(LayerNorm {
weight,
bias,
remove_mean: config.remove_mean,
eps: config.eps,
})
}
/// RmsNorm is a specialized version of the LayerNorm module.
#[derive(Clone, Debug)]
pub struct RmsNorm(LayerNorm);
impl RmsNorm {
pub fn new(weight: Tensor, eps: f64) -> Self {
Self(LayerNorm::rms_norm(weight, eps))
}
pub fn into_inner(self) -> LayerNorm {
self.0
}
    /// Variant of the forward pass that does not use the fused kernel. It is slower but, unlike
    /// the fused op, it supports backpropagation.
pub fn forward_diff(&self, xs: &Tensor) -> Result<Tensor> {
self.0.forward(xs)
}
}
impl Module for RmsNorm {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
if xs.is_contiguous() {
crate::ops::rms_norm(xs, &self.0.weight, self.0.eps as f32)
} else {
self.0.forward(xs)
}
}
}
pub fn rms_norm(size: usize, eps: f64, vb: crate::VarBuilder) -> Result<RmsNorm> {
let config = LayerNormConfig {
eps,
remove_mean: false,
affine: false,
};
Ok(RmsNorm(layer_norm(size, config, vb)?))
}
| 6 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/encoding.rs | //! Encoding Utilities. (e.g., one-hot/cold encoding)
use candle::{bail, DType, Result, Tensor, WithDType};
/// One-hot/cold encoding.
///
/// Given an input tensor of indices, this function returns a tensor of the same shape as the input
/// tensor with an additional dimension of the given depth size. The values in the returned tensor are
/// all set to the `off_value` except for the positions represented by the indices, which are set to the `on_value`.
///
/// This method returns a tensor with a rank that is one rank larger than the input tensor.
///
/// As an example, the following tensor will be encoded to a one-hot matrix:
///
/// `[[0i64, 2], [1, -1]]`
///
/// with a depth of 4 will be encoded to:
///
/// `[[[1, 0, 0, 0], [0, 0, 1, 0]], [[0, 1, 0, 0], [0, 0, 0, 0]]]`
///
/// When the input tensor index has a value of -1, the corresponding one-hot vector will be ignored,
/// resulting in a vector of values set to the `off_value`.
///
///
/// This method supports one-cold encoding by setting `on_value` to `0` and `off_value` to `1`.
/// By default `on_value` is `1` and `off_value` is `0`.
///
/// Other encoding values can be used by setting `on_value` and `off_value` to the desired values.
///
/// # Examples
///
/// ## One-hot encoding
///
/// ```rust
/// use candle::{Shape, Tensor, Device};
/// use candle_nn::encoding::one_hot;
///
/// let device = candle::Device::Cpu;
///
/// let indices = Tensor::new(vec![vec![0i64, 2], vec![1, -1]], &device).unwrap();
/// let depth = 4;
/// let one_hot = one_hot(indices, depth, 1f32, 0f32).unwrap();
///
/// let expected_matrix = [
/// [[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]],
/// [[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
/// ];
///
/// assert_eq!(one_hot.shape(), &Shape::from((2, 2, depth)));
///
/// let matrix = one_hot.to_vec3::<f32>().unwrap();
///
/// assert_eq!(matrix, expected_matrix);
///```
/// ## One-cold Encoding
///
/// ```rust
/// use candle::{Shape, Tensor, Device};
/// use candle_nn::encoding::one_hot;
///
///
/// let device = candle::Device::Cpu;
/// let depth = 4;
/// let indices = Tensor::new(vec![vec![0u8, 2], vec![1, 3]], &device).unwrap();
/// let one_cold = one_hot(indices, depth, 0u8, 1u8).unwrap();
///
/// let expected_matrix = [[[0, 1, 1, 1], [1, 1, 0, 1]], [[1, 0, 1, 1], [1, 1, 1, 0]]];
///
/// assert_eq!(one_cold.shape(), &Shape::from((2, 2, depth)));
///
/// let matrix = one_cold.to_vec3::<u8>().unwrap();
///
/// assert_eq!(matrix, expected_matrix);
/// ```
///
///
/// # Bails
///
/// This method bails if:
/// - One of the index values is less than -1.
/// - One of the index values is greater than or equal to the depth value.
/// - The input data type is not `U8`, `U32`, or `I64`.
///
/// # API Design
///
/// The api design for this method is loosely based on the [TensorFlow One-Hot](https://www.tensorflow.org/api_docs/python/tf/one_hot) method.
pub fn one_hot<D: WithDType>(
indices: Tensor,
depth: usize,
on_value: D,
off_value: D,
) -> Result<Tensor> {
let mut target_shape = indices.dims().to_vec();
target_shape.push(depth);
let indices = indices.flatten_all()?;
let mut out = vec![off_value; depth * indices.elem_count()];
match indices.dtype() {
DType::U8 => {
let indices = indices.to_vec1::<u8>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
DType::U32 => {
let indices = indices.to_vec1::<u32>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
DType::I64 => {
let indices = indices.to_vec1::<i64>()?;
for (i, &index) in indices.iter().enumerate() {
set_at_index(index, i * depth, depth, &mut out, on_value)?;
}
}
dtype => {
bail!("one_hot: unsupported data type {dtype:?}, expected U8, U32, or I64")
}
};
Tensor::from_vec(out, target_shape, indices.device())
}
fn set_at_index<D: WithDType, I: Into<i64>>(
value: I,
offset: usize,
depth: usize,
v: &mut [D],
on_value: D,
) -> Result<()> {
let value = value.into();
// Skip for an entire row of off_values
if value == -1 {
return Ok(());
}
if value < -1 {
bail!(
"one_hot: invalid negative index value {value}, expected a positive index value or -1"
);
}
let value = value as usize;
if value >= depth {
bail!("one_hot: index value {value} exceeds depth {depth}")
}
let idx = offset + value;
if idx >= v.len() {
bail!("one_hot: index out of bounds {idx}, len {}", v.len());
}
v[idx] = on_value;
Ok(())
}
| 7 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/src/activation.rs | //! Activation Functions
//!
use candle::{Result, Tensor};
use serde::Deserialize;
#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum Activation {
#[default]
#[serde(alias = "gelu")]
Gelu,
#[serde(alias = "gelu_new")]
NewGelu,
Relu,
Relu2,
Relu6,
Silu,
Sigmoid,
HardSigmoid,
Swiglu,
Swish,
HardSwish,
Elu(f64),
LeakyRelu(f64),
#[serde(alias = "gelu_pytorch_tanh")]
GeluPytorchTanh,
}
impl super::Module for Activation {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
match self {
Self::Gelu => xs.gelu_erf(),
// https://github.com/huggingface/transformers/blob/12f043eaeaabfef6f6efea411d98e6f6d3c094b7/src/transformers/activations.py#L49-L78
Self::NewGelu => xs.gelu(),
Self::Relu => xs.relu(),
Self::Relu2 => xs.relu()?.sqr(),
Self::Relu6 => xs.clamp(0f32, 6f32),
Self::Silu => xs.silu(),
Self::Sigmoid => crate::ops::sigmoid(xs),
Self::HardSigmoid => crate::ops::hard_sigmoid(xs),
Self::Swiglu => crate::ops::swiglu(xs),
Self::Swish => xs * crate::ops::sigmoid(xs)?,
Self::HardSwish => xs * crate::ops::hard_sigmoid(xs)?,
&Self::Elu(alpha) => xs.elu(alpha),
&Self::LeakyRelu(negative_slope) => crate::ops::leaky_relu(xs, negative_slope),
Self::GeluPytorchTanh => xs.gelu(),
}
}
}
#[derive(Clone, Debug)]
pub struct PReLU {
weight: Tensor,
is_scalar: bool,
}
impl PReLU {
pub fn new(weight: Tensor, is_scalar: bool) -> Self {
Self { weight, is_scalar }
}
pub fn weight(&self) -> &Tensor {
&self.weight
}
pub fn is_scalar(&self) -> bool {
self.is_scalar
}
}
impl candle::Module for PReLU {
fn forward(&self, xs: &Tensor) -> Result<Tensor> {
let weight = if self.is_scalar {
self.weight.reshape(())?
} else if xs.rank() >= 2 {
let num_channels = xs.dim(1)?;
let num_weights = self.weight.elem_count();
if num_weights != num_channels {
candle::bail!("error in prelu: unexpected number of channels for the input, got {num_channels}, weight dim is {num_weights}")
}
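            // Reshape the per-channel weight to [1, C, 1, ..., 1] so that it broadcasts over the
            // channel dimension (dim 1) of the input.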
let mut s = vec![1; xs.rank()];
s[1] = self.weight.elem_count();
self.weight.reshape(s)?
} else {
self.weight.clone()
};
let zeros = xs.zeros_like()?;
xs.maximum(&zeros)? + xs.minimum(&zeros)?.broadcast_mul(&weight)?
}
}
/// Create or initialize a new PReLU layer.
///
/// This uses some default name for weights, namely `"weight"`.
/// # Arguments
///
/// * `num_channels` - The number of channels. Use `None` to have a single trainable value and
/// `Some` for a 1D vector with the appropriate number of channels. When applying the `forward`
/// function, the input tensor shape `s` should either be one dimension with this number of
/// channels or if `s.len() >= 2` it should have `s[1]` equal to this number.
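///
/// # Example
///
/// An illustrative sketch using a single scalar weight (initialized to 0.25 by default):
///
/// ```rust
/// use candle::{DType, Module, Tensor, Device::Cpu};
/// # fn main() -> candle::Result<()> {
/// let varmap = candle_nn::VarMap::new();
/// let vb = candle_nn::VarBuilder::from_varmap(&varmap, DType::F32, &Cpu);
/// let layer = candle_nn::prelu(None, vb)?;
/// let xs = Tensor::new(&[-1f32, 2.], &Cpu)?;
/// let ys = layer.forward(&xs)?;
/// assert_eq!(ys.to_vec1::<f32>()?, &[-0.25, 2.0]);
/// # Ok(()) }
/// ```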
pub fn prelu(num_channels: Option<usize>, vs: crate::VarBuilder) -> Result<PReLU> {
let init_ws = crate::init::Init::Const(0.25);
// When using a scalar weight, the PyTorch encoding is to use a 1d vector of length 1.
let ws = vs.get_with_hints((num_channels.unwrap_or(1),), "weight", init_ws)?;
Ok(PReLU::new(ws, num_channels.is_none()))
}
| 8 |
0 | hf_public_repos/candle/candle-nn | hf_public_repos/candle/candle-nn/benches/bench_main.rs | mod benchmarks;
use criterion::criterion_main;
criterion_main!(benchmarks::layer_norm::benches, benchmarks::conv::benches);
| 9 |
0 | hf_public_repos | hf_public_repos/blog/community-tools.md | ---
title: "Introducing Community Tools on HuggingChat"
thumbnail: /blog/assets/community-tools/thumbnail.png
authors:
- user: nsarrazin
---
<div align="center">
<video alt="demo.mp4" controls autoplay loop autobuffer muted playsinline>
<source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/community-tools/demo.webm" type="video/webm">
</video>
</div>
Today we’re releasing our latest feature on HuggingChat: Community Tools! This lets you turn any Space that you love on HuggingFace into a tool that can be used by models directly from HuggingChat.
With this feature, we’re also expanding the modalities available in HuggingChat. You can now use community tools to understand images, generate videos, or answer with a text-to-speech model. The possibilities are endless and anyone can create tools using Spaces on Hugging Face! Explore existing tools [here](https://huggingface.co/chat/tools).
In this post we’re going to look at a few use cases for creating community tools:
1. [Turning a community Space into a tool](#turning-a-community-Space-into-a-tool)
2. [Creating a custom tool yourself](#creating-a-custom-tool-yourself)
3. [Enhance your assistants with community tools](#enhance-your-assistants-with-community-tools)
4. [Create a RAG tool on your own documents](#create-a-rag-tool-on-your-own-documents)
## Turning a community Space into a tool
You can turn anyone’s public Space into a tool. This is handy for using the latest models directly in HuggingChat. Let’s use [DamarJati/FLUX.1-RealismLora](https://huggingface.co/spaces/DamarJati/FLUX.1-RealismLora) as an example here.
Start by [creating a new tool](https://huggingface.co/chat/tools/new) and filling in the fields. As soon as you input the Space URL into the _Hugging Face Space URL_ field you’ll see the available functions and parameters get filled automatically.
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/community-tools/tools-step-1.png"/>
</div>
There are some fields that need to be filled in correctly in order to ensure optimal tool performance.
- **Tool Description:** The description is passed to the LLM to explain what the tool can do. Keep it short and describe what the tool can be used for.
- **AI Function Name:** The tools are represented as code functions. This is your tool’s function name. Keep it short, unique and self-explanatory.
- **Arguments:** These are the tool parameters the LLM can fill in. They can be:
    - **Required:** The model must fill in a value to use this tool. This requires the parameter to be described properly.
- **Optional:** There is a default value provided but the model can override it if needed.
- **Fixed:** The value is fixed when creating the tool and can’t be changed by the model.
You can always look at other tools’ definitions to better understand how to create tools. [(example)](https://huggingface.co/chat/tools/000000000000000000000001/edit)
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/community-tools/tools-step-2.png"/>
</div>
Now that our tool is created, we can enable it and start using it with tools compatible models!
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/community-tools/tools-step-3.png"/>
</div>
## **Creating a custom tool yourself**
Using existing Spaces covers a lot of use cases, but if you can write basic Python you can just as easily create custom tools yourself. Let’s build a simple dice roll tool as an example, since LLMs are quite bad at picking random numbers by themselves.
Start by creating a [new Gradio Space](https://huggingface.co/new-space?sdk=gradio) on Hugging Face. The _CPU Basic_ free tier works fine for this. Your Space will have to be public in order to be used inside HuggingChat.
Create a simple `app.py` file inside your Space repo; in the case of our dice roll example that could be:
```python
import gradio as gr
import random
def roll_dice(sides=6):
return random.randint(1, sides)
demo = gr.Interface(
fn=roll_dice,
inputs=gr.Number(value=6, label="Number of Sides"),
outputs="text",
title="Dice Roller",
description="Enter the number of sides for the dice and get the roll result."
)
demo.launch()
```
If you’re not familiar with Gradio, it’s very easy to get started creating interfaces, you can find the docs [here](https://www.gradio.app/docs/gradio/interface).
> [!TIP]
> You can have multiple functions inside a single Space to make it easier to manage your tools.
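Building on that tip, here is a purely illustrative sketch of a Space that exposes two functions through `gr.TabbedInterface` (the coin-flip function and all names below are made up for the example):
```python
import gradio as gr
import random
def roll_dice(sides=6):
    return random.randint(1, int(sides))
def flip_coin():
    return random.choice(["heads", "tails"])
dice = gr.Interface(fn=roll_dice, inputs=gr.Number(value=6, label="Number of Sides"), outputs="text")
coin = gr.Interface(fn=flip_coin, inputs=[], outputs="text")
# Each interface becomes a separate function you can pick from when creating the tool.
demo = gr.TabbedInterface([dice, coin], ["Dice Roller", "Coin Flip"])
demo.launch()
```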
Once you’re done, push the changes and when your Space is deployed you can create a community tool for it in HuggingChat just like we did previously.
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/community-tools/tools-custom.png"/>
</div>
## Enhance your assistants with Community Tools
You can activate tools directly by going to [the tools page](https://huggingface.co/chat/tools) and picking the ones you like or you can also package tools in an assistant.
When [creating an assistant](https://huggingface.co/chat/settings/assistants/new) using a model that is compatible with tool calling, you will now have the option to select tools. Search for tools by name and add up to 3 of them. In our case, let’s create a Game Master assistant that has access to image generation and dice rolling tools.
> [!TIP]
> You can use the system instructions field to tell the model when to use the tools.
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/community-tools/tools-assistant.png"/>
</div>
## Create a RAG tool on your own documents
To go along with the release we created a simple RAG tool that you can easily copy to ask questions about your documents directly from HuggingChat. Start by duplicating [this Space](https://huggingface.co/spaces/nsarrazin/rag-tool-template) into your own account. You can then drop files you want to be parsed in the `sources/` folder of that Space.
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/community-tools/tools-rag-1.png"/>
</div>
Once the Space is started up you can easily create a tool out of it on HuggingChat like we’ve covered previously.
<div align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/community-tools/tools-rag-2.png"/>
</div>
### Share your feedback with us
With the release of Community Tools we hope you’ll be able to enhance your chats with multimodal content and custom tools. The feature is still experimental so if you see Spaces that are not supported or tools that don’t work, please share them with us in [the feedback thread](https://huggingface.co/spaces/huggingchat/chat-ui/discussions/569)!
| 0 |
0 | hf_public_repos | hf_public_repos/blog/collaborative-training.md | ---
title: "Deep Learning over the Internet: Training Language Models Collaboratively"
thumbnail: /blog/assets/24_sahajBERT/thumbnail.png
authors:
- user: mryab
guest: true
- user: SaulLu
---
# Deep Learning over the Internet: Training Language Models Collaboratively
<small>
With the additional help of Quentin Lhoest and Sylvain Lesage.
</small>
Modern language models often require a significant amount of compute for pretraining, making it impossible to obtain them without access to tens and hundreds of GPUs or TPUs. Though in theory it might be possible to combine the resources of multiple individuals, in practice, such distributed training methods have previously seen limited success because connection speeds over the Internet are way slower than in high-performance GPU supercomputers.
In this blog post, we describe [DeDLOC](https://arxiv.org/abs/2106.10207) — a new method for collaborative distributed training that can adapt itself to the network and hardware constraints of participants. We show that it can be successfully applied in real-world scenarios by pretraining [sahajBERT](https://huggingface.co/neuropark/sahajBERT), a model for the Bengali language, with 40 volunteers. On downstream tasks in Bengali, this model achieves nearly state-of-the-art quality with results comparable to much larger models that used hundreds of high-tier accelerators.
<div class="aspect-w-16 aspect-h-9">
<iframe
src="https://www.youtube.com/embed/v8ShbLasRF8"
frameborder="0"
allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
allowfullscreen></iframe>
</div>
## Distributed Deep Learning in Open Collaborations
### Why should we do it?
These days, many highest-quality NLP systems are based on large pretrained Transformers. In general, their quality improves with size: you can achieve unparalleled results in natural language understanding and generation by scaling up the parameter count and leveraging the abundance of unlabeled text data.
Unfortunately, we use these pretrained models not only because it's convenient. The hardware resources for training Transformers on large datasets often exceed anything affordable to a single person and even most commercial or research organizations. Take, for example, BERT: its training was estimated to cost about $7,000, and for the largest models like GPT-3, this number can be as high as $12 million! This resource limitation might seem obvious and inevitable, but is there really no alternative to using pretrained models for the broader ML community?
However, there might be a way out of this situation: to come up with a solution, we only need to take a look around. It might be the case that the computational resources we're looking for are already there; for example, many of us have powerful computers with gaming or workstation GPUs at home. You might've already guessed that we're going to join their power similarly to [Folding@home](https://foldingathome.org/), [Rosetta@home](https://boinc.bakerlab.org/), [Leela Chess Zero](https://lczero.org/) or different [BOINC](https://boinc.berkeley.edu/) projects that leverage volunteer computing, but the approach is even more general. For instance, several laboratories can join their smaller clusters to utilize all the available resources, and some might want to join the experiment using inexpensive cloud instances.
To a skeptical mind, it might seem that we're missing a key factor here: data transfer in distributed DL is often a bottleneck, since we need to aggregate the gradients from multiple workers. Indeed, any naïve approach to distributed training over the Internet is bound to fail, as most participants don't have gigabit connections and might disconnect from the network at any time. So how on Earth can you train anything with a household data plan? :)
As a solution to this problem, we propose a new training algorithm, called Distributed Deep Learning in Open Collaborations (or **DeDLOC**), which is described in detail in our recently released [preprint](https://arxiv.org/abs/2106.10207). Now, let’s find out what are the core ideas behind this algorithm!
### Training with volunteers
In its most frequently used version, distributed training with multiple GPUs is pretty straightforward. Recall that when doing deep learning, you usually compute gradients of your loss function averaged across many examples in a batch of training data. In case of _data-parallel_ distributed DL, you simply split the data across multiple workers, compute gradients separately, and then average them once the local batches are processed. When the average gradient is computed on all workers, we adjust the model weights with the optimizer and continue training our model. You can see an illustration of different tasks that are executed below.

<div style="line-height:105%;font-size:80%">
<p align="center">
Typical machine learning tasks executed by peers in distributed training, possibly with a separation of roles
</p>
</div>
Often, to reduce the amount of synchronization and to stabilize the learning process, we can accumulate the gradients for N batches before averaging, which is equivalent to increasing the actual batch size N times. This approach, combined with the observation that most state-of-the-art language models use large batches, led us to a simple idea: let's accumulate one _very_ large batch across all volunteer devices before each optimizer step! Along with complete equivalence to regular distributed training and easy scalability, this method also has the benefit of built-in fault tolerance, which we illustrate below.
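To make the accumulation part of this idea concrete before we look at failure handling, here is a rough, framework-level sketch in plain PyTorch; it only shows local gradient accumulation and leaves out the peer discovery, fault tolerance and over-the-Internet averaging that hivemind takes care of:
```python
import torch
def accumulation_step(model: torch.nn.Module, optimizer: torch.optim.Optimizer, micro_batches, loss_fn):
    """Accumulate gradients over a list of (inputs, targets) micro-batches, then take one optimizer step."""
    optimizer.zero_grad()
    for inputs, targets in micro_batches:
        loss = loss_fn(model(inputs), targets)
        # Scale each micro-batch so the accumulated gradient is an average rather than a sum.
        (loss / len(micro_batches)).backward()
    # In the collaborative setting, the gradients of all peers would be averaged here
    # (e.g. with All-Reduce) once the global target batch size has been reached.
    optimizer.step()
```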
Let's consider a couple of potential failure cases that we might encounter throughout a collaborative experiment. By far, the most frequent scenario is that one or several peers disconnect from the training procedure: they might have an unstable connection or simply want to use their GPUs for something else. In this case, we only suffer a minor setback of training: the contribution of these peers gets deducted from the currently accumulated batch size, but other participants will compensate for that with their gradients. Also, if more peers join, the target batch size will simply be reached faster, and our training procedure will naturally speed up. You can see a demonstration of this in the video:
<div class="aspect-w-16 aspect-h-9">
<iframe src="https://www.youtube.com/embed/zdVsg5zsGdc" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
</div>
### Adaptive averaging
Now that we have discussed the overall training procedure, there remains one more question: how do we actually aggregate the gradients of participants? Most home computers cannot easily accept incoming connections, and the download speed might also become a constraint.
Since we rely on volunteer hardware for experiments, a central server is not really a viable option, as it will quickly face overload when scaling to tens of clients and hundreds of millions of parameters. Most data-parallel training runs today don't use this strategy anyway; instead, they rely on All-Reduce — an efficient all-to-all communication primitive. Thanks to clever algorithmic optimizations, each node can compute the global average without sending the entire local gradient to every peer.
Because All-Reduce is decentralized, it seems like a good choice; however, we still need to take the diversity of hardware and network setups into account. For example, some volunteers might join from computers that have slow network but powerful GPUs, some might have better connectivity only to a subset of other peers, and some may be firewalled from incoming connections.
It turns out we can actually come up with an optimal data transfer strategy on the fly by leveraging this information about performance! On a high level, we split the entire gradient vector into parts depending on the Internet speed of each peer: those with the fastest connection aggregate the largest parts. Also, if some nodes do not accept incoming connections, they simply send their data for aggregation but do not compute the average themselves. Depending on the conditions, this adaptive algorithm can recover well-known distributed DL algorithms and improve on them with a hybrid strategy, as demonstrated below.

<div style="line-height:105%;font-size:80%">
<p align="center">
Examples of different averaging strategies with the adaptive algorithm.
</p>
</div>
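As a toy illustration of the bandwidth-proportional split described above (the real adaptive algorithm also handles peers that cannot accept incoming connections and recovers other All-Reduce variants as special cases), the chunk sizes could be computed like this:
```python
def partition_sizes(total_params: int, bandwidths: list) -> list:
    """Split a gradient vector so that faster peers aggregate proportionally larger chunks."""
    total_bw = sum(bandwidths)
    sizes = [int(total_params * bw / total_bw) for bw in bandwidths]
    sizes[-1] += total_params - sum(sizes)  # hand the rounding remainder to the last peer
    return sizes
# Three peers with 100, 50 and 50 Mbit/s uplinks sharing an 18M-parameter gradient vector:
print(partition_sizes(18_000_000, [100, 50, 50]))  # [9000000, 4500000, 4500000]
```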
<div style="line-height:105%;border:1px solid #F5F5F5;background-color:#F5F5F5;color: black">
<p align="center">
💡 The core techniques for decentralized training are available in <a href="https://github.com/learning-at-home/hivemind">Hivemind</a>.<br>
Check out the repo and learn how to use this library in your own projects!
</p>
</div><br>
## sahajBERT
As always, having a well-designed algorithmic framework doesn't mean that it will work as intended in practice, because some assumptions may not hold true in actual training runs. To verify the competitive performance of this technology and to showcase its potential, we organized a special collaborative event to pretrain a masked language model for the Bengali language. Even though it is the fifth most spoken native language in the world, it has [very few](https://huggingface.co/models?filter=bn&pipeline_tag=fill-mask) masked language models openly available, which emphasizes the importance of tools that can empower the community, unlocking a plethora of opportunities in the field.
We conducted this experiment with real volunteers from the Neuropark community and used openly available datasets (OSCAR and Wikipedia), because we wanted to have a fully reproducible example that might serve as an inspiration for other groups. Below, we describe the detailed setup of our training run and demonstrate its results.
### Architecture
For our experiment, we chose ALBERT _(A Lite BERT)_ — a model for language representations that is pretrained with Masked Language Modeling (MLM) and Sentence Order Prediction (SOP) as objectives. We use this architecture because weight sharing makes it very parameter-efficient: for example, ALBERT-large has ~18M trainable parameters and performs comparably to BERT-base with ~108M weights on the GLUE benchmark. It means that there is less data to exchange between the peers, which is crucial in our setup, as it significantly speeds up each training iteration.
<div style="line-height:105%;border:1px solid #F5F5F5;background-color:#F5F5F5;color: black">
<p align="center">
💡 Want to know more about ALBERT?<br>
<a href="https://arxiv.org/abs/1909.11942">Paper</a><br>
<a href="https://huggingface.co/transformers/model_doc/albert.html#albert"
>Transformers doc</a
>
</p>
</div>
### Tokenizer
The first brick of our model is called a _tokenizer_ and takes care of transforming raw text into vocabulary indices. Because we are training a model for Bengali, which is not very similar to English, we need to implement language-specific preprocessing as a part of our tokenizer. We can view it as a sequence of operations:
1. **Normalization:** includes all preprocessing operations on raw text data. This was the step at which we have made the most changes, because removing certain details can either change the meaning of the text or leave it the same, depending on the language. For example, the standard ALBERT normalizer removes the accents, while for the Bengali language, we need to keep them, because they contain information about the vowels. As a result, we use the following operations: NMT normalization, NFKC normalization, removal of multiple spaces, homogenization of recurring Unicode characters in the Bengali language, and lowercasing.
2. **Pretokenization** describes rules for splitting the input (for example, by whitespace) to enforce specific token boundaries. As in the original work, we have chosen to keep the whitespace out of the tokens. Therefore, to distinguish the words from each other and not to have multiple single-space tokens, each token corresponding to the beginning of a word starts with a special character “\_” (U+2581). In addition, we isolated all punctuation and digits from other characters to condense our vocabulary.
3. **Tokenizer modeling:** It is at this level that the text is mapped into a sequence of elements of a vocabulary. There are several algorithms for this, such as Byte-Pair Encoding (BPE) or Unigram, and most of them need to build the vocabulary from a text corpus. Following the setup of ALBERT, we used the **Unigram Language Model** approach, training a vocabulary of 32k tokens on the deduplicated Bengali part of the OSCAR dataset.
4. **Post-processing:** After tokenization, we might want to add several special tokens required by the architecture, such as starting the sequence with a special token `[CLS]` or separating two segments with a special token `[SEP]`. Since our main architecture is the same as the original ALBERT, we keep the same post-processing: specifically, we add a `[CLS]` token at the beginning of each example and a `[SEP]` token both between two segments and at the end.
<div style="line-height:105%;border:1px solid #F5F5F5;background-color:#F5F5F5;color: black">
<p align="center">
💡 Read more information about each component in
<a href="https://huggingface.co/docs/tokenizers/python/latest/components.html#components">Tokenizers doc</a>
</p>
</div>
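To make the four steps above more concrete, here is a simplified, illustrative sketch of how such a pipeline can be assembled with the 🤗 tokenizers library (the exact normalizers, special tokens and training setup used for sahajBERT differ in the details):
```python
from tokenizers import Tokenizer, models, normalizers, pre_tokenizers, processors, trainers
# 3. Tokenizer modeling: a Unigram language model, as in ALBERT.
tokenizer = Tokenizer(models.Unigram())
# 1. Normalization (simplified): NMT + NFKC normalization and lowercasing.
tokenizer.normalizer = normalizers.Sequence(
    [normalizers.Nmt(), normalizers.NFKC(), normalizers.Lowercase()]
)
# 2. Pretokenization: word beginnings are marked with the "▁" (U+2581) character.
tokenizer.pre_tokenizer = pre_tokenizers.Metaspace()
# Train a 32k-token vocabulary; `texts` stands for an iterator over the (streamed) corpus.
trainer = trainers.UnigramTrainer(
    vocab_size=32_000,
    special_tokens=["[CLS]", "[SEP]", "<unk>", "[MASK]"],
    unk_token="<unk>",
)
# tokenizer.train_from_iterator(texts, trainer=trainer)
# 4. Post-processing: wrap single sequences and pairs with [CLS] / [SEP].
# (In practice the special-token ids are looked up with `tokenizer.token_to_id` after training.)
tokenizer.post_processor = processors.TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B [SEP]",
    special_tokens=[("[CLS]", 0), ("[SEP]", 1)],
)
```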
You can reuse our tokenizer by running the following code:
```python
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("neuropark/sahajBERT")
```
### Dataset
The last thing we need to cover is the training dataset. As you probably know, the great strength of pretrained models like BERT or ALBERT is that you don't need an annotated dataset, but just a lot of texts. To train sahajBERT, we used the [Bengali Wikipedia dump from 03/20/2021](https://huggingface.co/datasets/lhoestq/wikipedia_bn) and the Bengali subset of [OSCAR](https://huggingface.co/datasets/oscar) (600MB + 6GB of text). These two datasets can easily be downloaded from the HF Hub.
However, loading an entire dataset requires time and storage — two things that our peers do not necessarily have. To make the most of the resources provided by the participants, we have implemented **dataset streaming**, which allows them to train the model nearly as soon as they join the network. Specifically, the examples in the dataset are downloaded and transformed in parallel to the training. We can also shuffle the dataset so that our peers have little chance to process the same examples at the same time. As the dataset is not downloaded and preprocessed in advance, the transformations needed to go from plain text to a training example (shown in the figure below) are done on the fly.

<div style="line-height:105%;font-size:80%">
<p align="center">
From a raw sample to a training sample
</p>
</div>
The dataset streaming mode is available from version v1.9 of the 🤗 datasets library, so you can use it right now as follows:
```python
from datasets import load_dataset
oscar_dataset = load_dataset("oscar", name="unshuffled_deduplicated_bn", streaming=True)
```
<div style="line-height:105%;border:1px solid #F5F5F5;background-color:#F5F5F5;color: black">
<p align="center">
💡 Learn more about loading datasets in streaming mode in the
<a href="https://huggingface.co/docs/datasets/dataset_streaming.html">documentation</a>
</p>
</div>
### Collaborative event
The sahajBERT collaborative training event took place from May 12 to May 21. The event brought together 40 participants, 30 of whom were Bengali-speaking volunteers, and 10 were volunteers from one of the authors' organizations. These 40 volunteers joined the [Neuropark](https://neuropark.co/) Discord channel to receive all information regarding the event and participate in discussions. To join the experiment, volunteers were asked to:
1. Send their username to the moderators to be allowlisted;
2. Open the provided notebook locally, on Google Colaboratory, or on Kaggle;
3. Run one code cell and fill in their Hugging Face credentials when requested;
4. Watch the training loss decrease on the shared dashboards!
For security purposes, we set up an authorization system so that only members of the Neuropark community could train the model. Sparing you the technical details, our authorization protocol allows us to guarantee that every participant is in the allowlist and to acknowledge the individual contribution of each peer.
In the following figure, you can see the activity of each volunteer. Over the experiment, the volunteers logged in 600 different sessions. Participants regularly launched multiple runs in parallel, and many of them spread out the runs they launched over time. The runs of individual participants lasted 4 hours on average, and the maximum length was 21 hours. You can read more about the participation statistics in the paper.
<iframe width="100%" height="670" frameborder="0"
src="https://observablehq.com/embed/@huggingface/sahajbert-bubbles-chart-optimized?cells=c_noaws%2Ct_noaws%2Cviewof+currentDate"></iframe>
<div style="line-height:105%;font-size:80%">
<p align="center">
Chart showing participants of the <a href="https://huggingface.co/neuropark/sahajBERT"> sahajBERT</a> experiment. Circle radius is relative to the total number of processed batches, the circle is greyed if the participant is not active. Every purple square represents an active device, darker color corresponds to higher performance
</p>
</div>
Along with the resources provided by participants, we also used 16 preemptible (cheap but frequently interrupted) single-GPU T4 cloud instances to ensure the stability of the run. The cumulative runtime for the experiment was 234 days, and in the figure below you can see parts of the loss curve that each peer contributed to!
<p align="center">
<iframe width="80%" height="950" frameborder="0"
src="https://observablehq.com/embed/@huggingface/explore-collaborative-training-data-optimized?cells=sessions%2Cviewof+participant%2ClossByParticipant"></iframe>
</p>
The final model was uploaded to the Model Hub, so you can download and play with it if you want to: [https://hf.co/neuropark/sahajBERT](https://huggingface.co/neuropark/sahajBERT)
### Evaluation
To evaluate the performance of sahajBERT, we finetuned it on two downstream tasks in Bengali:
- Named entity recognition (NER) on the Bengali split of [WikiANN](https://aclanthology.org/P17-1178/). The goal of this task is to classify each token in the input text into one of the following categories: person, organization, location, or none of them.
- News Category Classification (NCC) on the Soham articles dataset from [IndicGLUE](https://aclanthology.org/2020.findings-emnlp.445/). The goal of this task is to predict the category to which the input text belongs.
We evaluated it during training on the NER task to check that everything was going well; as you can see on the following plot, this was indeed the case!
<iframe width="100%" height="476" frameborder="0"
src="https://observablehq.com/embed/@huggingface/bengali-exp-eval?cells=evalPlot"></iframe>
<div style="line-height:105%;font-size:80%">
<p align="center">
Evaluation metrics of fine-tuned models on the NER task from different checkpoints of pre-trained models.
</p>
</div>
At the end of training, we compared sahajBERT with three other pretrained language models: [XLM-R Large](https://arxiv.org/abs/1911.02116), [IndicBert](https://aclanthology.org/2020.findings-emnlp.445/), and [bnRoBERTa](https://huggingface.co/neuralspace-reverie/indic-transformers-bn-roberta). In the table below, you can see that our model has results comparable to the best Bengali language models available on HF Hub, even though our model has only ~18M trained parameters, while, for instance, XLM-R (a strong multilingual baseline), has ~559M parameters and was trained on several hundred V100 GPUs.
| Model | NER F1 (mean ± std) | NCC Accuracy (mean ± std) |
|:-------------:|:-------------:|:-------------:|
|[sahajBERT](https://huggingface.co/neuropark/sahajBERT) | 95.45 ± 0.53| 91.97 ± 0.47|
|[XLM-R-large](https://huggingface.co/xlm-roberta-large) | 96.48 ± 0.22| 90.05 ± 0.38|
|[IndicBert](https://huggingface.co/ai4bharat/indic-bert) | 92.52 ± 0.45| 74.46 ± 1.91|
|[bnRoBERTa](https://huggingface.co/neuralspace-reverie/indic-transformers-bn-roberta) |82.32 ± 0.67|80.94 ± 0.45|
These models are available on the Hub as well. You can test them directly by playing with the Hosted Inference API widget on their Model Cards or by loading them directly in your Python code.
#### sahajBERT-NER
Model card: [https://hf.co/neuropark/sahajBERT-NER](https://hf.co/neuropark/sahajBERT-NER)
```python
from transformers import (
    AlbertForTokenClassification,
    TokenClassificationPipeline,
    PreTrainedTokenizerFast,
)
# Initialize tokenizer
tokenizer = PreTrainedTokenizerFast.from_pretrained("neuropark/sahajBERT-NER")
# Initialize model
model = AlbertForTokenClassification.from_pretrained("neuropark/sahajBERT-NER")
# Initialize pipeline
pipeline = TokenClassificationPipeline(tokenizer=tokenizer, model=model)
raw_text = "এই ইউনিয়নে ৩ টি মৌজা ও ১০ টি গ্রাম আছে ।" # Change me
output = pipeline(raw_text)
```
#### sahajBERT-NCC
Model card: [https://hf.co/neuropark/sahajBERT-NCC](https://hf.co/neuropark/sahajBERT-NCC)
```python
from transformers import (
    AlbertForSequenceClassification,
    TextClassificationPipeline,
    PreTrainedTokenizerFast,
)
# Initialize tokenizer
tokenizer = PreTrainedTokenizerFast.from_pretrained("neuropark/sahajBERT-NCC")
# Initialize model
model = AlbertForSequenceClassification.from_pretrained("neuropark/sahajBERT-NCC")
# Initialize pipeline
pipeline = TextClassificationPipeline(tokenizer=tokenizer, model=model)
raw_text = "এই ইউনিয়নে ৩ টি মৌজা ও ১০ টি গ্রাম আছে ।" # Change me
output = pipeline(raw_text)
```
## Conclusion
In this blog post, we have discussed a method that enables collaborative pretraining of neural networks, with sahajBERT as the first truly successful example of applying it to a real-world problem.
What does this all mean for the broader ML community? First, it is now possible to run large-scale distributed pretraining with your friends, and we hope to see a lot of cool new models that were previously less feasible to obtain. Also, our result might be important for multilingual NLP, since now the community for any language can train their own models without the need for significant computational resources concentrated in one place.
## Acknowledgements
The DeDLOC paper and sahajBERT training experiment were created by Michael Diskin, Alexey Bukhtiyarov, Max Ryabinin, Lucile Saulnier, Quentin Lhoest, Anton Sinitsin, Dmitry Popov, Dmitry Pyrkin, Maxim Kashirin, Alexander Borzunov, Albert Villanova del Moral, Denis Mazur, Ilia Kobelev, Yacine Jernite, Thomas Wolf, and Gennady Pekhimenko. This project is the result of a collaboration between
[Hugging Face](https://huggingface.co/), [Yandex Research](https://research.yandex.com/), [HSE University](https://www.hse.ru/en/), [MIPT](https://mipt.ru/english/), [University of Toronto](https://www.utoronto.ca/) and [Vector Institute](https://vectorinstitute.ai/).
In addition, we would like to thank Stas Bekman, Dmitry Abulkhanov, Roman Zhytar, Alexander Ploshkin, Vsevolod Plokhotnyuk and Roman Kail for their invaluable help with building the training infrastructure. Also, we thank Abhishek Thakur for helping with downstream evaluation and Tanmoy Sarkar with Omar Sanseviero, who helped us organize the collaborative experiment and gave regular status updates to the participants over the course of the training run.
Below, you can see all participants of the collaborative experiment:
<iframe width="100%" height="380" frameborder="0"
src="https://observablehq.com/embed/89470ece1dda817b?cells=humanParticipants"></iframe>
## References
"Distributed Deep Learning in Open Collaborations", [ArXiv](https://arxiv.org/abs/2106.10207)
Code for [sahajBERT experiments](https://github.com/yandex-research/DeDLOC/tree/main/sahajbert) in the DeDLOC repository.
| 1 |
0 | hf_public_repos | hf_public_repos/blog/intel-fast-embedding.md | ---
title: "CPU Optimized Embeddings with 🤗 Optimum Intel and fastRAG"
thumbnail: /blog/assets/optimum_intel/intel_thumbnail.png
authors:
- user: peterizsak
guest: true
- user: mber
guest: true
- user: danf
guest: true
- user: echarlaix
- user: mfuntowicz
- user: moshew
guest: true
---
# CPU Optimized Embeddings with 🤗 Optimum Intel and fastRAG
Embedding models are useful for many applications such as retrieval, reranking, clustering, and classification. The research community has witnessed significant advancements in recent years in embedding models, leading to substantial enhancements in all applications building on semantic representation. Models such as [BGE](https://huggingface.co/BAAI/bge-large-en-v1.5), [GTE](https://huggingface.co/thenlper/gte-small), and [E5](https://huggingface.co/intfloat/multilingual-e5-large) are placed at the top of the [MTEB](https://github.com/embeddings-benchmark/mteb) benchmark and in some cases outperform proprietary embedding services. There are a variety of model sizes found in Hugging Face's Model Hub, from lightweight (100-350M parameters) to 7B models (such as [Salesforce/SFR-Embedding-Mistral](https://huggingface.co/Salesforce/SFR-Embedding-Mistral)). The lightweight models based on an encoder architecture are ideal candidates for optimization and utilization on CPU backends running semantic search-based applications, such as Retrieval Augmented Generation ([RAG](https://en.wikipedia.org/wiki/Prompt_engineering#Retrieval-augmented_generation)).
In this blog, we will show how to unlock a significant performance boost on Xeon-based CPUs, and how easy it is to integrate optimized models into existing RAG pipelines using [fastRAG](https://github.com/IntelLabs/fastRAG/).
## Information Retrieval with Embedding Models
Embedding models encode textual data into dense vectors, capturing semantic and contextual meaning. This enables accurate information retrieval by representing word and document relationships more contextually. Typically, semantic similarity is measured by the cosine similarity between the embedding vectors.
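As a quick illustration, here is a minimal sketch of computing cosine similarity between two embedding vectors with PyTorch; the vectors below are random placeholders standing in for real model outputs:
```python
import torch
import torch.nn.functional as F
# Placeholder embeddings; in practice these come from an embedding model.
query_embedding = torch.randn(1, 384)
document_embedding = torch.randn(1, 384)
# Cosine similarity is the dot product of the L2-normalized vectors.
similarity = F.cosine_similarity(query_embedding, document_embedding).item()
print(f"cosine similarity: {similarity:.4f}")
```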
Should dense vectors always be used for information retrieval? The two dominant approaches have trade-offs:
* Sparse retrieval matches n-grams, phrases, or metadata to search large collections efficiently and at scale. However, it may miss relevant documents due to lexical gaps between the query and the document.
* Semantic retrieval encodes text into dense vectors, capturing context and meaning better than bag-of-words. It can retrieve semantically related documents despite lexical mismatches. However, it's computationally intensive, has higher latency, and requires sophisticated encoding models compared to lexical matching like BM25.
### Embedding models and RAG
Embedding models serve multiple and critical purposes in RAG applications:
* Offline Process: Encoding documents into dense vectors during indexing/updating of the retrieval document store (index).
* Query Encoding: At query time, they encode the input query into a dense vector representation for retrieval.
* Reranking: After initial retrieval, they can rerank the retrieved documents by encoding them into dense vectors and comparing against the query vector. This allows reranking documents that initially lacked dense representations.
Optimizing the embedding model component in RAG pipelines is highly desirable for a more efficient experience, in particular:
* Document Indexing/Updating: Higher throughput allows encoding and indexing large document collections more rapidly during initial setup or periodic updates.
* Query Encoding: Lower query encoding latency is critical for responsive real-time retrieval. Higher throughput supports encoding many concurrent queries efficiently, enabling scalability.
* Reranking Retrieved Documents: After initial retrieval, embedding models need to quickly encode the retrieved candidates for reranking. Lower latency allows rapid reranking of documents for time-sensitive applications. Higher throughput supports reranking larger candidate sets in parallel for more comprehensive reranking.
## Optimizing Embedding Models with Optimum Intel and IPEX
[Optimum Intel](https://github.com/huggingface/optimum-intel) is an open-source library that accelerates end-to-end pipelines built with Hugging Face libraries on Intel Hardware. Optimum Intel includes several techniques to accelerate models such as low-bit quantization, model weight pruning, distillation, and an accelerated runtime.
The runtime and optimizations included in [Optimum Intel](https://github.com/huggingface/optimum-intel) take advantage of Intel® Advanced Vector Extensions 512 (Intel® AVX-512), Vector Neural Network Instructions (VNNI) and Intel® Advanced Matrix Extensions (Intel® AMX) on Intel CPUs to accelerate models. Specifically, Intel AMX provides built-in [BFloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format) (`bf16`) and `int8` GEMM accelerators in every core to accelerate deep learning training and inference workloads. AMX-accelerated inference was introduced in PyTorch 2.0 and [Intel Extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch) (IPEX), in addition to other optimizations for various common operators.
Optimizing pre-trained models can be done easily with Optimum Intel; many simple examples can be found [here](https://huggingface.co/docs/optimum/main/en/intel/optimization_inc).
## Example: Optimizing BGE Embedding Models
In this blog, we focus on recently released embedding models by researchers at the [Beijing Academy of Artificial Intelligence](https://arxiv.org/pdf/2309.07597.pdf), as their models show competitive results on the widely adopted [MTEB](https://github.com/embeddings-benchmark/mteb) leaderboard.
### BGE Technical Details
Bi-encoder models are Transformer-based encoders trained to minimize a similarity metric, such as cosine-similarity, between two semantically similar texts as vectors. For example, popular embedding models use a BERT model as a base pre-trained model and fine-tune it for embedding documents. The vector representing the encoded text is created from the model outputs; for example, it can be the [CLS] token vector or a mean of all the token vectors.
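As an illustration, here is a minimal sketch of both pooling options on top of a Transformers encoder output; the model name and input sentence are only examples:
```python
import torch
from transformers import AutoModel, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("BAAI/bge-small-en-v1.5")
model = AutoModel.from_pretrained("BAAI/bge-small-en-v1.5")
inputs = tokenizer("A sentence to embed.", return_tensors="pt")
with torch.no_grad():
    token_embeddings = model(**inputs).last_hidden_state  # (batch, seq_len, hidden)
# Option 1: the [CLS] token vector as the sentence embedding.
cls_embedding = token_embeddings[:, 0]
# Option 2: mean of all token vectors, ignoring padding positions.
mask = inputs["attention_mask"].unsqueeze(-1)
mean_embedding = (token_embeddings * mask).sum(dim=1) / mask.sum(dim=1)
```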
Unlike more complex embedding architectures, bi-encoders encode only single documents, thus they lack contextual interaction between encoded entities such as query-document and document-document. However, state-of-the-art bi-encoder embedding models present competitive performance and are extremely fast due to their simple architecture.
We focus on 3 BGE models: [small](https://huggingface.co/BAAI/bge-small-en-v1.5), [base](https://huggingface.co/BAAI/bge-base-en-v1.5), and [large](https://huggingface.co/BAAI/bge-large-en-v1.5) consisting of 45M, 110M, and 355M parameters encoding to 384/768/1024 sized embedding vectors, respectively.
We note that the optimization process we showcase below is generic and can be applied to other embedding models (including bi-encoders, cross-encoders, and such).
### Step-by-step: Optimization by Quantization
We present a step-by-step guide for enhancing the performance of embedding models, focusing on reducing latency (with a batch size of 1) and increasing throughput (measured in documents encoded per second). This recipe utilizes `optimum-intel` and [Intel Neural Compressor](https://github.com/intel/neural-compressor) to quantize the model, and uses [IPEX](https://github.com/intel/intel-extension-for-pytorch) for optimized runtime on Intel-based hardware.
##### Step 1: Installing Packages
To install `optimum-intel` and `intel-extension-for-transformers` run the following command:
```bash
pip install -U optimum[neural-compressor] intel-extension-for-transformers
```
##### Step 2: Post-training Static Quantization
Post-training static quantization requires a calibration set to determine the dynamic range of weights and activations. The calibration is done by running a representative set of data samples through the model, collecting statistics, and then quantizing the model based on the gathered info to minimize the accuracy loss.
The following snippet shows the quantization step (imports added for completeness):
```python
from neural_compressor.config import PostTrainingQuantConfig
from optimum.intel import INCQuantizer
from transformers import AutoModel, AutoTokenizer
def quantize(model_name: str, output_path: str, calibration_set: "datasets.Dataset"):
    model = AutoModel.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    def preprocess_function(examples):
        return tokenizer(examples["text"], padding="max_length", max_length=512, truncation=True)
    vectorized_ds = calibration_set.map(preprocess_function, num_proc=10)
    vectorized_ds = vectorized_ds.remove_columns(["text"])
    quantizer = INCQuantizer.from_pretrained(model)
    quantization_config = PostTrainingQuantConfig(approach="static", backend="ipex", domain="nlp")
    quantizer.quantize(
        quantization_config=quantization_config,
        calibration_dataset=vectorized_ds,
        save_directory=output_path,
        batch_size=1,
    )
    tokenizer.save_pretrained(output_path)
```
In our calibration process we use a subset of the [qasper](https://huggingface.co/datasets/allenai/qasper) dataset.
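For illustration, the calibration subset could be prepared along the following lines; the split size and the way the `text` column is built from qasper fields are assumptions rather than the exact recipe used here:
```python
from datasets import load_dataset
# Small subset of qasper for calibration (the exact subset used may differ).
calibration_set = load_dataset("allenai/qasper", split="train[:100]")
# quantize() above expects a "text" column; build one from the title and abstract fields.
calibration_set = calibration_set.map(lambda example: {"text": example["title"] + " " + example["abstract"]})
calibration_set = calibration_set.select_columns(["text"])
```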
##### Step 3: Loading and running inference
Loading a quantized model can be done by simply running:
```python
from optimum.intel import IPEXModel
model = IPEXModel.from_pretrained("Intel/bge-small-en-v1.5-rag-int8-static")
```
Encoding sentences into vectors can be done similarly to what we are used to with the [Transformers](https://github.com/huggingface/transformers) library:
```python
import torch
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("Intel/bge-small-en-v1.5-rag-int8-static")
sentences = ["An example sentence to encode."]  # any list of strings
inputs = tokenizer(sentences, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
# get the [CLS] token
embeddings = outputs[0][:, 0]
```
Additional important details on how to configure the CPU backend (correct machine setup) are provided in the evaluation section below.
### Model Evaluation with MTEB
Quantizing the models' weights to a lower precision introduces accuracy loss, as we lose precision moving from `fp32` weights to `int8`. Therefore, we aim to validate the accuracy of the optimized models by comparing them to the original models with two [MTEB](https://github.com/embeddings-benchmark/mteb) tasks:
- **Retrieval** - where a corpus is encoded and ranked lists are created by searching the index given a query.
- **Reranking** - reranking the retrieval's results for better relevance given a query.
The table below shows the average accuracy (on multiple datasets) of each task type (MAP for Reranking, NDCG@10 for Retrieval), where `int8` is our quantized model and `fp32` is the original model (results taken from the official MTEB leaderboard). The quantized models show less than 1% error rate compared to the original model in the Reranking task and less than 1.55% in the Retrieval task.
<table>
<tr><th> </th><th> Reranking </th><th> Retrieval </th></tr>
<tr><td>
| |
| --------- |
| BGE-small |
| BGE-base |
| BGE-large |
</td><td>
| int8 | fp32 | diff |
| ------ | ------ | ------ |
| 0.5826 | 0.5836 | -0.17% |
| 0.5886 | 0.5886 | 0% |
| 0.5985 | 0.6003 | -0.3% |
</td><td>
| int8 | fp32 | diff |
| ------ | ------ | ------ |
| 0.5138 | 0.5168 | -0.58% |
| 0.5242 | 0.5325 | -1.55% |
| 0.5346 | 0.5429 | -1.53% |
</td></tr> </table>
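For reference, a single MTEB task can be run against such a model with the `mteb` package roughly as in the sketch below; the wrapper class, pooling, batch size, and task choice are illustrative, and the exact API can vary between `mteb` versions:
```python
import torch
from mteb import MTEB
from optimum.intel import IPEXModel
from transformers import AutoTokenizer
model_id = "Intel/bge-small-en-v1.5-rag-int8-static"
model = IPEXModel.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)
class EmbeddingWrapper:
    """MTEB expects an object exposing encode(sentences) -> array of embeddings."""
    def encode(self, sentences, batch_size=32, **kwargs):
        embeddings = []
        for i in range(0, len(sentences), batch_size):
            batch = tokenizer(sentences[i:i + batch_size], padding=True, truncation=True, return_tensors="pt")
            with torch.no_grad():
                outputs = model(**batch)
            embeddings.append(outputs[0][:, 0])  # [CLS] pooling, as above
        return torch.cat(embeddings).numpy()
evaluation = MTEB(tasks=["SciFact"])  # one retrieval task, as an example
evaluation.run(EmbeddingWrapper(), output_folder="results")
```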
### Speed and Latency
We compare the performance of our models with two other common ways of running the models:
1. Using PyTorch and Hugging Face's Transformers library with `bf16`.
2. Using [Intel extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/#introduction) (IPEX) runtime with `bf16` and tracing the model using torchscript.
Experimental setup notes:
- Hardware (CPU): 4th gen Intel Xeon 8480+ with 2 sockets, 56 cores per socket.
- The PyTorch model was evaluated with 56 cores on 1 CPU socket.
- IPEX/Optimum setups were evaluated with ipexrun, 1 CPU socket, and cores ranging from 22-56.
- TCMalloc was installed and defined as an environment variable in all runs.
### How did we run the evaluation?
We created a script that generated random examples using the vocabulary of the model. We loaded the original model and the optimized model and compared how much time it takes to encode those examples in the two scenarios we mentioned above: latency when encoding in batch size 1, and throughput using batched example encoding.
1. Baseline PyTorch and Hugging Face:
```python
import torch
from transformers import AutoModel
model = AutoModel.from_pretrained("BAAI/bge-small-en-v1.5")
# Example input: random token ids drawn from the model vocabulary, as generated by the benchmark script.
inputs = torch.randint(model.config.vocab_size, size=[1, 256])
@torch.inference_mode()
def encode_text():
    outputs = model(inputs)
with torch.cpu.amp.autocast(dtype=torch.bfloat16):
    encode_text()
```
2. IPEX torchscript and `bf16`:
```python
import torch
from transformers import AutoModel
import intel_extension_for_pytorch as ipex
model = AutoModel.from_pretrained("BAAI/bge-small-en-v1.5")
model = ipex.optimize(model, dtype=torch.bfloat16)
vocab_size = model.config.vocab_size
batch_size = 1
seq_length = 512
d = torch.randint(vocab_size, size=[batch_size, seq_length])
model = torch.jit.trace(model, (d,), check_trace=False, strict=False)
model = torch.jit.freeze(model)
@torch.inference_mode()
def encode_text():
    outputs = model(d)  # encode the randomly generated example
with torch.cpu.amp.autocast(dtype=torch.bfloat16):
    encode_text()
```
3. Optimum Intel with IPEX and `int8` model:
```python
import torch
from optimum.intel import IPEXModel
model = IPEXModel.from_pretrained("Intel/bge-small-en-v1.5-rag-int8-static")
# Example input: random token ids from the model vocabulary, as in the baselines above.
inputs = torch.randint(model.config.vocab_size, size=[1, 256])
@torch.inference_mode()
def encode_text():
    outputs = model(inputs)
encode_text()
```
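The timing loop itself is not shown above; a minimal sketch of how latency could be measured, continuing from the snippet above, looks like the following (the number of warmup and timed iterations are assumptions):
```python
import time
# Continuing from the previous snippet: encode_text() wraps a single forward pass.
for _ in range(10):  # warmup iterations
    encode_text()
n_iters = 100
start = time.perf_counter()
for _ in range(n_iters):
    encode_text()
latency_ms = (time.perf_counter() - start) / n_iters * 1000
print(f"average latency: {latency_ms:.2f} ms")
```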
### Latency performance
In this evaluation, we aim to measure how fast the models respond. This is an example use case for encoding queries in RAG pipelines. In this evaluation, we set the batch size to 1 and measure the latency for different document lengths.
We can see that the quantized model has the best latency overall, under 10 ms for the small and base models and <20 ms for the large model. Compared to the original model, the quantized model shows up to 4.5x speedup in latency.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/178_intel_ipex_quantization/latency.png" alt="latency" style="width: 90%; height: auto;"><br>
<em>Figure 1. Latency for BGE models.</em>
</p>
### Throughput Performance
In our throughput evaluation, we aim to search for peak encoding performance in terms of documents per second. We set text lengths to 256 tokens, as it is a good estimate of an average document in a RAG pipeline, and evaluate with different batch sizes (4, 8, 16, 32, 64, 128, 256).
Results show that the quantized models reach higher throughput values compared to the other models, and reach peak throughput at batch size 128. Overall, for all model sizes, the quantized model shows up to 4x improvement compared to the baseline `bf16` model in various batch sizes.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/178_intel_ipex_quantization/throughput_small.png" alt="throughput small" style="width: 60%; height: auto;"><br>
<em>Figure 2. Throughput for BGE small.</em>
</p>
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/178_intel_ipex_quantization/throughput_base.png" alt="throughput base" style="width: 60%; height: auto;"><br>
<em>Figure 3. Throughput for BGE base.</em>
</p>
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/178_intel_ipex_quantization/throughput_large.png" alt="throughput large" style="width: 60%; height: auto;"><br>
<em>Figure 4. Throughput for BGE large.</em>
</p>
## Optimized Embedding Models with fastRAG
<p align="center">
<a href="https://github.com/IntelLabs/fastRAG/">
<img src="https://github.com/IntelLabs/fastRAG/raw/main/assets/fastrag_header.png" style="float: right; width: 40%; height: auto; padding: 10">
</a>
</p>
As an example, we will demonstrate how to integrate the optimized Retrieval/Reranking models into [fastRAG](https://github.com/IntelLabs/fastRAG) (which can also be easily integrated into other RAG frameworks such as Langchain and LlamaIndex).
[fastRAG](https://github.com/IntelLabs/fastRAG) is a research framework, developed by [Intel Labs](https://www.intel.com/content/www/us/en/research/overview.html), for efficient and optimized retrieval augmented generative pipelines, incorporating state-of-the-art LLMs and Information Retrieval. fastRAG is fully compatible with [Haystack](https://haystack.deepset.ai/) and includes novel and efficient RAG modules for efficient deployment on Intel hardware.
To get started with fastRAG we invite readers to see the installation instructions [here](https://github.com/IntelLabs/fastRAG#round_pushpin-installation) and get started with fastRAG using our [guide](https://github.com/IntelLabs/fastRAG/blob/main/getting_started.md).
We integrated the optimized bi-encoder embedding models in two modules:
1. [`QuantizedBiEncoderRetriever`](https://github.com/IntelLabs/fastRAG/blob/main/fastrag/retrievers/optimized.py#L17) – for indexing and retrieving documents from a dense index
2. [`QuantizedBiEncoderRanker`](https://github.com/IntelLabs/fastRAG/blob/main/fastrag/rankers/quantized_bi_encoder.py#L17) – for reranking a list of documents using the embedding model as part of an elaborate retrieval pipeline.
### Fast indexing using the optimized Retriever
Let's create a dense index by using a dense retriever that utilizes an optimized embedding model.
First, create a document store:
```python
from haystack.document_stores import InMemoryDocumentStore
document_store = InMemoryDocumentStore(use_gpu=False, use_bm25=False, embedding_dim=384, return_embedding=True)
```
Then, add some documents to it:
```python
from haystack.schema import Document
# example documents to index
examples = [
"There is a blue house on Oxford Street.",
"Paris is the capital of France.",
"The first commit in fastRAG was in 2022"
]
documents = []
for i, d in enumerate(examples):
    documents.append(Document(content=d, id=i))
document_store.write_documents(documents)
```
Load a Retriever with an optimized bi-encoder embedding model, and encode all the documents in the document store:
```python
from fastrag.retrievers import QuantizedBiEncoderRetriever
model_id = "Intel/bge-small-en-v1.5-rag-int8-static"
retriever = QuantizedBiEncoderRetriever(document_store=document_store, embedding_model=model_id)
document_store.update_embeddings(retriever=retriever)
```
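With the embeddings in place, documents can be queried directly from the retriever; a minimal sketch, assuming the fastRAG retriever follows the standard Haystack retriever interface, with `top_k` as an example value:
```python
retrieved_docs = retriever.retrieve(query="What is the capital of France?", top_k=2)
for doc in retrieved_docs:
    print(doc.content)
```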
### Reranking using the Optimized Ranker
Below is an example of loading an optimized model into a ranker node that encodes and re-ranks the documents retrieved from the index given a query:
```python
from haystack import Pipeline
from fastrag.rankers import QuantizedBiEncoderRanker
ranker = QuantizedBiEncoderRanker("Intel/bge-large-en-v1.5-rag-int8-static")
p = Pipeline()
p.add_node(component=retriever, name="retriever", inputs=["Query"])
p.add_node(component=ranker, name="ranker", inputs=["retriever"])
results = p.run(query="What is the capital of France?")
# print the documents retrieved
print(results)
```
Done! The created pipeline can be used to retrieve documents from a document store and rank the retrieved documents using (another) embedding model to re-order the documents.
A more complete example is provided in this [notebook](https://github.com/IntelLabs/fastRAG/blob/main/examples/optimized-embeddings.ipynb).
For more RAG-related methods, models and examples we invite the readers to explore [fastRAG/examples](https://github.com/IntelLabs/fastRAG/tree/main/examples) notebooks.
| 2 |
0 | hf_public_repos | hf_public_repos/blog/fetch-eap-case-study.md | ---
title: "Fetch Consolidates AI Tools and Saves 30% Development Time with Hugging Face on AWS"
thumbnail: /blog/assets/78_ml_director_insights/fetch2.png
authors:
- user: Violette
---
# Fetch Consolidates AI Tools and Saves 30% Development Time with Hugging Face on AWS
_If you need support in using Hugging Face and AWS, please get in touch with us [here](https://huggingface.co/contact/sales?from=support) - our team will contact you to discuss your requirements!_
## Executive Summary
Fetch, a consumer rewards company, developed about 15 different AI tools to help it receive, route, read, process, analyze, and store receipts uploaded by users. The company has more than 18 million active monthly users for its shopping rewards app. Fetch wanted to rebuild its AI-powered platform and, using Amazon Web Services (AWS) and with the support of AWS Partner Hugging Face, moved from using third-party applications to developing its own tools to gain better insights about customers. Consumers scan receipts —or forward electronic receipts— to receive rewards points for their purchases. Businesses can offer special rewards to users, such as extra points for purchasing a particular product. The company can now process more than 11 million receipts per day faster and gets better data.
## Fetch Needed a Scalable Way to Train AI Faster
[Fetch](https://fetch.com/)—formerly Fetch Rewards—has grown since its founding to serve 18 million active users every month who scan 11 million receipts every day to earn reward points. Users simply take a picture of their receipt and upload it using the company’s app. Users can also upload electronic receipts. Receipts earn points; if the receipt is from a brand partner of Fetch, it may qualify for promotions that award additional points. Those points can be redeemed for gift cards from a number of partners. But scanning is just the beginning. Once Fetch receives the receipts, it must process them, extracting data and analytics and filing the data and the receipts. It has been using artificial intelligence (AI) tools running on AWS to do that.
The company was using an AI solution from a third party to process receipts, but found it wasn’t getting the data insights it needed. Fetch’s business partners wanted information about how customers were engaging with their promotions, and Fetch didn’t have the granularity it needed to extract and process data from millions of receipts daily. “Fetch was using a third-party provider for its brain, which is scanning receipts, but scanning is not enough,” says Boris Kogan, computer vision scientist at Fetch. “That solution was a black box and we had no control or insight into what it did. We just got results we had to accept. We couldn’t give our business partners the information they wanted.”
Kogan joined Fetch tasked with the job of building thorough machine learning (ML) and AI expertise into the company and giving it full access to all aspects of the data it was receiving. To do this, he hired a team of engineers to bring his vision to life. “All of our infrastructure runs on AWS, we also rely on the AWS products to train our models,” says Kogan. “When the team started working on creating a brain of our own, of course, we first had to train our models and we did that on AWS. We allocated 12 months for the project and completed it in 8 months because we always had the resources we needed.”
## Hugging Face Opens Up the Black Box
The Fetch team engaged with [AWS Partner](https://partners.amazonaws.com/partners/0010h00001jBrjVAAS/Hugging%20Face%20Inc) [Hugging Face](https://huggingface.co/) through the [Hugging Face Expert Acceleration Program](https://aws.amazon.com/marketplace/pp/prodview-z6gp22wkcvdt2/) on the AWS Marketplace to help Fetch unlock new tools to power processes after the scans had been uploaded. Hugging Face is a leader in open-source AI and provides guidance to enterprises on using AI. Many enterprises, including Fetch, use transformers from Hugging Face, which allow users to train and deploy open-source ML models in minutes. “Easy access to [Transformers](https://huggingface.co/docs/transformers/index) models is something that started with Hugging Face, and they're great at that,” says Kogan. The Fetch and Hugging Face teams worked to identify and train state-of-the-art document AI models, improving entity resolution and semantic search.
In this relationship, Hugging Face acted in an advisory capacity, transferring knowledge to help the Fetch engineers use its resources more effectively. “Fetch had a great team in place,” says Yifeng Yin, machine learning engineer at Hugging Face. “They didn't need us to come in and run the project or build it. They wanted to learn how to use Hugging Face to train the models they were building. We showed them how to use the resources, and they ran with it.” With Yifeng’s guidance, Fetch was able to cut its development time by 30 percent.
Because it was building its own AI and ML models to take over from the third-party ‘brain’, it needed to ensure a robust system that produced good results before switching over. Fetch required doing this without interrupting the flow of millions of receipts every day. “Before we rolled anything out, we built a shadow pipeline,” says Sam Corzine, lead machine learning engineer at Fetch. “We took all the things and reprocessed them in our new ML pipeline. We could do audits of everything. It was running full volume, reprocessing all of those 11 million receipts and doing analytics on them for quite a while before anything made it into the main data fields. The black box was still running the show and we were checking our results against it.” The solution uses [Amazon SageMaker](https://aws.amazon.com/sagemaker/)—which lets businesses build, train, and deploy ML models for any use case with fully managed infrastructure, tools, and workflows. It also uses [AWS Inferentia](https://aws.amazon.com/machine-learning/inferentia/) accelerators to deliver high performance at the lowest cost for deep learning (DL) inference applications.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/llama2-non-engineers/fetch3.jpg"><br>
</p>
## Fetch Grows AI Expertise, Cuts Latency by 50%, and Saves Costs
Fetch’s commitment to developing in-house ML and AI capabilities has resulted in several benefits, including some cost savings, but more important is the development of a service that better serves the needs of the customers. “With any app you have to give the customer a reason to keep coming back,” says Corzine. “We’ve improved responsiveness for customers with faster processing of uploads, cutting processing latency by 50 percent. If you keep customers waiting too long, they’ll disengage. And the more customers use Fetch, the better understanding we and our partners get about what’s important to them. By building our own models, we get details we never had before.”
The company can now train a model in hours instead of the days or weeks it used to take. Development time has also been reduced by about 30 percent. And while it may not be possible to put a number to it, another major benefit has been creating a more stable foundation for Fetch. “Relying on a third-party black box presented considerable business risk to us,” says Corzine. “Because Hugging Face existed and its community existed, we were able to use that tooling and work with that community. At the end of the day, we now control our destiny.”
Fetch is continuing to improve the service to customers and gain a better understanding of customer behavior now that it is an AI-first company, rather than a company that uses a third-party AI ‘brain’. “Hugging Face and AWS gave us the infrastructure and the resources to do what we need,” says Kogan. “Hugging Face has democratized transformer models, models that were nearly impossible to train, and made them available to anyone. We couldn’t have done this without them.”
_This article is a cross-post from an originally published post on February 2024 [on AWS's website](https://aws.amazon.com/fr/partners/success/fetch-hugging-face/)._
| 3 |
0 | hf_public_repos | hf_public_repos/blog/tf-xla-generate.md | ---
title: 'Faster Text Generation with TensorFlow and XLA'
thumbnail: /blog/assets/91_tf_xla_generate/thumbnail.png
authors:
- user: joaogante
---
# Faster Text Generation with TensorFlow and XLA
<em>TL;DR</em>: Text Generation on 🤗 `transformers` using TensorFlow can now be compiled with XLA. It is up to 100x
faster than before, and [even faster than PyTorch](https://huggingface.co/spaces/joaogante/tf_xla_generate_benchmarks)
-- check the colab below!
<a target="_blank" href="https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/91_tf_xla_generate.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
## Text Generation
As the quality of large language models increased, so did our expectations of what those models could do. Especially
since the release of OpenAI's [GPT-2](https://openai.com/blog/better-language-models/), models with text
generation capabilities have been in the spotlight. And for legitimate reasons -- these models can be used to
summarize and translate, and they have even demonstrated zero-shot learning capabilities on some language tasks.
This blog post will show how to take the most of this technology with TensorFlow.
The 🤗 `transformers` library started with NLP models, so it is natural that text generation is of utmost
importance to us.
It is part of Hugging Face's democratization efforts to ensure it is accessible, easily controllable, and efficient.
There is a previous [blog post](https://huggingface.co/blog/how-to-generate) about the different types of text
generation. Nevertheless, below there's a quick recap of the core functionality -- feel free to
[skip it](#tensorflow-and-xla) if you're
familiar with our `generate` function and want to jump straight into TensorFlow's specificities.
Let's start with the basics. Text generation can be deterministic or stochastic, depending on the
`do_sample` flag. By default it's set to `False`, causing the output to be deterministic, which is also known as
Greedy Decoding.
When it's set to `True`, also known as Sampling, the output will be stochastic, but you can still
obtain reproducible results through the `seed` argument (with the same format as in [stateless TensorFlow random
number generation](https://www.tensorflow.org/api_docs/python/tf/random/stateless_categorical#args)).
As a rule of thumb, you want deterministic generation if you wish
to obtain factual information from the model and stochastic generation if you're aiming at more creative outputs.
```python
# Requires transformers >= 4.21.0;
# Sampling outputs may differ, depending on your hardware.
from transformers import AutoTokenizer, TFAutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFAutoModelForCausalLM.from_pretrained("gpt2")
model.config.pad_token_id = model.config.eos_token_id
inputs = tokenizer(["TensorFlow is"], return_tensors="tf")
generated = model.generate(**inputs, do_sample=True, seed=(42, 0))
print("Sampling output: ", tokenizer.decode(generated[0]))
# > Sampling output: TensorFlow is a great learning platform for learning about
# data structure and structure in data science..
```
Depending on the target application, longer outputs might be desirable. You can control the length of the generation
output with `max_new_tokens`, keeping in mind that longer generations will require more resources.
```python
generated = model.generate(
**inputs, do_sample=True, seed=(42, 0), max_new_tokens=5
)
print("Limiting to 5 new tokens:", tokenizer.decode(generated[0]))
# > Limiting to 5 new tokens: TensorFlow is a great learning platform for
generated = model.generate(
**inputs, do_sample=True, seed=(42, 0), max_new_tokens=30
)
print("Limiting to 30 new tokens:", tokenizer.decode(generated[0]))
# > Limiting to 30 new tokens: TensorFlow is a great learning platform for
# learning about data structure and structure in data science................
```
Sampling has a few knobs you can play with to control randomness. The most important is `temperature`, which sets the overall entropy
of your output -- values below `1.0` will prioritize sampling tokens with a higher likelihood, whereas values above `1.0`
do the opposite. Setting it to `0.0` reduces the behavior to Greedy Decoding, whereas very large values approximate
uniform sampling.
```python
generated = model.generate(
**inputs, do_sample=True, seed=(42, 0), temperature=0.7
)
print("Temperature 0.7: ", tokenizer.decode(generated[0]))
# > Temperature 0.7: TensorFlow is a great way to do things like this........
generated = model.generate(
**inputs, do_sample=True, seed=(42, 0), temperature=1.5
)
print("Temperature 1.5: ", tokenizer.decode(generated[0]))
# > Temperature 1.5: TensorFlow is being developed for both Cython and Bamboo.
# On Bamboo...
```
Contrary to Sampling, Greedy Decoding will always pick the most likely token at each iteration of generation.
However, it often results in sub-optimal outputs. You can increase the quality of the results through the `num_beams`
argument. When it is larger than `1`, it triggers Beam Search, which continuously explores high-probability sequences.
This exploration comes at the cost of additional resources and computational time.
```python
generated = model.generate(**inputs, num_beams=2)
print("Beam Search output:", tokenizer.decode(generated[0]))
# > Beam Search output: TensorFlow is an open-source, open-source,
# distributed-source application framework for the
```
Finally, when running Sampling or Beam Search, you can use `num_return_sequences` to return several sequences. For
Sampling it is equivalent to running generate multiple times from the same input prompt, while for Beam Search it
returns the highest scoring generated beams in descending order.
```python
generated = model.generate(**inputs, num_beams=2, num_return_sequences=2)
print(
"All generated hypotheses:",
"\n".join(tokenizer.decode(out) for out in generated)
)
# > All generated hypotheses: TensorFlow is an open-source, open-source,
# distributed-source application framework for the
# > TensorFlow is an open-source, open-source, distributed-source application
# framework that allows
```
The basics of text generation, as you can see, are straightforward to control. However, there are many options
not covered in the examples above, and it's encouraged to read the
[documentation](https://huggingface.co/docs/transformers/main/en/main_classes/text_generation#transformers.generation_tf_utils.TFGenerationMixin.generate)
for advanced use cases.
Sadly, when you run `generate` with TensorFlow, you might notice that it takes a while to execute.
If your target application expects low latency or a large amount of input prompts, running text generation with
TensorFlow looks like an expensive endeavour. 😬
Fear not, for the remainder of this blog post aims to demonstrate that one line of code can make a drastic improvement.
If you'd rather jump straight into action,
[the colab](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/91_tf_xla_generate.ipynb)
has an interactive example you can fiddle with!
## TensorFlow and XLA
[XLA](https://www.tensorflow.org/xla), or Accelerated Linear Algebra, is a compiler originally developed to accelerate
TensorFlow models. Nowadays, it is also the compiler behind [JAX](https://github.com/google/jax), and it can even
be [used with PyTorch](https://huggingface.co/blog/pytorch-xla). Although the word "compiler" might sound daunting for
some, XLA is simple to use with TensorFlow -- it comes packaged inside the `tensorflow` library, and it can be
triggered with the `jit_compile` argument in any graph-creating function.
For those of you familiar with TensorFlow 1 🧓, the concept of a TensorFlow graph comes naturally, as it was the only
mode of operation. First, you defined the operations in a declarative fashion to create a graph. Afterwards, you could
pipe inputs through the graph and observe the outputs. Fast, efficient, but painful to debug. With TensorFlow 2 came
Eager Execution and the ability to code the models imperatively -- the TensorFlow team explains the difference in more
detail in [their blog post](https://blog.tensorflow.org/2019/01/what-are-symbolic-and-imperative-apis.html).
Hugging Face writes their TensorFlow models with Eager Execution in mind. Transparency is a core value, and being able
to inspect the model internals at any point is very beneficial to that end. However, that does mean that some uses of
the models do not benefit from the graph mode performance advantages out of the box (e.g. when calling `model(args)`).
Fortunately, the TensorFlow team has users like us covered 🥳! Wrapping a function containing TensorFlow code with
[`tf.function`](https://www.tensorflow.org/api_docs/python/tf/function) will attempt to convert it into a graph when
you call the wrapped function. If you're training a model, calling `model.compile()` (without `run_eagerly=True`) does
precisely that wrapping, so that you benefit from graph mode when you call `model.fit()`. Since `tf.function` can be
used in any function containing TensorFlow code, it means you can use it on functions that go beyond model inference,
creating a single optimized graph.
Now that you know how to create TensorFlow graphs, compiling them with XLA is straightforward -- simply add `jit_compile=True`
as an argument to the functions mentioned above (`tf.function` and `tf.keras.Model.compile`). Assuming everything went well
(more on that below) and that you are using a GPU or a TPU, you will notice that the first call will take a while, but
that the remaining ones are much, much faster. Here's a simple example of a function that performs model inference and some post-processing of its outputs:
```python
# Note: execution times are deeply dependent on hardware -- a 3090 was used here.
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFAutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer(["TensorFlow is"], return_tensors="tf")
def most_likely_next_token(inputs):
    model_output = model(inputs)
    return tf.argmax(model_output.logits[:, -1, :], axis=-1)
print("Calling regular function with TensorFlow code...")
most_likely_next_token(inputs)
# > Execution time -- 48.8 ms
```
In one line, you can create an XLA-accelerated function from the function above.
```python
xla_most_likely_next_token = tf.function(most_likely_next_token, jit_compile=True)
print("Calling XLA function... (for the first time -- will be slow)")
xla_most_likely_next_token(inputs)
# > Execution time -- 3951.0 ms
print("Calling XLA function... (for the second time -- will be fast)")
xla_most_likely_next_token(inputs)
# > Execution time -- 1.6 ms
```
## Text Generation using TensorFlow with XLA
As with any optimization procedure, there is no free lunch -- XLA is no exception. From the perspective of a text
generation user, there is only one technical aspect that you need to keep in mind. Without digging too much into
[details](https://www.tensorflow.org/guide/function#rules_of_tracing), XLA used in this fashion does just-in-time (JIT)
compilation of a `tf.function` when you call it, which relies on polymorphism.
When you compile a function this way, XLA keeps track of the shape and type of every tensor, as well as the data of
every non-tensor function input. The function is compiled to a binary, and every time it is called with the same tensor
shape and type (with ANY tensor data) and the same non-tensor arguments, the compiled function can be reused.
Conversely, if you call the function with a different shape or type in an input tensor, or if you use a different
non-tensor argument, then a new costly compilation step will take place. Summarized in a simple example:
```python
# Note: execution times are deeply dependent on hardware -- a 3090 was used here.
import tensorflow as tf
@tf.function(jit_compile=True)
def max_plus_constant(tensor, scalar):
    return tf.math.reduce_max(tensor) + scalar
# Slow: XLA compilation will kick in, as it is the first call
max_plus_constant(tf.constant([0, 0, 0]), 1)
# > Execution time -- 520.4 ms
# Fast: Not the first call with this tensor shape, tensor type, and exact same
# non-tensor argument
max_plus_constant(tf.constant([1000, 0, -10]), 1)
# > Execution time -- 0.6 ms
# Slow: Different tensor type
max_plus_constant(tf.constant([0, 0, 0], dtype=tf.int64), 1)
# > Execution time -- 27.1 ms
# Slow: Different tensor shape
max_plus_constant(tf.constant([0, 0, 0, 0]), 1)
# > Execution time -- 25.5 ms
# Slow: Different non-tensor argument
max_plus_constant(tf.constant([0, 0, 0]), 2)
# > Execution time -- 24.9 ms
```
In practice, for text generation, it simply means the input should be padded to a multiple of a certain length (so it
has a limited number of possible shapes), and that using different options will be slow for the first time you use
them. Let's see what happens when you naively call generation with XLA.
```python
# Note: execution times are deeply dependent on hardware -- a 3090 was used here.
import time
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM
# Notice the new argument, `padding_side="left"` -- decoder-only models, which can
# be instantiated with TFAutoModelForCausalLM, should be left-padded, as they
# continue generating from the input prompt.
tokenizer = AutoTokenizer.from_pretrained(
"gpt2", padding_side="left", pad_token="</s>"
)
model = TFAutoModelForCausalLM.from_pretrained("gpt2")
model.config.pad_token_id = model.config.eos_token_id
input_1 = ["TensorFlow is"]
input_2 = ["TensorFlow is a"]
# One line to create a XLA generation function
xla_generate = tf.function(model.generate, jit_compile=True)
# Calls XLA generation without padding
tokenized_input_1 = tokenizer(input_1, return_tensors="tf") # length = 4
tokenized_input_2 = tokenizer(input_2, return_tensors="tf") # length = 5
print(f"`tokenized_input_1` shape = {tokenized_input_1.input_ids.shape}")
print(f"`tokenized_input_2` shape = {tokenized_input_2.input_ids.shape}")
print("Calling XLA generation with tokenized_input_1...")
print("(will be slow as it is the first call)")
start = time.time_ns()
xla_generate(**tokenized_input_1)
end = time.time_ns()
print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n")
# > Execution time -- 9565.1 ms
print("Calling XLA generation with tokenized_input_2...")
print("(has a different length = will trigger tracing again)")
start = time.time_ns()
xla_generate(**tokenized_input_2)
end = time.time_ns()
print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n")
# > Execution time -- 6815.0 ms
```
Oh no, that's terribly slow! A solution to keep the different combinations of shapes in check is through padding,
as mentioned above. The tokenizer classes have a `pad_to_multiple_of` argument that can be used to achieve a balance
between accepting any input length and limiting tracing.
```python
padding_kwargs = {"pad_to_multiple_of": 8, "padding": True}
tokenized_input_1_with_padding = tokenizer(
input_1, return_tensors="tf", **padding_kwargs
) # length = 8
tokenized_input_2_with_padding = tokenizer(
input_2, return_tensors="tf", **padding_kwargs
) # length = 8
print(
"`tokenized_input_1_with_padding` shape = ",
f"{tokenized_input_1_with_padding.input_ids.shape}"
)
print(
"`tokenized_input_2_with_padding` shape = ",
f"{tokenized_input_2_with_padding.input_ids.shape}"
)
print("Calling XLA generation with tokenized_input_1_with_padding...")
print("(slow, first time running with this length)")
start = time.time_ns()
xla_generate(**tokenized_input_1_with_padding)
end = time.time_ns()
print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n")
# > Execution time -- 6815.4 ms
print("Calling XLA generation with tokenized_input_2_with_padding...")
print("(will be fast!)")
start = time.time_ns()
xla_generate(**tokenized_input_2_with_padding)
end = time.time_ns()
print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n")
# > Execution time -- 19.3 ms
```
That's much better, successive generation calls performed this way will be orders of magnitude faster than before.
Keep in mind that trying new generation options, at any point, will trigger tracing.
```python
print("Calling XLA generation with the same input, but with new options...")
print("(slow again)")
start = time.time_ns()
xla_generate(**tokenized_input_1_with_padding, num_beams=2)
end = time.time_ns()
print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n")
# > Execution time -- 9644.2 ms
```
From a developer perspective, relying on XLA implies being aware of a few additional nuances. XLA shines when the size
of the data structures are known in advance, such as in model training. On the other hand, when their dimensions are
impossible to determine or certain dynamic slices are used, XLA fails to compile. Modern implementations of text
generation are auto-regressive: their natural behavior is to expand tensors and to abruptly interrupt some operations
as they go -- in other words, not XLA-friendly by default.
We have [rewritten our entire TensorFlow text generation codebase](https://github.com/huggingface/transformers/pull/17857)
to vectorize operations and use fixed-sized
structures with padding. Our NLP models were also modified to correctly use their positional embeddings in the
presence of padded structures. The result should be invisible to TensorFlow text generation users, except for the
availability of XLA compilation.
## Benchmarks and Conclusions
Above you saw that you can convert TensorFlow functions into a graph and accelerate them with XLA compilation.
Current forms of text generation are simply auto-regressive functions that alternate between a model forward pass
and some post-processing, producing one token per iteration. Through XLA compilation, the entire process gets
optimized, resulting in faster execution. But how much faster? The [Gradio demo below](https://huggingface.co/spaces/joaogante/tf_xla_generate_benchmarks) contains some benchmarks
comparing Hugging Face's text generation on multiple GPU models for the two main ML frameworks, TensorFlow and PyTorch.
<div class="hidden xl:block">
<div style="display: flex; flex-direction: column; align-items: center;">
<iframe src="https://joaogante-tf-xla-generate-benchmarks.hf.space" frameBorder="0" width="1200px" height="760px" title="Gradio app" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
</div>
</div>
If you explore the results, two conclusions become quickly visible:
1. As this blog post has been building up to here, TensorFlow text generation is much faster when XLA is used. We are
talking about speedups larger than 100x in some cases, which truly demonstrates the power of a compiled graph 🚀
2. TensorFlow text generation with XLA is the fastest option in the vast majority of cases, in some of them by as
much as 9x faster, debunking the myth that PyTorch is the go-to framework for serious NLP tasks 💪
Give [the colab](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/91_tf_xla_generate.ipynb)
a go, and enjoy the power of text generation supercharged with XLA!
| 4 |
0 | hf_public_repos | hf_public_repos/blog/_redirects.yml | # Redirects file. The format should be: `old_name: new_name`.
# Example:
# starcoder3: starcoder2
# redirects hf.co/blog/starcoder3 -> hf.co/blog/starcoder2
leaderboards-on-the-hub-vectara: leaderboard-vectara
leaderboards-on-the-hub-upstage: leaderboard-upstage
leaderboards-on-the-hub-haizelab: leaderboard-haizelab
leaderboards-on-the-hub-patronus: leaderboard-patronus
leaderboards-on-the-hub-nphardeval: leaderboard-nphardeval
leaderboards-on-the-hub-decodingtrust: leaderboard-decodingtrust
leaderboards-on-the-hub-hallucinations: leaderboard-hallucinations
llm-leaderboard: open-llm-leaderboard-rlhf
evaluating-mmlu-leaderboard: open-llm-leaderboard-mmlu
leaderboard-drop-dive: open-llm-leaderboard-drop
keras-nlp-integration: keras-hub-integration | 5 |
0 | hf_public_repos | hf_public_repos/blog/livebook-app-deployment.md | ---
title: "Deploy Livebook notebooks as apps to Hugging Face Spaces"
thumbnail: /blog/assets/120_elixir-bumblebee/thumbnail.png
authors:
- user: josevalim
guest: true
---
# Deploy Livebook notebooks as apps to Hugging Face Spaces
The [Elixir](https://elixir-lang.org/) community has been making great strides towards Machine Learning and Hugging Face is playing an important role on making it possible. To showcase what you can already achieve with Elixir and Machine Learning today, we use [Livebook](https://livebook.dev/) to build a Whisper-based chat app and then deploy it to Hugging Face Spaces. All under 15 minutes, check it out:
<iframe width="100%" style="aspect-ratio: 16 / 9;"src="https://www.youtube.com/embed/uyVRPEXOqzw" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
In this chat app, users can communicate only by sending audio messages, which are then automatically converted to text by the Whisper Machine Learning model.
This app showcases a few interesting features from Livebook and the Machine Learning ecosystem in Elixir:
- integration with Hugging Face Models
- multiplayer Machine Learning apps
- concurrent Machine Learning model serving (bonus point: [you can also distribute model servings over a cluster just as easily](https://news.livebook.dev/distributed2-machine-learning-notebooks-with-elixir-and-livebook---launch-week-1---day-2-1aIlaw))
If you don't know Livebook yet, it is an open-source tool for writing interactive code notebooks in Elixir, and it's part of the [growing collection of Elixir tools](https://github.com/elixir-nx) for numerical computing, data science, and Machine Learning.
## Hugging Face and Elixir
The Elixir community leverages the Hugging Face platform and its open source projects throughout its machine learning landscape. Here are some examples.
The first positive impact Hugging Face had was in the [Bumblebee library](https://github.com/elixir-nx/bumblebee), which brought pre-trained neural network models from Hugging Face to the Elixir community and was inspired by [Hugging Face Transformers](https://huggingface.co/docs/transformers/index). Besides the inspiration, Bumblebee also uses the Hugging Face Hub to download parameters for its models.
Another example is the [tokenizers library](https://github.com/elixir-nx/tokenizers), which is an Elixir binding for [Hugging Face Tokenizers](https://github.com/huggingface/tokenizers).
And last but not least, [Livebook can run inside Hugging Face Spaces](https://huggingface.co/docs/hub/spaces-sdks-docker-livebook) with just a few clicks as one of their Space Docker templates. So, not only can you deploy Livebook apps to Hugging Face, but you can also use it to run Livebook for free to write and experiment with your own notebooks.
## Your turn
We hope this new integration between Livebook and Hugging Face empowers even more people to use Machine Learning and show their work to the world.
Go ahead and [install Livebook on Hugging Face Spaces](https://huggingface.co/docs/hub/spaces-sdks-docker-livebook), and [follow our video tutorial](https://www.youtube.com/watch?v=uyVRPEXOqzw) to build and deploy your first Livebook ML app to Hugging Face. | 6 |
0 | hf_public_repos | hf_public_repos/blog/pytorch-fsdp.md | ---
title: "Accelerate Large Model Training using PyTorch Fully Sharded Data Parallel"
thumbnail: /blog/assets/62_pytorch_fsdp/fsdp-thumbnail.png
authors:
- user: smangrul
- user: sgugger
---
# Accelerate Large Model Training using PyTorch Fully Sharded Data Parallel
In this post we will look at how we can leverage **[Accelerate](https://github.com/huggingface/accelerate)** Library for training large models which enables users to leverage the latest features of **[PyTorch FullyShardedDataParallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/)**.
## Motivation 🤗
**With the ever increasing scale, size and parameters of the Machine Learning (ML) models, ML practitioners are finding it difficult to train or even load such large models on their hardware.** On one hand, it has been found that large models learn quickly (data and compute efficient) and are significantly more performant when compared to smaller models [1]; on the other hand, it becomes prohibitive to train such models on most of the available hardware.
Distributed training is the key to enabling the training of such large ML models. There have been major recent advances in the field of **Distributed Training at Scale**. A few of the most notable advances are given below:
1. Data Parallelism using ZeRO - Zero Redundancy Optimizer [2]
    1. Stage 1: Shards optimizer states across data parallel workers/GPUs
    2. Stage 2: Shards optimizer states + gradients across data parallel workers/GPUs
    3. Stage 3: Shards optimizer states + gradients + model parameters across data parallel workers/GPUs
    4. CPU Offload: Offloads the gradients + optimizer states to CPU, building on top of ZeRO Stage 2 [3]
2. Tensor Parallelism [4]: A form of model parallelism wherein the parameters of individual layers with a huge number of parameters are sharded across accelerators/GPUs in a clever manner, achieving parallel computation while avoiding expensive communication synchronization overheads.
3. Pipeline Parallelism [5]: Form of model parallelism wherein different layers of the model are put across different accelerators/GPUs and pipelining is employed to keep all the accelerators running simultaneously. Here, for instance, the second accelerator/GPU computes on the first micro-batch while the first accelerator/GPU computes on the second micro-batch.
4. 3D parallelism [3]: Employs Data Parallelism using ZeRO + Tensor Parallelism + Pipeline Parallelism to train humongous models in the order of 100s of billions of parameters. For instance, the BigScience 176B parameter language model employs this [6].
In this post we will look at Data Parallelism using ZeRO and, more specifically, the latest PyTorch feature **[FullyShardedDataParallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/)**. **[DeepSpeed](https://github.com/microsoft/deepspeed)** and **[FairScale](https://github.com/facebookresearch/fairscale/)** have implemented the core ideas of the ZeRO paper. These have already been integrated into the `transformers` Trainer and are accompanied by the great blog post [Fit More and Train Faster With ZeRO via DeepSpeed and FairScale](https://huggingface.co/blog/zero-deepspeed-fairscale) [10]. PyTorch recently upstreamed the FairScale FSDP into PyTorch Distributed with additional optimizations.
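For a rough sense of scale (a back-of-the-envelope estimate following the memory analysis in the ZeRO paper [2], ignoring activations): with the Adam optimizer, every parameter typically costs on the order of 16 bytes of model state per GPU (weights, gradients, and two optimizer moments). For GPT-2 XL (1.5B parameters) that is roughly 16 x 1.5B ≈ 24GB per GPU under plain data parallelism, which already fills a 24GB card before a single activation is stored. Sharding those states across N data parallel workers (ZeRO Stage 3) divides that footprint by N, and CPU offload moves it off the GPU entirely, which is exactly what the benchmarks below demonstrate.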
## Accelerate 🚀: Leverage PyTorch FSDP without any code changes
We will look at the task of Causal Language Modelling using GPT-2 Large (762M) and XL (1.5B) model variants.
Below is the code for pre-training the GPT-2 model. It is similar to the official causal language modeling example [here](https://github.com/huggingface/transformers/blob/main/examples/pytorch/language-modeling/run_clm_no_trainer.py) with the addition of 2 arguments `n_train` (2000) and `n_val` (500) to avoid preprocessing/training on the entire data, in order to perform quick proof-of-concept benchmarks.
<a href="./assets/62_pytorch_fsdp/run_clm_no_trainer.py" target="_parent">run_clm_no_trainer.py</a>
Sample FSDP config after running the command `accelerate config`:
```bash
compute_environment: LOCAL_MACHINE
deepspeed_config: {}
distributed_type: FSDP
fsdp_config:
min_num_params: 2000
offload_params: false
sharding_strategy: 1
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: 'no'
num_machines: 1
num_processes: 2
use_cpu: false
```
### Multi-GPU FSDP
Here, we experiment on the Single-Node Multi-GPU setting. We compare the performance of Distributed Data Parallel (DDP) and FSDP in various configurations. First, the GPT-2 Large (762M) model is used, wherein DDP works with certain batch sizes without throwing Out Of Memory (OOM) errors. Next, the GPT-2 XL (1.5B) model is used, wherein DDP fails with an OOM error even with a batch size of 1. We observe that FSDP enables larger batch sizes for the GPT-2 Large model and, unlike DDP, it enables training the GPT-2 XL model with a decent batch size.
**Hardware setup**: 2X24GB NVIDIA Titan RTX GPUs.
Command for training GPT-2 Large Model (762M parameters):
```bash
# Try different batch sizes till you don't get an OOM error:
# start with a larger batch size and keep decreasing it till it fits on the GPU.
export BS=
time accelerate launch run_clm_no_trainer.py \
--model_name_or_path gpt2-large \
--dataset_name wikitext \
--dataset_config_name wikitext-2-raw-v1 \
--per_device_train_batch_size $BS \
--per_device_eval_batch_size $BS \
--num_train_epochs 1 \
--block_size 12
```
Sample FSDP Run:

| Method | Batch Size Max ($BS) | Approx Train Time (minutes) | Notes |
| --- | --- | --- | --- |
| DDP (Distributed Data Parallel) | 7 | 15 | |
| DDP + FP16 | 7 | 8 | |
| FSDP with SHARD_GRAD_OP | 11 | 11 | |
| FSDP with min_num_params = 1M + FULL_SHARD | 15 | 12 | |
| FSDP with min_num_params = 2K + FULL_SHARD | 15 | 13 | |
| FSDP with min_num_params = 1M + FULL_SHARD + Offload to CPU | 20 | 23 | |
| FSDP with min_num_params = 2K + FULL_SHARD + Offload to CPU | 22 | 24 | |
Table 1: Benchmarking FSDP on GPT-2 Large (762M) model
With respect to DDP, from Table 1 we can observe that FSDP **enables larger batch sizes**, up to **2X-3X** larger without and with the CPU offload setting, respectively. In terms of train time, DDP with mixed precision is the fastest, followed by FSDP using ZeRO Stage 2 and Stage 3, respectively. As the task of causal language modelling always has a fixed context sequence length (--block_size), the train time speedup with FSDP wasn’t that great. For applications with dynamic batching, FSDP, which enables larger batch sizes, will likely have a considerable speedup in terms of train time. FSDP mixed precision support currently has a few [issues](https://github.com/pytorch/pytorch/issues/75676) with transformer models. Once this is supported, the training time speedup will improve considerably.
#### CPU Offloading to enable training humongous models that won’t fit the GPU memory
Command for training GPT-2 XL Model (1.5B parameters):
```bash
# Try different batch sizes till you don't get an OOM error:
# start with a larger batch size and keep decreasing it till it fits on the GPU.
export BS=
time accelerate launch run_clm_no_trainer.py \
--model_name_or_path gpt2-xl \
--dataset_name wikitext \
--dataset_config_name wikitext-2-raw-v1 \
--per_device_train_batch_size $BS \
--per_device_eval_batch_size $BS \
--num_train_epochs 1 \
--block_size 12
```
| Method | Batch Size Max ($BS) | Num GPUs | Approx Train Time (Hours) | Notes |
| --- | --- | --- | --- | --- |
| DDP | 1 | 1 | NA | OOM Error RuntimeError: CUDA out of memory. Tried to allocate 40.00 MiB (GPU 0; 23.65 GiB total capacity; 22.27 GiB already allocated; 20.31 MiB free; 22.76 GiB reserved in total by PyTorch) |
| DDP | 1 | 2 | NA | OOM Error RuntimeError: CUDA out of memory. Tried to allocate 40.00 MiB (GPU 0; 23.65 GiB total capacity; 22.27 GiB already allocated; 20.31 MiB free; 22.76 GiB reserved in total by PyTorch) |
| DDP + FP16 | 1 | 1 | NA | OOM Error RuntimeError: CUDA out of memory. Tried to allocate 40.00 MiB (GPU 0; 23.65 GiB total capacity; 22.27 GiB already allocated; 20.31 MiB free; 22.76 GiB reserved in total by PyTorch) |
| FSDP with min_num_params = 2K | 5 | 2 | 0.6 | |
| FSDP with min_num_params = 2K + Offload to CPU | 10 | 1 | 3 | |
| FSDP with min_num_params = 2K + Offload to CPU | 14 | 2 | 1.16 | |
Table 2: Benchmarking FSDP on GPT-2 XL (1.5B) model
From Table 2, we can observe that DDP (with and without fp16) isn’t even able to run with a batch size of 1 and results in a CUDA OOM error. FSDP with ZeRO Stage 3 can run on 2 GPUs with a batch size of 5 (effective batch size = 10 (5 x 2)). FSDP with CPU offload can further increase the max batch size to 14 per GPU when using 2 GPUs. **FSDP with CPU offload enables training the GPT-2 1.5B model on a single GPU with a batch size of 10**. This enables ML practitioners with minimal compute resources to train such large models, thereby democratizing large model training.
### Capabilities and limitations of the FSDP Integration
Let’s dive into the current support that Accelerate provides for FSDP integration and the known limitations.
**Required PyTorch version for FSDP support**: PyTorch Nightly (or 1.12.0 if you read this after it has been released), as model saving with FSDP activated is only available with recent fixes.
**Configuration through CLI:**
1. **Sharding Strategy**: [1] FULL_SHARD, [2] SHARD_GRAD_OP
2. **Min Num Params**: FSDP's minimum number of parameters for Default Auto Wrapping.
3. **Offload Params**: Decides whether to offload parameters and gradients to CPU.
For more control, users can leverage the `FullyShardedDataParallelPlugin` wherein they can specify `auto_wrap_policy`, `backward_prefetch` and `ignored_modules`.
After creating an instance of this class, users can pass it when creating the Accelerator object.
For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.
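For illustration, here is a minimal sketch of that flow, meant to be run under `accelerate launch` (treat it as a sketch: the plugin's available fields, their defaults and exact import locations can differ across Accelerate and PyTorch versions, so check the linked code for the versions you use):
```python
from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

from accelerate import Accelerator, FullyShardedDataParallelPlugin

# Configure the knobs that are not exposed through `accelerate config`
fsdp_plugin = FullyShardedDataParallelPlugin(
    backward_prefetch=BackwardPrefetch.BACKWARD_PRE,  # prefetch the next all-gather during backward
    ignored_modules=None,  # modules FSDP should leave unwrapped (None wraps everything eligible)
)

# Hand the plugin to the Accelerator at construction time
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```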
Next, we will see the importance of the `min_num_params` config. Below is an excerpt from [8] explaining the FSDP Auto Wrap Policy.

(Source: [link](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html))
When using the `default_auto_wrap_policy`, a layer is wrapped in an FSDP module if the number of parameters in that layer is more than `min_num_params`. The code for finetuning the BERT-Large (330M) model on the GLUE MRPC task is the official complete NLP example outlining how to properly use the FSDP feature, with the addition of utilities for tracking peak memory usage.
[fsdp_with_peak_mem_tracking.py](https://github.com/huggingface/accelerate/tree/main/examples/by_feature/fsdp_with_peak_mem_tracking.py)
We leverage the tracking functionality support in Accelerate to log the train and evaluation peak memory usage along with evaluation metrics. Below is the snapshot of the plots from wandb [run](https://wandb.ai/smangrul/FSDP-Test?workspace=user-smangrul).

We can observe that DDP takes twice as much memory as FSDP with auto wrap. FSDP without auto wrap takes more memory than FSDP with auto wrap, but considerably less than DDP. FSDP with auto wrap with min_num_params=2k takes marginally less memory when compared to the setting with min_num_params=1M. This highlights the importance of the FSDP Auto Wrap Policy: users should play around with `min_num_params` to find the setting which considerably saves memory without resulting in a lot of communication overhead. The PyTorch team is working on an auto-tuning tool for this config, as mentioned in [8].
#### **Few caveats to be aware of**
- PyTorch FSDP auto wraps sub-modules, flattens the parameters and shards the parameters in place. Due to this, any optimizer created before model wrapping gets broken and occupies more memory. Hence, it is highly recommended and efficient to prepare the model before creating the optimizer. In the case of a single model, `Accelerate` will automatically wrap the model and create an optimizer for you, with a warning message.
> FSDP Warning: When using FSDP, it is efficient and recommended to call prepare for the model before creating the optimizer
>
However, below is the recommended way to prepare model and optimizer while using FSDP:
```diff
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
+ model = accelerator.prepare(model)
optimizer = torch.optim.AdamW(params=model.parameters(), lr=lr)
- model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(model,
- optimizer, train_dataloader, eval_dataloader, lr_scheduler
- )
+ optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
+ optimizer, train_dataloader, eval_dataloader, lr_scheduler
+ )
```
- In the case of a single model, if you have created the optimizer with multiple parameter groups and called prepare with them together, then the parameter groups will be lost and the following warning is displayed:
> FSDP Warning: When using FSDP, several parameter groups will be conflated into a single one due to nested module wrapping and parameter flattening.
>
This is because parameter groups created before wrapping will have no meaning post wrapping due to parameter flattening of nested FSDP modules into 1D arrays (which can consume many layers). For instance, below are the named parameters of the FSDP model on GPU 0 (when using 2 GPUs; around 55M (110M/2) params are in 1D arrays, as this holds the 1st shard of the parameters). Here, if one has applied no weight decay for the [bias, LayerNorm.weight] named parameters of the unwrapped BERT-Base model, it can’t be applied to the below FSDP-wrapped model, as there are no named parameters with either of those strings and the parameters of those layers are concatenated with parameters of various other layers. More details are mentioned in this [issue](https://github.com/pytorch/pytorch/issues/76501) (`The original model parameters' .grads are not set, meaning that they cannot be optimized separately (which is why we cannot support multiple parameter groups)`).
```
{
'_fsdp_wrapped_module.flat_param': torch.Size([494209]),
'_fsdp_wrapped_module._fpw_module.bert.embeddings.word_embeddings._fsdp_wrapped_module.flat_param': torch.Size([11720448]),
'_fsdp_wrapped_module._fpw_module.bert.encoder._fsdp_wrapped_module.flat_param': torch.Size([42527232])
}
```
- In the case of multiple models, it is necessary to prepare the models before creating the optimizers, otherwise it will throw an error.
- Mixed precision is currently not supported with FSDP as we wait for PyTorch to fix support for it.
## How it works 📝

(Source: [link](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/))
The above workflow gives an overview of what happens behind the scenes when FSDP is activated. Let's first understand how DDP works and how FSDP improves on it. In DDP, each worker/accelerator/GPU has a replica of the entire model parameters, gradients and optimizer states. Each worker gets a different batch of data, runs the forward pass, computes a loss, and then runs the backward pass to generate gradients. Now, an all-reduce operation is performed wherein each worker gets the gradients from the remaining workers and they are averaged. In this way, each worker now has the same global gradients, which are used by the optimizer to update the model parameters. We can see that keeping full replicas consumes a lot of redundant memory on each GPU, which limits the batch size as well as the size of the models.
FSDP precisely addresses this by sharding the optimizer states, gradients and model parameters across the data parallel workers. It further facilitates CPU offloading of all those tensors, thereby enabling the loading of large models which won't fit the available GPU memory. Similar to DDP, each worker gets a different batch of data. During the forward pass, if CPU offload is enabled, the parameters of the local shard are first copied to the GPU/accelerator. Then, each worker performs an all-gather operation for a given FSDP-wrapped module/layer(s) so that everyone has the needed parameters, computation is performed, and the parameter shards from other workers are then released/emptied. This continues for all the FSDP modules. The loss gets computed after the forward pass, and during the backward pass an all-gather operation is again performed to get all the needed parameters for a given FSDP module, computation is performed to get the local gradients, followed by releasing the shards of the other workers. Now, the local gradients are averaged and sharded to the relevant workers using a reduce-scatter operation. This allows each worker to update the parameters of its local shard. If CPU offload is activated, the gradients are passed to the CPU for updating parameters directly on the CPU.
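To make that parameter flow more concrete, here is a tiny conceptual sketch using raw `torch.distributed` collectives. It is not the actual FSDP implementation (real FSDP uses flat parameters, fused reduce-scatter and CUDA streams); it only mimics the all-gather/re-shard pattern described above for a single wrapped module, and can be launched with `torchrun --nproc-per-node 2 <script>.py` on CPU:
```python
import torch
import torch.distributed as dist

dist.init_process_group("gloo")  # CPU-friendly backend so the sketch runs anywhere
rank, world = dist.get_rank(), dist.get_world_size()

full_numel = 8                                  # pretend the wrapped layer has 8 parameters
local_shard = torch.randn(full_numel // world)  # each rank permanently stores only its shard

# Forward (and again in backward): all-gather the full parameter for this module only
gathered = [torch.empty_like(local_shard) for _ in range(world)]
dist.all_gather(gathered, local_shard)
full_param = torch.cat(gathered)  # materialized just long enough to compute with
# ... run the module's computation with full_param ...
del full_param                    # immediately free the shards belonging to other ranks

# Backward: average and re-shard the gradients. Real FSDP uses reduce-scatter (NCCL);
# here we emulate it with all-reduce + a local slice so the sketch also runs on gloo.
full_grad = torch.randn(full_numel)  # stand-in for the locally computed full gradient
dist.all_reduce(full_grad, op=dist.ReduceOp.SUM)
grad_shard = full_grad.chunk(world)[rank] / world  # keep only this rank's averaged slice
# The optimizer then updates `local_shard` using `grad_shard`
# (optionally on the CPU when offload is enabled).

dist.destroy_process_group()
```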
Please refer [7, 8, 9] for all the in-depth details on the workings of the PyTorch FSDP and the extensive experimentation carried out using this feature.
## Issues
If you encounter any issues with the integration part of PyTorch FSDP, please open an Issue in [accelerate](https://github.com/huggingface/accelerate/issues).
But if you have problems with PyTorch FSDP configuration and deployment, you need to ask the experts in those domains, so please open a [PyTorch Issue](https://github.com/pytorch/pytorch/issues) instead.
## References
[1] [Train Large, Then Compress: Rethinking Model Size for Efficient Training and Inference of Transformers](http://nlp.cs.berkeley.edu/pubs/Li-Wallace-Shen-Lin-Keutzer-Klein-Gonzalez_2020_Transformers_paper.pdf)
[2] [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/pdf/1910.02054v3.pdf)
[3] [DeepSpeed: Extreme-scale model training for everyone - Microsoft Research](https://www.microsoft.com/en-us/research/blog/deepspeed-extreme-scale-model-training-for-everyone/)
[4] [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/pdf/1909.08053.pdf)
[5] [Introducing GPipe, an Open Source Library for Efficiently Training Large-scale Neural Network Models](https://ai.googleblog.com/2019/03/introducing-gpipe-open-source-library.html)
[6] [Which hardware do you need to train a 176B parameters model?](https://bigscience.huggingface.co/blog/which-hardware-to-train-a-176b-parameters-model)
[7] [Introducing PyTorch Fully Sharded Data Parallel (FSDP) API | PyTorch](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/)
[8] [Getting Started with Fully Sharded Data Parallel(FSDP) — PyTorch Tutorials 1.11.0+cu102 documentation](https://pytorch.org/tutorials/intermediate/FSDP_tutorial.html)
[9] [Training a 1 Trillion Parameter Model With PyTorch Fully Sharded Data Parallel on AWS | by PyTorch | PyTorch | Mar, 2022 | Medium](https://medium.com/pytorch/training-a-1-trillion-parameter-model-with-pytorch-fully-sharded-data-parallel-on-aws-3ac13aa96cff)
[10] [Fit More and Train Faster With ZeRO via DeepSpeed and FairScale](https://huggingface.co/blog/zero-deepspeed-fairscale)
| 7 |
0 | hf_public_repos | hf_public_repos/blog/getting-started-habana.md | ---
title: Getting Started with Transformers on Habana Gaudi
thumbnail: /blog/assets/61_getting_started_habana/habana01.png
authors:
- user: juliensimon
---
# Getting Started with Transformers on Habana Gaudi
A couple of weeks ago, we had the pleasure of [announcing](https://huggingface.co/blog/habana) that [Habana Labs](https://habana.ai) and [Hugging Face](https://huggingface.co/) would partner to accelerate Transformer model training.
Habana Gaudi accelerators deliver up to 40% better price performance for training machine learning models compared to the latest GPU-based Amazon EC2 instances. We are super excited to bring this price performance advantage to Transformers 🚀
In this hands-on post, I'll show you how to quickly set up a Habana Gaudi instance on Amazon Web Services, and then fine-tune a BERT model for text classification. As usual, all code is provided so that you may reuse it in your projects.
Let's get started!
## Setting up a Habana Gaudi instance on AWS
The simplest way to work with Habana Gaudi accelerators is to launch an Amazon EC2 [DL1](https://aws.amazon.com/ec2/instance-types/dl1/) instance. These instances are equipped with 8 Habana Gaudi processors that can easily be put to work thanks to the [Habana Deep Learning Amazon Machine Image](https://aws.amazon.com/marketplace/server/procurement?productId=9a75c51a-a4d1-4470-884f-6be27933fcc8) (AMI). This AMI comes preinstalled with the [Habana SynapseAI® SDK](https://developer.habana.ai/), and the tools required to run Gaudi accelerated Docker containers. If you'd like to use other AMIs or containers, instructions are available in the [Habana documentation](https://docs.habana.ai/en/latest/AWS_Quick_Starts/index.html).
Starting from the [EC2 console](https://console.aws.amazon.com/ec2sp/v2/) in the us-east-1 region, I first click on **Launch an instance** and define a name for the instance ("habana-demo-julsimon").
Then, I search the Amazon Marketplace for Habana AMIs.
<kbd>
<img src="assets/61_getting_started_habana/habana01.png">
</kbd>
I pick the Habana Deep Learning Base AMI (Ubuntu 20.04).
<kbd>
<img src="assets/61_getting_started_habana/habana02.png">
</kbd>
Next, I pick the *dl1.24xlarge* instance size (the only size available).
<kbd>
<img src="assets/61_getting_started_habana/habana03.png">
</kbd>
Then, I select the keypair that I'll use to connect to the instance with ```ssh```. If you don't have a keypair, you can create one in place.
<kbd>
<img src="assets/61_getting_started_habana/habana04.png">
</kbd>
As a next step, I make sure that the instance allows incoming ```ssh``` traffic. I do not restrict the source address for simplicity, but you should definitely do it in your account.
<kbd>
<img src="assets/61_getting_started_habana/habana05.png">
</kbd>
By default, this AMI will start an instance with 8GB of Amazon EBS storage, which won't be enough here. I bump storage to 50GB.
<kbd>
<img src="assets/61_getting_started_habana/habana08.png">
</kbd>
Next, I assign an Amazon IAM role to the instance. In real life, this role should have the minimum set of permissions required to run your training job, such as the ability to read data from one of your Amazon S3 buckets. This role is not needed here as the dataset will be downloaded from the Hugging Face hub. If you're not familiar with IAM, I highly recommend reading the [Getting Started](https://docs.aws.amazon.com/IAM/latest/UserGuide/getting-started.html) documentation.
Then, I ask EC2 to provision my instance as a [Spot Instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html), a great way to reduce the $13.11 per hour cost.
<kbd>
<img src="assets/61_getting_started_habana/habana06.png">
</kbd>
Finally, I launch the instance. A couple of minutes later, the instance is ready and I can connect to it with ```ssh```. Windows users can do the same with *PuTTY* by following the [documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/putty.html).
```
ssh -i ~/.ssh/julsimon-keypair.pem [email protected]
```
On this instance, the last setup step is to pull the Habana container for PyTorch, which is the framework I'll use to fine-tune my model. You can find information on other prebuilt containers and on how to build your own in the Habana [documentation](https://docs.habana.ai/en/latest/Installation_Guide/index.html).
```
docker pull \
vault.habana.ai/gaudi-docker/1.5.0/ubuntu20.04/habanalabs/pytorch-installer-1.11.0:1.5.0-610
```
Once the image has been pulled to the instance, I run it in interactive mode.
```
docker run -it \
--runtime=habana \
-e HABANA_VISIBLE_DEVICES=all \
-e OMPI_MCA_btl_vader_single_copy_mechanism=none \
--cap-add=sys_nice \
--net=host \
--ipc=host vault.habana.ai/gaudi-docker/1.5.0/ubuntu20.04/habanalabs/pytorch-installer-1.11.0:1.5.0-610
```
I'm now ready to fine-tune my model.
## Fine-tuning a text classification model on Habana Gaudi
I first clone the [Optimum Habana](https://github.com/huggingface/optimum-habana) repository inside the container I've just started.
```
git clone https://github.com/huggingface/optimum-habana.git
```
Then, I install the Optimum Habana package from source.
```
cd optimum-habana
pip install .
```
Then, I move to the subdirectory containing the text classification example and install the required Python packages.
```
cd examples/text-classification
pip install -r requirements.txt
```
I can now launch the training job, which downloads the [bert-large-uncased-whole-word-masking](https://huggingface.co/bert-large-uncased-whole-word-masking) model from the Hugging Face hub, and fine-tunes it on the [MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398) task of the [GLUE](https://gluebenchmark.com/) benchmark.
Please note that I'm fetching the Habana Gaudi configuration for BERT from the Hugging Face hub, and you could also use your own. In addition, other popular models are supported, and you can find their configuration file in the [Habana organization](https://huggingface.co/Habana).
```
python run_glue.py \
--model_name_or_path bert-large-uncased-whole-word-masking \
--gaudi_config_name Habana/bert-large-uncased-whole-word-masking \
--task_name mrpc \
--do_train \
--do_eval \
--per_device_train_batch_size 32 \
--learning_rate 3e-5 \
--num_train_epochs 3 \
--max_seq_length 128 \
--use_habana \
--use_lazy_mode \
--output_dir ./output/mrpc/
```
After 2 minutes and 12 seconds, the job is complete and has achieved an excellent F1 score of 0.9181, which could certainly improve with more epochs.
```
***** train metrics *****
epoch = 3.0
train_loss = 0.371
train_runtime = 0:02:12.85
train_samples = 3668
train_samples_per_second = 82.824
train_steps_per_second = 2.597
***** eval metrics *****
epoch = 3.0
eval_accuracy = 0.8505
eval_combined_score = 0.8736
eval_f1 = 0.8968
eval_loss = 0.385
eval_runtime = 0:00:06.45
eval_samples = 408
eval_samples_per_second = 63.206
eval_steps_per_second = 7.901
```
Last but not least, I terminate the EC2 instance to avoid unnecessary charges. Looking at the [Savings Summary](https://console.aws.amazon.com/ec2sp/v2/home/spot) in the EC2 console, I see that I saved 70% thanks to Spot Instances, paying only $3.93 per hour instead of $13.11.
<kbd>
<img src="assets/61_getting_started_habana/habana07.png">
</kbd>
As you can see, the combination of Transformers, Habana Gaudi, and AWS instances is powerful, simple, and cost-effective. Give it a try and let us know what you think. We definitely welcome your questions and feedback on the [Hugging Face Forum](https://discuss.huggingface.co/).
---
*Please [reach out to Habana](https://developer.habana.ai/accelerate-transformer-training-on-habana-gaudi-processors-with-hugging-face/) to learn more about training Hugging Face models on Gaudi processors.*
| 8 |
0 | hf_public_repos | hf_public_repos/blog/deep-rl-a2c.md | ---
title: "Advantage Actor Critic (A2C)"
thumbnail: /blog/assets/89_deep_rl_a2c/thumbnail.gif
authors:
- user: ThomasSimonini
---
# Advantage Actor Critic (A2C)
<h2>Unit 7 of the <a href="https://github.com/huggingface/deep-rl-class">Deep Reinforcement Learning Class with Hugging Face 🤗</a></h2>
⚠️ A **new updated version of this article is available here** 👉 [https://huggingface.co/deep-rl-course/unit6/introduction](https://huggingface.co/deep-rl-course/unit6/introduction)
*This article is part of the Deep Reinforcement Learning Class. A free course from beginner to expert. Check the syllabus [here.](https://huggingface.co/deep-rl-course/unit0/introduction)*
<img src="assets/89_deep_rl_a2c/thumbnail.jpg" alt="Thumbnail"/>
---
[In Unit 5](https://huggingface.co/blog/deep-rl-pg), we learned about our first Policy-Based algorithm called **Reinforce**.
In Policy-Based methods, **we aim to optimize the policy directly without using a value function**. More precisely, Reinforce is part of a subclass of *Policy-Based Methods* called *Policy-Gradient methods*. This subclass optimizes the policy directly by **estimating the weights of the optimal policy using Gradient Ascent**.
We saw that Reinforce worked well. However, because we use Monte-Carlo sampling to estimate return (we use an entire episode to calculate the return), **we have significant variance in policy gradient estimation**.
Remember that the policy gradient estimation is **the direction of the steepest increase in return**. Aka, how to update our policy weights so that actions that lead to good returns have a higher probability of being taken. The Monte Carlo variance, which we will further study in this unit, **leads to slower training since we need a lot of samples to mitigate it**.
Today we'll study **Actor-Critic methods**, a hybrid architecture combining value-based and policy-based methods that helps stabilize training by reducing the variance:
- *An Actor* that controls **how our agent behaves** (policy-based method)
- *A Critic* that measures **how good the action taken is** (value-based method)
We'll study one of these hybrid methods called Advantage Actor Critic (A2C), **and train our agent using Stable-Baselines3 in robotic environments**, where we'll train two agents to walk:
- A bipedal walker 🚶
- A spider 🕷️
<img src="https://github.com/huggingface/deep-rl-class/blob/main/unit7/assets/img/pybullet-envs.gif?raw=true" alt="Robotics environments"/>
Sounds exciting? Let's get started!
- [The Problem of Variance in Reinforce](https://huggingface.co/blog/deep-rl-a2c#the-problem-of-variance-in-reinforce)
- [Advantage Actor Critic (A2C)](https://huggingface.co/blog/deep-rl-a2c#advantage-actor-critic-a2c)
- [Reducing variance with Actor-Critic methods](https://huggingface.co/blog/deep-rl-a2c#reducing-variance-with-actor-critic-methods)
- [The Actor-Critic Process](https://huggingface.co/blog/deep-rl-a2c#the-actor-critic-process)
- [Advantage Actor Critic](https://huggingface.co/blog/deep-rl-a2c#advantage-actor-critic-a2c-1)
- [Advantage Actor Critic (A2C) using Robotics Simulations with PyBullet 🤖](https://huggingface.co/blog/deep-rl-a2c#advantage-actor-critic-a2c-using-robotics-simulations-with-pybullet-%F0%9F%A4%96)
## The Problem of Variance in Reinforce
In Reinforce, we want to **increase the probability of actions in a trajectory proportional to how high the return is**.
<img src="https://huggingface.co/blog/assets/85_policy_gradient/pg.jpg" alt="Reinforce"/>
- If the **return is high**, we will **push up** the probabilities of the (state, action) combinations.
- Else, if the **return is low**, it will **push down** the probabilities of the (state, action) combinations.
This return \\(R(\tau)\\) is calculated using a *Monte-Carlo sampling*. Indeed, we collect a trajectory and calculate the discounted return, **and use this score to increase or decrease the probability of every action taken in that trajectory**. If the return is good, all actions will be “reinforced” by increasing their likelihood of being taken.
\\(R(\tau) = R_{t+1} + \gamma R_{t+2} + \gamma^2 R_{t+3} + ...\\)
The advantage of this method is that **it’s unbiased. Since we’re not estimating the return**, we use only the true return we obtain.
But the problem is that **the variance is high, since trajectories can lead to different returns** due to stochasticity of the environment (random events during episode) and stochasticity of the policy. Consequently, the same starting state can lead to very different returns.
Because of this, **the return starting at the same state can vary significantly across episodes**.
<img src="assets/89_deep_rl_a2c/variance.jpg" alt="variance"/>
The solution is to mitigate the variance by **using a large number of trajectories, hoping that the variance introduced in any one trajectory will be reduced in aggregate and provide a "true" estimation of the return.**
However, increasing the batch size significantly **reduces sample efficiency**. So we need to find additional mechanisms to reduce the variance.
---
If you want to dive deeper into the question of variance and bias tradeoff in Deep Reinforcement Learning, you can check these two articles:
- [Making Sense of the Bias / Variance Trade-off in (Deep) Reinforcement Learning](https://blog.mlreview.com/making-sense-of-the-bias-variance-trade-off-in-deep-reinforcement-learning-79cf1e83d565)
- [Bias-variance Tradeoff in Reinforcement Learning](https://www.endtoend.ai/blog/bias-variance-tradeoff-in-reinforcement-learning/)
---
## Advantage Actor Critic (A2C)
### Reducing variance with Actor-Critic methods
The solution to reducing the variance of the Reinforce algorithm and training our agent faster and better is to use a combination of policy-based and value-based methods: *the Actor-Critic method*.
To understand the Actor-Critic, imagine you're playing a video game. You can play with a friend who will provide you with some feedback. You’re the Actor, and your friend is the Critic.
<img src="assets/89_deep_rl_a2c/ac.jpg" alt="Actor Critic"/>
You don’t know how to play at the beginning, **so you try some actions randomly**. The Critic observes your action and **provides feedback**.
Learning from this feedback, **you’ll update your policy and be better at playing that game.**
On the other hand, your friend (the Critic) will also update the way they provide feedback so it can be better next time.
This is the idea behind Actor-Critic. We learn two function approximations:
- *A policy* that **controls how our agent acts**: \\( \pi_{\theta}(s,a) \\)
- *A value function* to assist the policy update by measuring how good the action taken is: \\( \hat{q}_{w}(s,a) \\)
### The Actor-Critic Process
Now that we have seen the Actor Critic's big picture, let's dive deeper to understand how Actor and Critic improve together during the training.
As we saw, with Actor-Critic methods there are two function approximations (two neural networks):
- *Actor*, a **policy function** parameterized by theta: \\( \pi_{\theta}(s,a) \\)
- *Critic*, a **value function** parameterized by w: \\( \hat{q}_{w}(s,a) \\)
Let's see the training process to understand how Actor and Critic are optimized:
- At each timestep, t, we get the current state \\( S_t\\) from the environment and **pass it as input through our Actor and Critic**.
- Our Policy takes the state and **outputs an action** \\( A_t \\).
<img src="assets/89_deep_rl_a2c/step1.jpg" alt="Step 1 Actor Critic"/>
- The Critic takes that action also as input and, using \\( S_t\\) and \\( A_t \\), **computes the value of taking that action at that state: the Q-value**.
<img src="assets/89_deep_rl_a2c/step2.jpg" alt="Step 2 Actor Critic"/>
- The action \\( A_t\\) performed in the environment outputs a new state \\( S_{t+1}\\) and a reward \\( R_{t+1} \\) .
<img src="assets/89_deep_rl_a2c/step3.jpg" alt="Step 3 Actor Critic"/>
- The Actor updates its policy parameters using the Q value.
<img src="assets/89_deep_rl_a2c/step4.jpg" alt="Step 4 Actor Critic"/>
- Thanks to its updated parameters, the Actor produces the next action to take at \\( A_{t+1} \\) given the new state \\( S_{t+1} \\).
- The Critic then updates its value parameters.
<img src="assets/89_deep_rl_a2c/step5.jpg" alt="Step 5 Actor Critic"/>
### Advantage Actor Critic (A2C)
We can stabilize learning further by **using the Advantage function as Critic instead of the Action value function**.
The idea is that the Advantage function calculates **how much better taking that action at a state is compared to the average value of the state**. It subtracts the mean value of the state from the state-action pair value:
<img src="assets/89_deep_rl_a2c/advantage1.jpg" alt="Advantage Function"/>
In other words, this function calculates **the extra reward we get if we take this action at that state compared to the mean reward we get at that state**.
The extra reward is what's beyond the expected value of that state.
- If A(s,a) > 0: our gradient is **pushed in that direction**.
- If A(s,a) < 0 (our action does worse than the average value of that state), **our gradient is pushed in the opposite direction**.
The problem with implementing this advantage function is that it requires two value functions — \\( Q(s,a)\\) and \\( V(s)\\). Fortunately, **we can use the TD error as a good estimator of the advantage function.**
<img src="assets/89_deep_rl_a2c/advantage2.jpg" alt="Advantage Function"/>
## Advantage Actor Critic (A2C) using Robotics Simulations with PyBullet 🤖
Now that you've studied the theory behind Advantage Actor Critic (A2C), **you're ready to train your A2C agent** using Stable-Baselines3 in robotic environments.
<img src="https://github.com/huggingface/deep-rl-class/blob/main/unit7/assets/img/pybullet-envs.gif?raw=true" alt="Robotics environments"/>
Start the tutorial here 👉 [https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/unit7/unit7.ipynb](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/unit7/unit7.ipynb)
The leaderboard to compare your results with your classmates 🏆 👉 **[https://huggingface.co/spaces/chrisjay/Deep-Reinforcement-Learning-Leaderboard](https://huggingface.co/spaces/chrisjay/Deep-Reinforcement-Learning-Leaderboard)**
## Conclusion
Congrats on finishing this chapter! There was a lot of information. And congrats on finishing the tutorial. 🥳.
It's **normal if you still feel confused** with all these elements. **This was the same for me and for all people who studied RL.**
Take time to grasp the material before continuing. Look also at the additional reading materials we provided in this article and the syllabus to go deeper 👉 **[https://github.com/huggingface/deep-rl-class/blob/main/unit7/README.md](https://github.com/huggingface/deep-rl-class/blob/main/unit7/README.md)**
Don't hesitate to train your agent in other environments. The **best way to learn is to try things on your own!**
In the next unit, we will learn to improve Actor-Critic Methods with Proximal Policy Optimization.
And don't forget to share with your friends who want to learn 🤗!
Finally, with your feedback, we want **to improve and update the course iteratively**. If you have some, please fill this form 👉 **[https://forms.gle/3HgA7bEHwAmmLfwh9](https://forms.gle/3HgA7bEHwAmmLfwh9)**
### **Keep learning, stay awesome 🤗,**
| 9 |
0 | hf_public_repos/accelerate/examples/inference | hf_public_repos/accelerate/examples/inference/pippy/t5.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
from packaging import version
from transformers import AutoModelForSeq2SeqLM
from accelerate import PartialState, prepare_pippy
from accelerate import __version__ as accelerate_version
from accelerate.utils import set_seed
if version.parse(accelerate_version) > version.parse("0.33.0"):
raise RuntimeError(
"Using encoder/decoder models is not supported with the `torch.pipelining` integration or accelerate>=0.34.0. "
"Please use a lower accelerate version and `torchpippy`, which this example uses."
)
# Set the random seed to have reproducible outputs
set_seed(42)
# Create an example model
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
model.eval()
# Input configs
# Create example inputs for the model
input = torch.randint(
low=0,
high=model.config.vocab_size,
size=(2, 1024), # bs x seq_len
device="cpu",
dtype=torch.int64,
requires_grad=False,
)
example_inputs = {"input_ids": input, "decoder_input_ids": input}
# Create a pipeline stage from the model
# Using `auto` is equivalent to letting `device_map="auto"` figure
# out device mapping and will also split the model according to the
# number of total GPUs available if it fits on one GPU
model = prepare_pippy(
model,
no_split_module_classes=["T5Block"],
example_kwargs=example_inputs,
)
# You can pass `gather_output=True` to have the output from the model
# available on all GPUs
# model = prepare_pippy(
# model,
# no_split_module_classes=["T5Block"],
# example_kwargs=example_inputs,
# gather_outputs=True
# )
# The model expects a tuple during real inference
# with the data on the first device
args = (example_inputs["input_ids"].to("cuda:0"), example_inputs["decoder_input_ids"].to("cuda:0"))
# Take an average of 5 times
# Measure first batch
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
output = model(*args)
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time
# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
with torch.no_grad():
output = model(*args)
torch.cuda.synchronize()
end_time = time.time()
# The outputs are only on the final process by default
if PartialState().is_last_process:
output = torch.stack(tuple(output[0]))
print(f"Time of first pass: {first_batch}")
print(f"Average time per batch: {(end_time - start_time) / 5}")
PartialState().destroy_process_group()
| 0 |
0 | hf_public_repos/accelerate/examples/inference | hf_public_repos/accelerate/examples/inference/pippy/README.md | # Distributed inference examples with PiPPy
This repo contains a variety of tutorials for using the [PiPPy](https://github.com/PyTorch/PiPPy) pipeline parallelism library with accelerate. You will find examples covering:
1. How to trace the model using `accelerate.prepare_pippy`
2. How to specify inputs based on what the model expects (when to use `kwargs`, `args`, and such)
3. How to gather the results at the end.
## Installation
This requires the `main` branch of accelerate (or a version at least 0.27.0), `pippy` version of 0.2.0 or greater, and at least python 3.9. Please install using `pip install .` to pull from the `setup.py` in this repo, or run manually:
```bash
pip install 'accelerate>=0.27.0' 'torchpippy>=0.2.0'
```
## Running code
You can either use `torchrun` or the recommended way of `accelerate launch` (without needing to run `accelerate config`) on each script:
```bash
accelerate launch bert.py
```
Or:
```bash
accelerate launch --num_processes {NUM_GPUS} bert.py
```
Or:
```bash
torchrun --nproc-per-node {NUM_GPUS} bert.py
```
## General speedups
One can expect that PiPPy will outperform native model parallelism by a multiplicative factor, since all GPUs are running at all times with inputs, rather than one input being passed through one GPU at a time while the others wait for the prior one to finish.
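As a rough back-of-the-envelope example: with 2 pipeline stages and 4 micro-batches, running them strictly one after another costs 4 x 2 = 8 stage-steps (one GPU is always idle), whereas a filled pipeline costs roughly 4 + 2 - 1 = 5 stage-steps, and the gap widens as the number of micro-batches grows relative to the number of stages.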
Below are some benchmarks we have found when using the accelerate-pippy integration for a few models when running on 2x4090's:
### Bert
| | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.2137s | 0.3119s |
| Average of 5 batches | 0.0099s | **0.0062s** |
### GPT2
| | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.1959s | 0.4189s |
| Average of 5 batches | 0.0205s | **0.0126s** |
### T5
| | Accelerate/Sequential | PiPPy + Accelerate |
|---|---|---|
| First batch | 0.2789s | 0.3809s |
| Average of 5 batches | 0.0198s | **0.0166s** | | 1 |
0 | hf_public_repos/accelerate/examples/inference | hf_public_repos/accelerate/examples/inference/pippy/gpt2.py | # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import torch
from transformers import AutoModelForSequenceClassification
from accelerate import PartialState, prepare_pippy
from accelerate.utils import set_seed
# Set the random seed to have reproducible outputs
set_seed(42)
# Create an example model
model = AutoModelForSequenceClassification.from_pretrained("gpt2")
model.eval()
# Input configs
# Create example inputs for the model
input = torch.randint(
low=0,
high=model.config.vocab_size,
size=(1, 1024), # bs x seq_len
device="cpu",
dtype=torch.int64,
requires_grad=False,
)
# Create a pipeline stage from the model
# Using `auto` is equivalent to letting `device_map="auto"` figure
# out device mapping and will also split the model according to the
# number of total GPUs available if it fits on one GPU
model = prepare_pippy(model, split_points="auto", example_args=(input,))
# You can pass `gather_output=True` to have the output from the model
# available on all GPUs
# model = prepare_pippy(model, split_points="auto", example_args=(input,), gather_output=True)
# Create new inputs of the expected size (n_processes)
input = torch.randint(
low=0,
high=model.config.vocab_size,
size=(2, 1024), # bs x seq_len
device="cpu",
dtype=torch.int64,
requires_grad=False,
)
# Move the inputs to the first device
input = input.to("cuda:0")
# Take an average of 5 times
# Measure first batch
torch.cuda.synchronize()
start_time = time.time()
with torch.no_grad():
output = model(input)
torch.cuda.synchronize()
end_time = time.time()
first_batch = end_time - start_time
# Now that CUDA is init, measure after
torch.cuda.synchronize()
start_time = time.time()
for i in range(5):
with torch.no_grad():
output = model(input)
torch.cuda.synchronize()
end_time = time.time()
# The outputs are only on the final process by default
if PartialState().is_last_process:
output = torch.stack(tuple(output[0]))
print(f"Time of first pass: {first_batch}")
print(f"Average time per batch: {(end_time - start_time) / 5}")
PartialState().destroy_process_group()
| 2 |
0 | hf_public_repos/accelerate/examples | hf_public_repos/accelerate/examples/config_yaml_templates/multi_node.yaml | # This config template is for a multi-node setup. This assumes DDP, but can be interop'd with the other configs in this folder
# Generally it's recommended to look at the SLURM config template for a more robust multi-node setup
distributed_type: MULTI_GPU
# We need to specify the current machine's rank
machine_rank: 0
# We then need to specify the IP address and port of the main process
main_process_ip: '1234'
main_process_port: 9999
# We need to specify the number of machines
num_machines: 2
# We need to specify the *total* number of processes
num_processes: 8
# And then we need to specify how rdvz comms will be handled
rdzv_backend: static # or c10d
# If the compute nodes are on the same network (cloud will more than likely be false)
same_network: false
| 3 |
0 | hf_public_repos/accelerate/examples | hf_public_repos/accelerate/examples/config_yaml_templates/run_me.py | # Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A base script which outputs the accelerate config for the given environment
"""
from accelerate import Accelerator
accelerator = Accelerator()
accelerator.print(f"Accelerator state from the current environment:\n{accelerator.state}")
if accelerator.fp8_recipe_handler is not None:
accelerator.print(f"FP8 config:\n{accelerator.fp8_recipe_handler}")
accelerator.end_training()
| 4 |
0 | hf_public_repos/accelerate/examples | hf_public_repos/accelerate/examples/config_yaml_templates/fp8.yaml | # This config template simply setups up the TransformersEngine config (and a config for a single GPU),
# this can interop with the other configs in this folder
distributed_type: "NO"
mixed_precision: "fp8"
# Then we specify the fp8 configuration:
fp8_config:
backend: TE # Can be TE | MS-AMP
# The following are TE specific arguments.
# See https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/api/common.html#common-api for more details
amax_history_length: 1024
fp8_format: E4M3
interval: 1
margin: 0
override_linear_precision: false
# Generally this should always be set to `false` to have the most realistic fp8 eval performance
use_autocast_during_eval: false
# If using MS-AMP, we ignore all of the prior and set a opt_level
#opt_level: O1 | 5 |
0 | hf_public_repos/accelerate/examples | hf_public_repos/accelerate/examples/config_yaml_templates/deepspeed.yaml | # Similar to FSDP, we set the distributed type as DEEPSPEED
distributed_type: DEEPSPEED
# With DeepSpeed, we utilize a deepspeed config file for the entire configuration
deepspeed_config:
# Can also be any of the config json's in accelerate/examples/deepspeed_config_templates
deepspeed_config_file: ../deepspeed_config_templates/zero_stage1_config.json
# If using ZeRO-3 and wanting to load big models in, this should be set to `true` so
# `transformers` uses the right `init` function
zero3_init_flag: false # true
# Finally we need to specify the number of GPUs to use
num_processes: 2
# Optionally we can set the mixed precision now instead of in the deepspeed config file,
# however this requires the `fp16` and `bf16` options to be set to `auto` in the deepspeed config file
# mixed_precision: "bf16"
| 6 |
0 | hf_public_repos/accelerate/examples | hf_public_repos/accelerate/examples/config_yaml_templates/multi_gpu.yaml | # Specify distributed_type as `MULTI_GPU` for DDP
distributed_type: "MULTI_GPU"
# Can be one of "no", "fp16", or "bf16" (see `transformer_engine.yaml` for `fp8`)
mixed_precision: "bf16"
# Specify the number of GPUs to use
num_processes: 2 | 7 |
0 | hf_public_repos/accelerate/examples | hf_public_repos/accelerate/examples/config_yaml_templates/README.md | # Config Zoo
This folder contains a variety of minimal configurations for `Accelerate` achieving certain goals. You can use these
direct config YAML's, or build off of them for your own YAML's.
These are highly annotated versions, aiming to teach you what each section does.
Each config can be run via `accelerate launch --config_file {file} run_me.py`
`run_me.py` will then print out how the current environment is setup (the contents of the `AcceleratorState`) | 8 |
0 | hf_public_repos/accelerate/examples | hf_public_repos/accelerate/examples/config_yaml_templates/fsdp.yaml | # Since we are doing FSDP (even though it's multi-GPU), we need to specify the distributed type as FSDP
distributed_type: FSDP
# Can be one of "no", "fp16", or "bf16" (see `transformer_engine.yaml` for `fp8`, but it works for FSDP as well)
mixed_precision: 'bf16'
# Specify the number of GPUs to use
num_processes: 2
# Then we can specify the FSDP config
fsdp_config:
fsdp_activation_checkpointing: false
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
fsdp_backward_prefetch: BACKWARD_PRE
fsdp_cpu_ram_efficient_loading: true
fsdp_forward_prefetch: false
fsdp_offload_params: false
fsdp_sharding_strategy: FULL_SHARD
fsdp_state_dict_type: SHARDED_STATE_DICT
fsdp_sync_module_states: true
fsdp_use_orig_params: true
| 9 |
0 | hf_public_repos/blog/assets/35_bert_cpu_scaling_part_2 | hf_public_repos/blog/assets/35_bert_cpu_scaling_part_2/baselines/eager_mode_tensorflow_onednn_baseline.svg | [SVG chart asset: "eager mode TensorFlow oneDNN baseline" figure from the BERT CPU scaling (part 2) blog post; raw vector markup not reproduced]
x1="4207.5" y1="3206" x2="4207.5" y2="3228" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill106"><stop offset="0" stop-color="#FFC746"/><stop offset="0.5" stop-color="#FFC600"/><stop offset="1" stop-color="#E5B600"/></linearGradient><clipPath id="clip107"><rect x="1272" y="130" width="5747" height="3153"/></clipPath><clipPath id="clip108"><rect x="1272" y="130" width="5747" height="3153"/></clipPath><linearGradient x1="4307.5" y1="3206" x2="4307.5" y2="3228" gradientUnits="userSpaceOnUse" spreadMethod="reflect" id="fill109"><stop offset="0" stop-color="#71A6DB"/><stop offset="0.5" stop-color="#559BDB"/><stop offset="1" stop-color="#438AC9"/></linearGradient><clipPath id="clip110"><rect x="1272" y="130" width="5747" height="3153"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-1271 -130)"><rect x="1272" y="131" width="5747" height="3152" fill="#FFFFFF"/><g clip-path="url(#clip1)"><path d="M1515.5 2482.58 6968.5 2482.58M1515.5 1984.57 6968.5 1984.57M1515.5 1486.55 6968.5 1486.55M1515.5 988.533 6968.5 988.533M1515.5 491.5 6968.5 491.5" stroke="#E0E5EB" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g clip-path="url(#clip2)"><rect x="1594" y="2776" width="156" height="204" fill="url(#fill3)"/></g><g clip-path="url(#clip4)"><rect x="2684" y="2749" width="157" height="231" fill="url(#fill5)"/></g><g clip-path="url(#clip6)"><rect x="3775" y="2519" width="157" height="461" fill="url(#fill7)"/></g><g clip-path="url(#clip8)"><rect x="4866" y="1608" width="156" height="1372" fill="url(#fill9)"/></g><g clip-path="url(#clip10)"><rect x="5956" y="1008" width="157" height="1972" fill="url(#fill11)"/></g><g clip-path="url(#clip12)"><rect x="1788" y="2868" width="157" height="112" fill="url(#fill13)"/></g><g clip-path="url(#clip14)"><rect x="2879" y="2852" width="156" height="128" fill="url(#fill15)"/></g><g clip-path="url(#clip16)"><rect x="3969" y="2762" width="157" height="218" fill="url(#fill17)"/></g><g clip-path="url(#clip18)"><rect x="5060" y="2568" width="157" height="412" fill="url(#fill19)"/></g><g clip-path="url(#clip20)"><rect x="6151" y="2429" width="156" height="551" fill="url(#fill21)"/></g><g clip-path="url(#clip22)"><rect x="1982" y="2895" width="157" height="85" fill="url(#fill23)"/></g><g clip-path="url(#clip24)"><rect x="3073" y="2888" width="157" height="92.0002" fill="url(#fill25)"/></g><g clip-path="url(#clip26)"><rect x="4164" y="2836" width="156" height="144" fill="url(#fill27)"/></g><g clip-path="url(#clip28)"><rect x="5254" y="2753" width="157" height="227" fill="url(#fill29)"/></g><g clip-path="url(#clip30)"><rect x="6345" y="2714" width="157" height="266" fill="url(#fill31)"/></g><g clip-path="url(#clip32)"><rect x="2177" y="2901" width="156" height="79" fill="url(#fill33)"/></g><g clip-path="url(#clip34)"><rect x="3267" y="2897" width="157" height="82.9998" fill="url(#fill35)"/></g><g clip-path="url(#clip36)"><rect x="4358" y="2869" width="157" height="111" fill="url(#fill37)"/></g><g clip-path="url(#clip38)"><rect x="5449" y="2836" width="156" height="144" fill="url(#fill39)"/></g><g clip-path="url(#clip40)"><rect x="6539" y="2815" width="157" height="165" fill="url(#fill41)"/></g><g clip-path="url(#clip42)"><rect x="2371" y="2895" width="157" height="85" fill="url(#fill43)"/></g><g clip-path="url(#clip44)"><rect x="3462" y="2887" width="156" height="92.9998" fill="url(#fill45)"/></g><g clip-path="url(#clip46)"><rect x="4552" y="2844" width="157" height="136" fill="url(#fill47)"/></g><g 
clip-path="url(#clip48)"><rect x="5643" y="2798" width="157" height="182" fill="url(#fill49)"/></g><g clip-path="url(#clip50)"><rect x="6734" y="2786" width="156" height="194" fill="url(#fill51)"/></g><g clip-path="url(#clip52)"><path d="M1515.5 2980.5 6968.5 2980.5" stroke="#E0E5EB" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g clip-path="url(#clip53)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1640.75 2744)">205</text></g><g clip-path="url(#clip54)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2731.41 2717)">233</text></g><g clip-path="url(#clip55)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3822.07 2487)">463</text></g><g clip-path="url(#clip56)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4902.28 1576)">1378</text></g><g clip-path="url(#clip57)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 5992.94 976)">1981</text></g><g clip-path="url(#clip58)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1835.06 2836)">113</text></g><g clip-path="url(#clip59)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2925.72 2820)">129</text></g><g clip-path="url(#clip60)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4016.38 2730)">220</text></g><g clip-path="url(#clip61)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 5107.04 2536)">414</text></g><g clip-path="url(#clip62)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 6197.7 2397)">554</text></g><g clip-path="url(#clip63)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2039.83 2863)">86</text></g><g clip-path="url(#clip64)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3130.48 2856)">93</text></g><g clip-path="url(#clip65)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4210.69 2804)">145</text></g><g clip-path="url(#clip66)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 5301.35 2721)">228</text></g><g clip-path="url(#clip67)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 6392.01 2682)">267</text></g><g clip-path="url(#clip68)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2234.14 2869)">79</text></g><g clip-path="url(#clip69)"><text fill="#44546A" 
font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3324.8 2865)">84</text></g><g clip-path="url(#clip70)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4405.01 2837)">112</text></g><g clip-path="url(#clip71)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 5495.67 2804)">145</text></g><g clip-path="url(#clip72)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 6586.33 2783)">166</text></g><g clip-path="url(#clip73)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2428.45 2863)">86</text></g><g clip-path="url(#clip74)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3519.11 2855)">94</text></g><g clip-path="url(#clip75)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4599.32 2812)">138</text></g><g clip-path="url(#clip76)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 5689.98 2766)">183</text></g><g clip-path="url(#clip77)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 6780.64 2754)">195</text></g><g clip-path="url(#clip78)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1456.23 2992)">0</text></g><g clip-path="url(#clip79)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1414.43 2494)">500</text></g><g clip-path="url(#clip80)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1393.53 1996)">1000</text></g><g clip-path="url(#clip81)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1393.53 1498)">1500</text></g><g clip-path="url(#clip82)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1393.53 1001)">2000</text></g><g clip-path="url(#clip83)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 1393.53 503)">2500</text></g><g clip-path="url(#clip84)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 2039.83 3046)">20</text></g><g clip-path="url(#clip85)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3130.49 3046)">32</text></g><g clip-path="url(#clip86)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4210.69 3046)">128</text></g><g clip-path="url(#clip87)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" 
transform="matrix(1 0 0 1 5301.35 3046)">384</text></g><g clip-path="url(#clip88)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 6392.01 3046)">512</text></g><g clip-path="url(#clip89)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="700" font-size="41" transform="matrix(6.12323e-17 -1 1 6.12323e-17 1368.43 1844)">Latency (ms)</text></g><g clip-path="url(#clip90)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="700" font-size="41" transform="matrix(1 0 0 1 4101.15 3110)">Sequence Length</text></g><g clip-path="url(#clip91)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="700" font-size="73" transform="matrix(1 0 0 1 3264.67 235)">Intel Ice lake Xeon 8380 </text></g><g clip-path="url(#clip92)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="700" font-size="73" transform="matrix(1 0 0 1 4000.02 235)">-</text></g><g clip-path="url(#clip93)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="700" font-size="73" transform="matrix(1 0 0 1 4039.05 235)">TensorFlow + OneDNN (>= 2.5.0)</text></g><g clip-path="url(#clip94)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="700" font-size="73" transform="matrix(1 0 0 1 2872.07 324)">Latency Benchmark with varying number of CPU cores involved in the computations</text></g><g clip-path="url(#clip95)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="700" font-size="73" transform="matrix(1 0 0 1 3053.15 414)">(some data might be missing indicating benchmark crashed during runs)</text></g><g clip-path="url(#clip96)"><rect x="3937" y="3206" width="23" height="22" fill="url(#fill97)"/></g><g clip-path="url(#clip98)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 3969.52 3229)">1</text></g><g clip-path="url(#clip99)"><rect x="4017" y="3206" width="22" height="22" fill="url(#fill100)"/></g><g clip-path="url(#clip101)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4048.86 3229)">4</text></g><g clip-path="url(#clip102)"><rect x="4096" y="3206" width="23" height="22" fill="url(#fill103)"/></g><g clip-path="url(#clip104)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4128.2 3229)">16</text></g><g clip-path="url(#clip105)"><rect x="4196" y="3206" width="22.9995" height="22" fill="url(#fill106)"/></g><g clip-path="url(#clip107)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4228.44 3229)">40</text></g><g clip-path="url(#clip108)"><rect x="4296" y="3206" width="23" height="22" fill="url(#fill109)"/></g><g clip-path="url(#clip110)"><text fill="#44546A" font-family="Calibri,Calibri_MSFontService,sans-serif" font-weight="400" font-size="41" transform="matrix(1 0 0 1 4328.67 3229)">80</text></g><rect x="1272.5" y="131.5" width="5747" height="3152" stroke="#E0E5EB" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g></svg> | 0 |
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/112_document-ai/empty.txt | 1 |
|
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/sdxl_ort_inference/sd_turbo_a100_perf_chart_static.svg | [SVG bar chart: "SD TURBO (STATIC SHAPE) (A100-SXM4-80GB, 512X512, FP16)". Y axis: IMAGES / SEC; X axis: (BATCH, STEPS) = (1, 4), (4, 4), (1, 1), (4, 1); series: Torch (Compile), ORT_CUDA (Static), ORT_TRT (Static). Labeled gains over Torch (Compile) per (batch, steps): ORT_CUDA (Static) +18%, +22%, +23%, +28%; ORT_TRT (Static) +21%, +21%, +30%, +29%.] | 2
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/sdxl_ort_inference/sd_turbo_rtx_perf_chart_dynamic.svg | [SVG bar chart: "SD TURBO (DYNAMIC SHAPE) (RTX-4090, 512X512, FP16)". Y axis: IMAGES / SEC; X axis: (BATCH, STEPS) = (1, 4), (4, 4), (1, 1), (4, 1); series: Torch (Eager), ORT_CUDA (Dynamic), ORT_TRT (Dynamic). Labeled gains over Torch (Eager) per (batch, steps): ORT_CUDA (Dynamic) +112%, +19%, +84%, +29%; ORT_TRT (Dynamic) +98%, +48%, +89%, +55%.] | 3
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/sdxl_ort_inference/sdxl_turbo_ort_vs_torch.svg | [SVG wrapping a base64-encoded JPEG raster (sdxl_turbo_ort_vs_torch); no chart text is recoverable from the embedded image data]
ST3H2Qh1RSRuO9gML3I9aANBfHfiKP4A6T4p07Sn8R+KLrRrO5SzhTAmuJo48sQMYQFyxA7KcVx2qeJviT8I/EHg2fxR4i03xVpXiHVotHuLO200Wz2ksqMyvE4bLKChzv7fmNz4c/Fjwj4V/Z/8ADmr33iCw+yaRoVgl75FwkrwyGBAIyqkkOSCAp5yD6GuY8E+JNG+KvjTTPGvi7xHotglm5/4Rvwt/acJlt2f5RcXAD/NcMDgR8hAe7cgA+h68j/aV8Va94X8I6D/wj2qNo19qOvWenNdpEkhSOVirYVgQex/CvQtL8ZaLrfiDWNDsdRhudW0jyxfWqZ3QeYu5M8Y5APSvJP2u7GHU/BfhKzuFZre48VadFIquUJVnIIDKQRweoOaAOv8ABvw98Z6Br8F7rHxIvPEdgisH0+bTIIFkJUgHenIwcH8K8h+HbfFbxp8H4vH1r8S/LvjHdzppV5pUDWzeTNKgRpBhgGEfXHGa908H/CXwx4D1KW/0Wzure6kiMLNPqNzcDaSDjbLIwByo5AzXz9+zz8C4PiB8ENFn1Pxh4pj0m9e7WfQrS/WKydBdTKybQm4K2CW+bncaAPWI/ix4m8Q/Cvwrr/hjwo2q6vr1osrK8yw2tgxUFpJWY7igJOAuSwFV/hT8Wr66/Z1i8eeKpkuru2tbu5umhjWMP5UkgAUDgcKBXoOoXegfD/wvBbT3djoOk28ItLZbmdYY1CphUUsRzgdOvFfOnww+zeOP2MtU8L6PfWt7rzaZf/8AEvgnV5wTPKVygO4buMZHORQB2ek3/wAZ9Q8N6b42t7zS9QF4kV5/whaWiRf6PJghFumcHzQhBy3y5B47V2WqeMNS8O/GzQtFupN+h+JNNm+zRuBm3vLch2AI7PHJ0JPMfHfPzr4os/hsP2fLPxb4d1Ef8LE+zW32K+XUHk1Y3+UDRbd24kHcNgXaB0AGK9s+Il1cz+N/glbXe2PWpNSmuZkXsEsJRNjHbdIo/EUAeyUUUUAFFFFABRRRQAUUUUAUNc/5A95/1zNX6oa5/wAge8/65mr9AFCx/wCQhqX/AF1T/wBFrV+qFj/yENS/66p/6LWr9ABRRRQAUUUUAFFFFABXEfErwLceKLrwvq+mGJNZ8P6nHeQNMxVZIW+S4iJAON0bHHH3lXtmu3ooAoaboOmaPcXs9hp1pYz30vn3UltAsbXEnTfIQBub3OTRJoGlzawmrSabaSaqkJt1vmgUzrETkxh8bgpPO3OKv0UAcV/wpH4dCTzB4B8L+Zndv/sa2zn1zs61r+LPAfhzx1bww+ItD0/WooW3RLfW6y+WT1KkjjPtW9RQBS0zRrDRdLh03T7K3stPhTy4rW3iVIkX+6FAwBWJ4b+F/hDwdqU2o6H4Z0nSb+YFXubOzSOQqTkjIGQCew4rqKKACuV1v4VeC/EuqjU9W8JaJqeojH+lXenxSSHHTLMuTj3rqqKAM7/hHNJ/tW31P+y7L+0reE20N59nTzooicmNXxkLn+EHFF54c0nUtUs9Tu9LsrrUrIMLW8mt0eaAMMMEcjK5HXB5rRooApaPoun+H7BLHSrC102yjLMltZwrFGpZizEKoABLEk+pJNNbQdMbWl1htOtG1dYPsy35gXzxDu3eWJMbtuTnbnGav0UAUtY0XT/EWmz6dqthbanp84AltbyFZYpACCAyMCDyAeR2q4OOBwKWigDF8T+CvD/jW1S38QaJp+tQRnckd/bJMFPqNwOPwq1onh7S/DWmpp+kabaaXYR/dtbOBYoxnrhVAFaFFAHLaD8LfB/hfWpdX0fwxpOmanLu33drZpHJ8xy2CBxnvjrWt4h8L6P4t09rDW9Ks9XsmOTb30CzJn1wwIzWnRQBj+GvB+heDbNrTQNGsNFtWO5obC2SFWPqQoGT7msm++D/AID1TUJ7+88E+Hbu/nkM0t1PpNu8sjk5LMxTJJPOTzXXUUAch8QPhrpvxA8Ix+GblY7XRWmiee3it0IeJG3eWuRiPJA+YDIGcYJyOtSNY41RRhVGAPanUUAc1a/DPwjY+IG1238L6PBrTEsdQjsYln3Hqd4XOffrVrxL4H8OeNBbjxB4f0vXRb7jCNSso7jyt2N23epxnAzjrgVt0UAcpbfCbwPZ6beadb+DfD8Gn3hRrm1j0uBYpyhJQugTDFSTjI4ycVUj+B/w5ikV08AeF0dTlWXRrYEEdx8ldtRQBQ0vQdM0M3Z03TrTTzeTtdXP2WBYvOmbG6R9oG5zgZY8nFQ2/hPRLSbUpYNG0+GXU23Xzx2qK12cYzKQPnOCfvZrVooA57wz8PPC3guaebQPDmlaLLP/AK2Sws44Wf2JUDj2qLX/AIY+DvFeoG/1vwnoesXxUIbrUNNhnl2joNzqTgeldNRQBylx8J/BF3pdrps/g3w/Np1ozvb2cmlwNDCz43lEKYUtgZIHOKTSPhL4G8P6lBqGl+DPD+m38B3RXVnpUEUsZIIJV1QEcEjg966yigDk/Afw7sPAba5cWzeff61fyahe3HlrGHduFAVeAFUAepOSSSTW82h6bJrEertp9q2qxwm3S+MCmdYiclBJjcFJ5xnGavUUAUda0PTfEmnSafq2n2uqWEhUva3sCzROVIZSVYEHBAI9CBV1VCqFUAKBgAdBS0UAc54i+G3hHxffJe694W0XW7xIxEtxqOnQ3EgQEkKGdScAknHuauaL4P0Hw3pUumaRomnaVpsrM8lnZWkcMLswAYlFABJAAPHYVr0UAc74Z+HfhXwXPPPoHhzStFmuP9bJYWccLOPQlQMj2rJ8D+B7rR/GXjLxPqnktqOtXMccCwsWEVnCm2FckD5iS7sOmWA5xmu4ooAKKKKACiiigAooooAKKKKAMbxb/wAgWT/fT+YrYX7o+lY/i3/kCyf76fzFbC/dH0oAWiiigAooooAKKKKACiiigDgPCvwxg0DVvFlvc21lf+HNV1JdYtbWdPMaG5cZnyrDGN6h1I5BdhxgV3dxbxXVvJBPGk0MilHjkUMrKRggg9QRUlFAGHfeBvDeqWtpa3nh/S7u2tF2W8M9nG6Qr6ICuFHA6elWdG8L6N4dhmh0nSbHTIpjmWOztkiVzjGWCgZ49a06KAMTRfBPh3w3eT3mk6Dpml3c/wDrZ7OzjieTv8zKAT+NLrngvw94ouIJ9Z0LTdWng/1Ul9aRzNH3+UsDj8K2qKAGxxrDGqIoRFGFVRgADsBVLWtA0zxJZm01bTrTU7QnJgvIFlTPrtYEVfooAwrLwJ4a03SZ9LtPD2l2umz/AOts4bKNIZMHPzIFwefUVp3ml2Woae9hdWkFzYuuxraaNXjZR2KkYI9qtUUAUbbQ9Ns7xbu30+1gulhW2E8cKq4iHSPcBnaP7vSpr7T7XVLZre8tobu3ZlYxToHQlWDKcHjIYAj0IBqxRQAVV07SrLR4WhsLOCyhZ2kaO3iWNS7HLMQB1J6mrVFAEF9Y22qWktreW8V3ayjbJDOgdHHoVPBrM8P+CvD3hLzP7D0LTdG8z7/9n2kcG767QM1tUUAYmoeB/DmravHqt7
oGl3mqR42Xs9nG8y46YcrkY+tat3ZwX9tJb3MEdxbyDa8UqBkYehB4NTUUAYWg+BfDXhe4kuNG8P6XpM8nDy2NlHCzfUqoJp2s+CfDviK6W61bQdM1O5VBGJryzjlcKCSFywJxknj3rbooA53XPBVnqHg7UPDunJb6LZ3kLQN9ltYyqo3D4QjbkrkcjjOcHFaPh3QbPwr4f0zRdPjMVhp1tHaW6MxYrHGoVQSepwBzWjRQBjeIPBugeLFiGuaHpusCL/Vi/tI59n03A4q6uj2CaWdMWxtl00xmE2YiXyTGRgpsxjaQTxjFXKKAOdsfhz4T0u8hu7Pwxo1pdQtvjngsIkdGHQhguQahf4W+DJHZ38JaG7sclm02Ekn1+7XUUUAZOm+EdC0eaCWw0XT7GW3Ro4Xt7VIzGjEsyqQOASSSB1Jq3NpNjc6hbX81lby31sGWC6eJTLEGGGCsRkAjrjrVuigDAj+H/hiPWv7ZTw5pK6vu3/b1sYhPu9d+3dn8ata54U0TxN5P9saPYar5O7yvt1sk3l5xnbuBxnA6egrVooA56L4d+FYbGeyj8M6PHZzsry262EQjkZc7Sy7cEjJxnpk1FB8MfB1rPHND4U0SGaNg6SR6dCrKwOQQQvBBrpqKAOY8P+BbbQ/F3iLxGZvtGo6z5McjCJI1jhiDeWnyjLEb2yzEk5A6AAbuoaVZatHEl9ZwXiRSLNGtxGrhHX7rDI4I7HqKtUUAFVdN0yz0ezjtLC0gsrSPOyC3jEaLkknCgYGSSfqatUUAUNZ8P6X4jtVttW0201O3Vt4hvIFlQNgjIDAjPJ596raL4O0Dw3NJNpGiadpcsi7HksrSOFmXOcEqBkVsUUAYMfgPwzDrZ1mPw7pSauW3m/WyiE5b18zbuz+NY9x4DuNT+Llp4tvp42s9L0x7LTbZCSwlmfM8rcYHypGoxn+LOMCu2ooAKKKKACiiigAooooAKKKKAKGuf8ge8/65mr9UNc/5A95/1zNX6AKFj/yENS/66p/6LWr9ULH/AJCGpf8AXVP/AEWtX6ACiiigAoopKAOb074leFNW8UXHhyy8Q6dd67b7vNsIbhWlUr94YB6r3HUd6d4y+Inhr4eW9tP4l1q00WG5cpC92+0OwGSB+FeUL4o8Pat4y8DW3h57WNLPWLgS+FUthDe2c5SdZrmUBiVVSzEggBt4O4nArZ8b+LfDOu3vhbX4PHUnhe38q/itdVS3i8mUgxrIhe4RkVgY+ARlgG29KAO31T4qeENF0PTNZ1DxJptlpepgNZXVxcKiTgjIKk9RjnPQDrXTwzJcRJLE6yRuoZXQ5DA8gg9xXzx4d8fJb6l4e8U/EK5gtrC/8L3NtHdXkHkx3DrcbmOwjCtNCInCfxYIA4xXqHwM0+90n4O+DrPUYpbe8h0yFXhmBDx/KMKwPIIGBg9MUAd1RSNnacde2aq7b7+/b/8AfLf40AW6Kqbb7+/b/wDfLf40bb7+/b/98t/jQBboqptvv79v/wB8t/jRtvv79v8A98t/jQBboqptvv79v/3y3+NG2+/v2/8A3y3+NAFuiqm2+/v2/wD3y3+NG2+/v2//AHy3+NAFuiqm2+/v2/8A3y3+NG2+/v2//fLf40AW6Kqbb7+/b/8AfLf40bb7+/b/APfLf40AW6Kqbb7+/b/98t/jRtvv79v/AN8t/jQBboqptvv79v8A98t/jRtvv79v/wB8t/jQBboqptvv79v/AN8t/jRtvv79v/3y3+NAFuiqm2+/v2//AHy3+NG2+/v2/wD3y3+NAFuiqm2+/v2//fLf40bb7+/b/wDfLf40AW6Kqbb7+/b/APfLf40bb7+/b/8AfLf40AW6Kqbb7+/b/wDfLf40bb7+/b/98t/jQBboqptvv79v/wB8t/jRtvv79v8A98t/jQBboqptvv79v/3y3+NG2+/v2/8A3y3+NAFuiqm2+/v2/wD3y3+NOjF3vG9oSnfapz/OgCzRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBjeLf+QLJ/vp/MVsL90fSsfxb/wAgWT/fT+YrYX7o+lAC0UUUAFFFFAGV4k8Uab4TsEvNUuDBFJKsEarG0kksjfdREUFmY4PABPBpfDnibTPFmm/btKulurcSPC52lWjkQ7XR1YBlYEYKkAisv4g+IbDwzo8F5qKXMcBn8oajbWouDp7tG4W4ZcNgA/Lu2kAuMjaTXG/BfWrfStHlhmkvb5NX126+xarNYyxvqJZPNaeRduIxkOoOFQhF2gAgUAdzo/jzR9e1WbT7GS6nnhklieT7FMsO+NirgSlNhwwI4btUXhv4k+HPFupyafpWpLc3SxtMq+W6CaNX2NJEzACRA2FLISASOea8V8M3V5Drml6H4T1jxHLdXU1/FrWk61bhE02NkmcSkqgCN57R7WVm3hz1AyLHwvm/t3xN8LLOxtLq3uPCeh3drray2zxC2kaOGJYGJABYujMAM8Ju6UAfRFFFV7qO5kZfInjhHffEXz/48KALFFUPs+o/8/sH/gMf/i6Ps+o/8/sH/gMf/i6AL9FUPs+o/wDP7B/4DH/4uj7PqP8Az+wf+Ax/+LoAv0VQ+z6j/wA/sH/gMf8A4uj7PqP/AD+wf+Ax/wDi6AL9FUPs+o/8/sH/AIDH/wCLo+z6j/z+wf8AgMf/AIugC/RVD7PqP/P7B/4DH/4uj7PqP/P7B/4DH/4ugC/RVD7PqP8Az+wf+Ax/+Lo+z6j/AM/sH/gMf/i6AL9FUPs+o/8AP7B/4DH/AOLo+z6j/wA/sH/gMf8A4ugC/RVD7PqP/P7B/wCAx/8Ai6Ps+o/8/sH/AIDH/wCLoAv0VQ+z6j/z+wf+Ax/+Lo+z6j/z+wf+Ax/+LoAv0VQ+z6j/AM/sH/gMf/i6Ps+o/wDP7B/4DH/4ugC/RVD7PqP/AD+wf+Ax/wDi6Ps+o/8AP7B/4DH/AOLoAv0VQ+z6j/z+wf8AgMf/AIuj7PqP/P7B/wCAx/8Ai6AL9FUPs+o/8/sH/gMf/i6Ps+o/8/sH/gMf/i6AL9FUPs+o/wDP7B/4DH/4uj7PqP8Az+wf+Ax/+LoAv0VQ+z6j/wA/sH/gMf8A4uj7PqP/AD+wf+Ax/wDi6AL9FUPs+o/8/sH/AIDH/wCLo+z6j/z+wf8AgMf/AIugC/RSLnaMnJ7nFLQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBQ1z/AJA95/1zNX6oa5/yB7z/AK5mr9AFCx/5CGpf9dU/9FrV+qFj/wAhDUv+uqf+i1q/QAUUUUAFFFFADfLQOXCrvPBbHNI0MbJsKKU67SBin0UANaNZMBlDYORkZp1FFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUV8QftwftGfED4Q/FjSdH8Ka7/ZenT6JDdyQ/ZopMytPOhbLqT91FGOnFdOHw8sTU9nDc48XioYOl7Wom15H2/RX5Nf8Nv/ABm/6G//AMkLf/43R/w2/wDGb/ob/wDyQt//AI3Xq/2NiO6/H/I8P/WHC/yy+
5f5n6y0V+TX/Db/AMZv+hv/APJC3/8Ajdfp18J9avPEnws8G6vqMv2jUNQ0azu7mbaF3ySQIztgcDJJOBXFisDUwiUpta9j0sFmdHHScaaat3/4c6uiiivOPWCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDG8W/8gWT/fT+YrYX7o+lY/i3/kCyf76fzFbC/dH0oAWiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigChrn/ACB7z/rmav1Q1z/kD3n/AFzNX6AKFj/yENS/66p/6LWr9ULH/kIal/11T/0WtX6ACiiigAoopKAKserWU2oS2CXlu99EoeS1WVTKinoSucgcj86df6laaVbm4vbqGztwQDLcSBFBJwBknHWvCodP8JWPjDwJrnhiz0z+zb3WbqN721Lf2nNdOk4kWVn+do1YsXVjldq8YGK6XxtN4b8ZfEHwKmoSabrXhyez1ZlEzRz2klxH9nA65UsqfaPcYf0NAHqF7qVppsaSXd1DapI4jRppAgZj0UEnkn0qzXy38NbDQtbh8L2ni2G1vvC0Xhi+k0wauFaPyluyC6huhW38nDDkKeDjNe0fAea8uPgz4Mlv3kkuX0uAs8xy7DYNpPuVxQB3lFI2dpwMntVTz73/AJ9I/wDv/wD/AGNAFyiqfn3v/PpH/wB//wD7Gjz73/n0j/7/AP8A9jQBcoqn597/AM+kf/f/AP8AsaPPvf8An0j/AO//AP8AY0AXKKp+fe/8+kf/AH//APsaPPvf+fSP/v8A/wD2NAFyiqfn3v8Az6R/9/8A/wCxo8+9/wCfSP8A7/8A/wBjQBcoqn597/z6R/8Af/8A+xo8+9/59I/+/wD/APY0AXKKp+fe/wDPpH/3/wD/ALGjz73/AJ9I/wDv/wD/AGNAFyiqfn3v/PpH/wB//wD7Gjz73/n0j/7/AP8A9jQBcoqn597/AM+kf/f/AP8AsaPPvf8An0j/AO//AP8AY0AXKKp+fe/8+kf/AH//APsaPPvf+fSP/v8A/wD2NAFyvzW/4KVf8l00L/sW4P8A0quq/R3z73/n0j/7/wD/ANjX5vf8FInlf45aGZo1ib/hHIOFfdx9puu+BXs5T/vK9GfPZ9/uT9UfKNFFFfbn5qFftF8Cv+SI/D3/ALF3Tv8A0mjr8Xa/Zj4GzXg+Cfw+C2sbL/wj2n4JmxkfZo/9mvnM6/hw9T6/hz+LU9F+Z6HRX55/EYsfiF4nLDa39qXWQDnB81q52vxGpxZ7Ocoew2dvi/8AtT+jqXA/tacan1m10n8Hf/t4/S6ivzRr0D4Bs6/F3w4Y1Dv5smFJwD+6fvV4fir29aFL2FuZpfF3dv5TLFcF/VqFSv8AWL8qbty72V/5j7uoqn597/z6R/8Af/8A+xo8+9/59I/+/wD/APY198fmRcoqn597/wA+kf8A3/8A/sadHNdtIoe2jRO7CbOPw20AWqKKKACiiigAooooAKKKKACiiigAooooAKKKKAMbxb/yBZP99P5ithfuj6Vj+Lf+QLJ/vp/MVsL90fSgBaKKKACiiigCG7vILC3ee5njt4EGXlmcKqj1JPAp1vcRXcKTQSpNE4yskbBlI9QRXL/EVdDk0/To9ZazWZrv/iW/2iHNsbwQyGPzQOCuAxwxxkDHzba5b4F3trpmh6pYXVzYR6g2u3UbfYZl+xzTsBKy2o4+UKeV5IKvknBNAHpcOqWdxeTWkV3BLdQjMsCSKXTPTcoOR+NFrqlnfTTw213BcTQHbLHFIrNGfRgDwfrXy74QaVdP+FM2lbD44kvNW/tXbjz2fyLkyi47484Rff4zt74rc+Ff9nJ4k+Dp0Iwm8k8OXh1ryMeYy7YDmbHO7z8/e5yW96APpCiiq9zLcRsvkwLMO5aTbj9DQBYoqj9pvv8Anyj/AO//AP8AY0fab7/nyj/7/wD/ANjQBeoqj9pvv+fKP/v/AP8A2NH2m+/58o/+/wD/APY0AXqKo/ab7/nyj/7/AP8A9jR9pvv+fKP/AL//AP2NAF6iqP2m+/58o/8Av/8A/Y0fab7/AJ8o/wDv/wD/AGNAF6iqP2m+/wCfKP8A7/8A/wBjR9pvv+fKP/v/AP8A2NAF6iqP2m+/58o/+/8A/wDY0fab7/nyj/7/AP8A9jQBeoqj9pvv+fKP/v8A/wD2NH2m+/58o/8Av/8A/Y0AXqKo/ab7/nyj/wC//wD9jR9pvv8Anyj/AO//AP8AY0AXqKo/ab7/AJ8o/wDv/wD/AGNH2m+/58o/+/8A/wDY0AXqKo/ab7/nyj/7/wD/ANjR9pvv+fKP/v8A/wD2NAF6iqP2m+/58o/+/wD/APY0fab7/nyj/wC//wD9jQBeoqj9pvv+fKP/AL//AP2NH2m+/wCfKP8A7/8A/wBjQBeoqj9pvv8Anyj/AO//AP8AY0fab7/nyj/7/wD/ANjQBeoqj9pvv+fKP/v/AP8A2NH2m+/58o/+/wD/APY0AXqKo/ab7/nyj/7/AP8A9jR9pvv+fKP/AL//AP2NAF6iqP2m+/58o/8Av/8A/Y0fab7/AJ8o/wDv/wD/AGNAF6ikXO0ZGD3FLQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBQ1z/kD3n/AFzNX6oa5/yB7z/rmav0AULH/kIal/11T/0WtX6oWP8AyENS/wCuqf8Aotav0AFFFFABRRRQBk2/hLQ7PWJNWg0awg1SQsXvY7ZFmYnqS4GTnvzTbjwboF5pUemT6Jp82mxuZEs5LVGiViSSwQjAOSefc1sUUAZWqeFNE1uzt7XUdIsb61tseTDc2ySJFgYG1SMDjjitRVCqFUYA4AFLRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFfmt/wAFKv8Akumhf9i3B/6VXVfpTX5rf8FKv+S6aF/2LcH/AKVXVezlP+8r0Z89n3+5P1R8m0UUV9ufmoV+0XwK/wCSI/D3/sXdO/8ASaOvxdr9ovgV/wAkR+Hv/Yu6d/6TR185nX8OHqfX8OfxanovzPi34kf8lE8U/wDYVuv/AEc1c5XR/Ej/AJKJ4p/7Ct1/6Oaucr+T8R/Gn6v8z+38L/u9P0X5BXoX7P8A/wAlh8N/9dpP/RT157XoX7P/APyWHw3/ANdpP/RT10Zf/vlH/FH80c2af7hX/wAEv/SWfdlFFFfvZ/MoUUUUAFFFFABRRRQAUV8FftjftO/Ev4V/Gq50Hwv4l/svSUsreZbf7BbTYdlJY7pI2bn61618D/jF4w8ZfsleIvGWraidS8S2sWoGG6W3iiK+WmUOyNVX5evTJxXfUwVSlRjXbVn9+p5dHMaVbEywsU+aN+1tPmfTdFeI+JvC2m/DOw8C6roF1dJqrataWLSvdvK2qR3B2zLNkkSHbmQEj5Sny4Fc
tHbmRtJvP7KubO4k8cXMMniT7SAojGoTqIWRW3OrgCDayhRvB/hGeA9Q+l6K8l+G/hfSrH4rfEDyLGOMafcWS2gGcQB7RGcIO2SST9a0vjvfXUHhfR7CC7m0+31fXLHTLy6t3MciW8swVwrjlSw+TcORuoA9Iorxf4n+D9G8E+EfGNzompTaTeTeGbwf2PHdnbN5acXCxliQ6ZCl167xuycVofFS9mj8d/C9YJ5FjmudRLrG5AdRplwRnHUA4P1xQB6xRXzj4P0W20P4V/CnxZYSz2mvzzaXbXEkdw4F9HcSJHMkqZxIdrM4JGQUznGafb/EKK5+Op137RqiWf8Aah8LRxm0ufsLwbMGXzdnk7/ta+WPmzg4oA9y8W/8gWT/AH0/mK2F+6PpWP4t/wCQLJ/vp/MVsL90fSgBaKKKACiiigCnq2j2GvafNYanZW+o2Mw2y2t3EssTj0ZWBB/GorPw7pOn29hb2umWdtb2BzaRQ26ItudpXMYAwnysw4xwSO9aNFAGZZ+GdH07WLzVrTSrG21S8AW5vobdEmnA6B3A3Nj3NGl+GdH0O8vrzTtJsbC7vn8y7ntbZI5LhufmkZQCx5PJz1rTooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiuY+JfxB074V+B9V8U6tDdXGn6cgkljs0VpWBYL8oZlB5I6kV478L/25fAfxZ8d6V4T0jSfEVtqOpM6xS3ttAsK7I2kO4rOxHCHoDzit4UKtSDnGN0jlqYqjSqKlOVpPZH0TRXCX3jPWtW8c6n4b8OWtjnSbSG4vb3UWfZ5k24xRIqcn5UZmbPGV4NYP/C39Wvm8IWMGn2Vhq+rajfaXfJdSNJFaTWsUrvtK7S6sYvlJxwwNYHUes0V44vxc8S3XhmDUotP0uKVfECeHZgzSSRSyNdrb+fC4xmMb84IzlWGeM165Y/aRaRC8aJrrb+8MAITPsCScUAT0V5ovj3xXrtrr2q+HNH0+/0vTbyWzt7Wad1ub9oXKTFDjanzK4TOd2OSoNbnhnxxN4g8U+MNKksxbJob2qxsx+d/NtlmO4dAQWxx6UAdfRXkWgfFTxN4zutMtdG0/SYbm68NWuvMt9JLsLzFh5QZRkAY+9g9elWNW+NN1F8M4ta0/RPN8Tz3zaTFoU0v/L7HIyzR7wOVURysGA5CjpmgD0bXP+QPef8AXM1frnrfxBaeK/A0GtWD77LULJbqFj12uoYZ9+a6GgChY/8AIQ1L/rqn/otav1Qsf+QhqX/XVP8A0WtX6ACiiigAooooAKK8d07S73T/AIh6Lp2n+I9Z1/Vba7uLvX7iW6lNlFbyI5jgaEuY0fLRbFQbgqFjgH5tr4oafr+reJPD1vogFxFHBdS3Vp/bs2mbjmIRsTDmRgP3n8JAJ5I4yAekUV4Z4f8AECfFbWtEsGudc0DSE8PT3Rhh1WaKcXC3HkP5kyPukMew4JJB35IPGO/+DGvX/if4U+FNV1SUz6hdafFJPMVwZG2/fx79fxoA7SikZtqk4JwM4HWs3+2v+offf9+f/r0AadFZn9tf9Q++/wC/P/16P7a/6h99/wB+f/r0AaMhKxsR1xX5Nf8ADb3xo/6HJv8AwAtf/jdfqlJrX7t/+JffdD/yx/8Ar1+HNfR5PSp1Pac8U9t16nyHEFerR9l7KTje+zt2PdP+G3vjR/0OTf8AgBa//G6P+G3vjR/0OTf+AFr/APG68Lor6P6rh/8An2vuR8h9exX/AD9l97PvP9if9o/4h/Fn4vXei+KvEB1TTY9JmuVhNrDHiRZIlDZRAejN37190V+Y3/BOy6+y/Hi+fyZZv+JHcDbCu4/66Dmvtf8AaG8fav4X8D213oz3mk3bX8cbTtEoyhSQlec9wPyr4bPqtPAynV5fdir2R+mcMUa2ZQhQ5rylJq7b/wCCexUV8If8L78f/wDQy3P/AHxH/wDE0f8AC+/H/wD0Mtz/AN8R/wDxNfnf+tmE/wCfcvw/zP1f/UjHf8/Ifj/kfd9FfCH/AAvvx/8A9DLc/wDfEf8A8TX0V+zr4/1jxR4JvbrWpLzVrpNReJZliU7UEURC8Y7kn8a9HAZ9h8wrewpxae+tv8zycz4ZxWV4d4mrOLV0tL319Uj2Wisz+2v+offf9+f/AK9H9tf9Q++/78//AF6+mPkDTorM/tr/AKh99/35/wDr0f21/wBQ++/78/8A16ANOisz+2v+offf9+f/AK9H9tf9Q++/78//AF6ANOvzW/4KVf8AJdNC/wCxbg/9Krqv0W/tr/qH33/fn/69fnF/wUeuvtfxw0N/Jlgx4dgG2Zdp/wCPm65+lezlP+8r0Z89n3+5P1R8qUUUV9ufmoV+0XwK/wCSI/D3/sXdO/8ASaOvxdr9kvgfrHl/BX4fp9hvH2+H9PG5Ysg/6NHyOelfOZ1/Dh6n1/Dn8Wp6L8z49+JH/JRPFP8A2Fbr/wBHNXOV0PxEfzPiB4mfay7tUujtYYI/etwa56v5PxH8afq/zP7fwv8Au9P0X5BXoX7P/wDyWHw3/wBdpP8A0U9ee133wHm+z/Fvw7JseTbK/wAsYyx/dP0FdGX/AO+Uf8UfzRzZp/uFf/BL/wBJZ94UVmf21/1D77/vz/8AXo/tr/qH33/fn/69fvZ/Mpp0Vmf21/1D77/vz/8AXqSDVvOmVPsV3HuON0kWFH15oAv0UUUAFFFFAHl/xA/Zn+G/xS8RPrvifw6NT1V41ha4N3PHlVGFGEcDj6V1HgX4Z+Gvhr4XPh3w7piWOjF3c2rSPKpL/eyXJJB9OldRRWrq1JR5HJ27GEaFKM3UjFKT621ON8P/AAh8K+GdQs7yx0+bzLEOLKO5vZ7iGz3fe8iKR2SLIyPkA4JHQ4rVbwRor6M+lGz/ANBe8OoGMSOCLgz/AGjzA2cg+b83B4+lbtFZG5hXHhr7FLrN/oTW2n63qjRPNd3cL3MTNGoRS0YkTogxwy+vNUY/Cup69peo6X4zutF8QaZdxiP7NaaVLagc5JYvcS5PQgjaQRnPTHV0UAcroPww8OeHbi+uLeylurm9gFrcXGp3c17I8Iz+63TO5Cc/dHB71W8N/CHwr4Tu0udPsZ/Nitms4PtV9Pci3hb70cQldhGpwOEx0rs6KAOI8OfBnwn4WvNNurKyupZNMjMVgt9qFxdR2ikY/dJLIyxnHGVAOMjvWpJ8PdAk8Jr4bNhjR1cSiBZXDBxL5u/eDu3eZ82c5zXR0UAY3i3/AJAsn++n8xWwv3R9Kx/Fv/IFk/30/mK2F+6PpQAtFFFABRRRQAUVyvxHsdOvPDok1nWZ9F0a1lFxeyQXLW/nRhWHlNIhDBSzKcKcnaB3rO+Ddnqlp4RlbURfRW899PPp1tqc7TXMFmz/ALlJGYls7edrElQwU8jFAHd0V8/eG9Z1DwXqNhqus6XNd3niC41AWF9beIZryJ5Ss9xGjW+fKVDHHtUoWxgDAo+Gt1eaT4j+F11
Fql/fyeLtFurvWFuruSZJJljhlWZVYlYyGdkwoAwwGOBQB9A0UVWu777KyjyJ5t3/ADxTdj60AWaKzv7YH/Plef8Afk0f2wP+fK8/78mgDRorO/tgf8+V5/35NH9sD/nyvP8AvyaANGis7+2B/wA+V5/35NH9sD/nyvP+/JoA0aKzv7YH/Plef9+TR/bA/wCfK8/78mgDRorO/tgf8+V5/wB+TR/bA/58rz/vyaANGis7+2B/z5Xn/fk0f2wP+fK8/wC/JoA0aKzv7YH/AD5Xn/fk0f2wP+fK8/78mgDRorO/tgf8+V5/35NH9sD/AJ8rz/vyaANGis7+2B/z5Xn/AH5NH9sD/nyvP+/JoA0aKzv7YH/Plef9+TR/bA/58rz/AL8mgDRorO/tgf8APlef9+TR/bA/58rz/vyaANGis7+2B/z5Xn/fk0f2wP8AnyvP+/JoA0aKzv7YH/Plef8Afk0f2wP+fK8/78mgDRorO/tgf8+V5/35NH9sD/nyvP8AvyaANGis7+2B/wA+V5/35NH9sD/nyvP+/JoA0aKzv7YH/Plef9+TR/bA/wCfK8/78mgDRopFbcoOCMjOD1paACiiigDz/wCPXw+v/ip8JfEXhbTJ7e2vtRhWOKW6LCNSHVuSAT0HpXy/+z7+w140+E3xg8O+LNU1jRLmw015mlitZJTI2+GSMbQ0YHVx3r7gorspYurRpypQ2Z59bA0cRWjXmveja3ydzzzUvC/iTw7481bxJ4Zg0/VYtatoIb2w1G6e1MUsO4JKkixyZBVyGUqPugg8kVgr8EJ9RuPBs2uHTNWNnql/q2swywloZpbmGVdsSMDlUd0A3YOEB68V7DRXGegeUWvwm1Sx8MjQILizTTLLxTbatpsSlgILGO6iuDBjbwQRKFA4wVGQOnb33jjTtP1J7GW21hplYIWg0S9liycdJEiKEc9QcCugooA8rg8J+OPCtvrei+GTpC6bqF7cXlnq11O4n04zuZJAYPKZZirs5X51B4B6czX3hPxd4a8UeI9T8NRaXqw8QQW/nSardPbtb3EUXlCTbHEwkQqEJUbDkHBwePTqKAPH9I+G/inwHrWnXXh+PSdXjtfDVroWdRvJLYiSFmPmYSKTcpyOMg+9Mt/gPc3cmhx6rrdz5dlJfapc3elXMlnPLqN1Judk2HKRqrSqBuyQ/PSvY6KAOD8G+D7nwF4BvdBlmW4s7Oa4FhJ5jSSfZmcuiyFh95dxXvwoOea7yqGuf8ge8/65mr9AFCx/5CGpf9dU/wDRa1fqhY/8hDUv+uqf+i1q/QAUUVy/iz4kaJ4H1LTrbXZ5NMt78lIdRnjItBJniN5fuox6jdgEA4PBoA6ikPPFcjJ8VNBk8ZxeFrCWbWNYz/pUemxGZLFSCQ1w4+WPOOATuORgV19AHBeHfgj4X8K6p9v0savaym6a9eMa5etDJMxyzvEZSjEnrkHNWpfhF4Zk0vTrBba8t49PMxtZrbUbiGeMSvvlUTLIH2swBKlsfKOOBXZ0UAcbqPwg8J6lpemac2mNa2emxPb2yWNzLbFYnxvjLRspZGwCysSCQCc11Wn6fbaTYW1jZQJbWltGsMMMa4WNFGFUDsABViigAooooAKKKKAGS/6t/oa/Cev3Yl/1b/Q1+E9fUZJ/y8+X6nxPEn/Ln/t79Aooor6g+KPqj/gnD/yXy/8A+wDcf+joK+xv2uP+SY2f/YUi/wDRctfHP/BOH/kvl/8A9gG4/wDR0FfY37XH/JMbP/sKRf8AouWvzDi/+BW/wn7RwD/vGH/xnx9RRRX85n9YBX17+yF/yTXUv+wvL/6Jhr5Cr69/ZC/5JrqX/YXl/wDRMNfWcM/8jBejPieMP+RVL/Ej3Giiiv18/CAooooAKKKKACvzW/4KVf8AJdNC/wCxbg/9Krqv0pr81v8AgpV/yXTQv+xbg/8ASq6r2cp/3lejPns+/wByfqj5Nooor7c/NQr9ovgV/wAkR+Hv/Yu6d/6TR1+LtftF8Cv+SI/D3/sXdO/9Jo6+czr+HD1Pr+HP4tT0X5nxb8SP+SieKf8AsK3X/o5q5yuj+JH/ACUTxT/2Fbr/ANHNXOV/J+I/jT9X+Z/b+F/3en6L8gr0L9n/AP5LD4b/AOu0n/op689r0L9n/wD5LD4b/wCu0n/op66Mv/3yj/ij+aObNP8AcK/+CX/pLPuyiiiv3s/mUKKKKACiiigAooooA8z8e/tAeHPh34gfR9TtdSluljWUtaxIyYYZHJcH9K19H+LWia18PdR8ZRpdQaRYxTSyrPGol2xLubADEH25ryD47fBLxZ46+IE2q6RZwz2bW8UYd7hEO5Qc8E12vw/+Et/D8D9W8F66FsrjUEuYWaJxJsEi4VgQcHHXHtXzWFxOPqZhUpVoWpK9nbz01Pr8bg8sp5XSr0Kl6ztdc1+muho2/wATPE+kXWhT+KvDFppOja1cR2kMtpqDXE9lLICY1uVMaqAThdyMwDEDpzVCP4teKI7hL+40PR/+Ebm12TQoJI9Rk+2mQXT2yuYjDtI3LuIDkhcnsabNonj/AMeT6DpHiPR9L0PS9Lvba+vdQstQNz9vaE7lSGMxq0amQIxLnIAwM9aoRfBAafbR+ILTRbVPGtt4ln1OO8R1WSW2kvpCyF84O62kYYPQ47ivpT5A6DwP448c69421jRdW0Xw/Z2ujvFHez2WpTyuTJCJE8tWgUMMEA7iuOcZrf8Ail4k1/wh4Rvta0Kw03Uf7Pglu7uLULqSD9zHGznyykb5f5cAHA96p6Lpdz4V8Y+Pde1CGT+ztRms5LZraNp5HCW6Rt+7jBb7wPb36VJ4qvk+IPgHxZo+jw3hvrrSrm2iW9sZ7RWeSJ1UBpUUHk9unegDAuPHnj+Obw1pkeheG21vWIbq72tqlwLaOCIQY+f7PuLkzdNuBt61i33xq8ZSaV4f1HS/DugyW+qan/YZFzqswK3glljZlKQMGh/dZDg5IP3a6TxV8LLfxp4j8FnW9JtdV0fTNOvIbmK62sqzOLYJhe/+rk5H9axYPhjrlj4W8IaFBbqbXw74rW5t906krpkbymHknJKo6Ljr8tAHrmltePp1s2oxwRX5jUzx2sjSRK+OQrMqkjPQkD6VaoooAxvFv/IFk/30/mK2F+6PpWP4t/5Asn++n8xWwv3R9KAFoopsjiONnOcKMnAyaAHUVgeHvHnh/wAVaLJq2mava3NhFnzpS+zyCOokDYKEejAGpPCXjLSPHOnzahol2L+wjne3F1Gp8uVlxlo2PDrk4DDIODgmgCLxj4F0fx5aWltrMM80VpcLdweRdy27JKudrho2U5Gcjng803TfAelaUtiIjfymyuGuoGutSuJ2EjRmM5MkhLLtY/KcqCc4zzXRUUAcjovwo8L+H9Xg1Kx05o57ZpHtY3uZZILVnBDmGJmKREgsDsUcEjvT/DPwv8NeD9UbUNK09oLnymt4zJcSypBEzBmjhR2KxIWAO1ABwPQV1dFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQA
UUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUleffH4kfCHxGRx+5X/wBGLXy1+zy7H4yeHAST883f/pjJXzuOzf6njKWE9nfntre1ru21j6vLsi/tDAVsd7Tl9nfS172V97q33H2L4v8AiJ4Y8AR2r+JNf0/RBdMVg+3XCxmTGM7QTkgZGT0GRmmeIPiV4U8K6TYapq/iLTdP02/ANpeTXKiKcFdwKNnDArzkdq4rTtS0/wAO/HbxnP4guLewe70qxbTbm+dUR7aPzvOVHbj5XO5hn+JSa4HS4ribW/hjJoT29lZ3PibXZ9JNzbM8K2rW9yUIjDIdjDcVwRgMvYYr6I+UPYtT+N3gDRrWxub3xlosFvfRmW1ma9jKToGKlkYHBAYEHHcEV1Gj63YeINJt9T027hvtPuE8yG5gcNHIvqCOor5/09Eh8Am3mGzWYviNZjVUT/VC5bUYGzEMDEbI0bAHn5jyep+jqAOD0347/DvWL5bKy8Z6LdXbBmEMV2jMQqlmOM9gpP4VLYfG/wCH+qWN9eWnjPRLi3sY/NuXjvYz5Kbgu9hnIXcyjPTJArirz/kePjl/2ALD/wBJ7usb4b293b+JPg/NrslvNDL4WeLSJrGJodshggZ45wzNvJjXKkFRlW+XpgA9f8I/E3wn4+muYvDniHT9bktlVplsp1kMYJIBOOmcH8q6avNP2dv+SWW//YU1b/05XNel0AUNc/5A95/1zNX6oa5/yB7z/rmav0AULH/kIal/11T/ANFrV+qFj/yENS/66p/6LWr9ABXF/EHwLqPxA8vS5Ndk0vwzLEyX9rZwr9ovMn7nmtny0x12jcc9RjntK4r4zalc6P8ADTW7u1uruzljSMGbT4nkuAhlRXEQQFg5UsFYD5SQeAM0AY3gz4Nv8KdTt4fBWpfYvCss2+80C/DTquVwXgmJ8xGyFyrFlPPSvTq+XLDxv4O8F+LPD9/4Z13xs8Ms7f2yuq2eq3cEsHltyySxMfNMnl4ZBwA2Tg4P0xpOqW+t6Za39oZGtbmNZYzLE8TFSMjKOAyn2IBoAt0V5NDeeLND8faBZX3ihNXvtUurh7zRILeJbazsVWUpKhCCUMGEKlnchmZgB3G747vtXvvGHhvwzpmrzaFHqFre3k99axRSTDyDAqookVlAJnyTjOFwMZzQB3lFeE+D/HHiz4l/2Now1v8AsDU4dJuLy8vbG2ikFxcR3TWyDbIrARkxuzKvJ3ABhjn0v4VeKrnxx8N/DmvXkaRXl/ZRzTLH90SEfNj2zmgDq6KRmCqWPQDJrO/4SGx/56Sf9+X/APiaANKis3/hIbH/AJ6Sf9+X/wDiaP8AhIbH/npJ/wB+X/8AiaAL8v8Aq3+hr8J6/ceXxDY+W/7yTof+WL//ABNfhxX1GSf8vPl+p8TxJ/y5/wC3v0CiiivqD4o+qP8AgnD/AMl8v/8AsA3H/o6Cvsb9rj/kmNn/ANhSL/0XLXxn/wAE67yKx+PF9JMSqf2HcD5VLc+dB2Ar7B/au1a2vvhtZxwuzONTiPzRsvHly+or8w4v/gVv8J+0cBf7xh/8Z8k0UUV/OZ/WAV9e/shf8k11L/sLy/8AomGvkKvrL9k3Vbax+HOoxzOysdVkYbY2bjyYfQe1fWcM/wDIwXoz4njD/kVS/wASPfKKzf8AhIbH/npJ/wB+X/8AiaP+Ehsf+ekn/fl//ia/Xz8INKis3/hIbH/npJ/35f8A+Jo/4SGx/wCekn/fl/8A4mgDSorN/wCEhsf+ekn/AH5f/wCJo/4SGx/56Sf9+X/+JoA0q/Nb/gpV/wAl00L/ALFuD/0quq/Rf/hIbH/npJ/35f8A+Jr84f8Ago/eRX3xw0OSFmZR4dgU7lK8/abr1HvXs5T/ALyvRnz2ff7k/VHypRRRX25+ahX7RfAr/kiPw9/7F3Tv/SaOvxdr9k/gdr1lD8Ffh+jSOGXw9p6n9y55FtH7V85nX8OHqfX8OfxanovzPjz4kf8AJRPFP/YVuv8A0c1c5XQ/ESRZviB4mdeVbVLphxjgytXPV/J+I/jT9X+Z/b+F/wB3p+i/IK9C/Z//AOSw+G/+u0n/AKKevPa774DXCWvxb8OyyEhFlfJAJ/5ZP2FdGX/75R/xR/NHNmn+4V/8Ev8A0ln3hRWb/wAJDY/89JP+/L//ABNH/CQWP/PST/vy/wD8TX72fzKaVFZv/CQWP/PST/vy/wD8TUkOtWdxKsccjl2OADE4/UigC9RRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBjeLf+QLJ/vp/MVsL90fSsfxb/AMgWT/fT+YrYX7o+lAC01ywRioDNjgE4BNOooA8M1b9nO4+IV3Nr3i/U7O28SMQYI9GsYxZxBSCizrIpa76c+YdvJ2hT8x9U8Ew69a6DHbeI004ahAxiEulhlgmjGNrhG5jJ7rlgMcHnA8e+J3xE0rT/AIrahovif4mXngbT7bT7eexttNeGMzs5kErSu8UhyNqYX5Rg5GfmxtfAv4z6B4oa68ODxrb+JtThvLhNPlkIF3c2iAEPIFVVLA7xkAZVVOOaAPZqKwfGOn6tqmnQQaVrH9hKZw15eoiNKluFYsIt6sgcnbywIA3d8Vh/BzXNU1/wrcXOo3kuqW4v7iPTtTnhWJ72zD4imKqApyM4YKAwAYDmgDuqK+e/DvxU8Q28Pgvxdf6lLfaT4smvkl0ZoohHZIkU00HksqBywWDa29mB3E4GBWv8P/GXiSPxB4BfV9YfVrbxrpdxfPavDGiWEyJFKqwlVDeXskZSHLHKg560Ae20UVVu9StbFlFxOkJYZAc4zQBaorO/4SHTf+f2H/vqj/hIdN/5/Yf++qANGis7/hIdN/5/Yf8Avqj/AISHTf8An9h/76oA0aKzv+Eh03/n9h/76o/4SHTf+f2H/vqgDRorO/4SHTf+f2H/AL6o/wCEh03/AJ/Yf++qANGis7/hIdN/5/Yf++qP+Eh03/n9h/76oA0aKzv+Eh03/n9h/wC+qP8AhIdN/wCf2H/vqgDRorO/4SHTf+f2H/vqj/hIdN/5/Yf++qANGis7/hIdN/5/Yf8Avqj/AISHTf8An9h/76oA0aKzv+Eh03/n9h/76o/4SHTf+f2H/vqgDRorO/4SHTf+f2H/AL6o/wCEh03/AJ/Yf++qANGis7/hIdN/5/Yf++qP+Eh03/n9h/76oA0aKzv+Eh03/n9h/wC+qP8AhIdN/wCf2H/vqgDRorO/4SHTf+f2H/vqj/hIdN/5/Yf++qANGis7/hIdN/5/Yf8Avqj/AISHTf8An9h/76oA0aKzv+Eh03/n9h/76o/4SHTf+f2H/vqgDRorO/4SHTf+f2H/AL6o/wCEh03/AJ/Yf++qANGikVgyhgcgjINLQAUUUUAMkiSaMpIiyIeqsMg1DFptpBIHjtYY3HRljAI/HFWaKVk9WUpNKyZXu9PtdQVVuraG5VTkCaMMAfbNSG3iZo2MaFov9WdoynGOPTjipKKZJD9jgO4mGMlnEjfIOWGMMfcYHPtWJfeCb
PUNSe9e/wBYjkZg5jg1W4jiGMcCNXCgcdMV0NFAEX2aLdI3lJukAV22jLgdAfXqfzo+ywjygIkAi/1fyj5OMcenHFS0UAMhhjt02RIsaZJ2oABknJP5k0+iigChrn/IHvP+uZq/VDXP+QPef9czV+gChY/8hDUv+uqf+i1q/VCx/wCQhqX/AF1T/wBFrV+gArA8eTW8HhPUHu9avPD1uoXdqWnhTPD8642hkccnC8qeGP1rfrzP46w61p/hWfXtJ8U6noA09F86OxghljKNIitK4eNmxGpZiFIyFP1oA4rQ9c/4r7wpB4f+IfizxfFPeOl/Y3ltEIUh8mRhK8i2sYUB1Ubd2W3cdDn6Ar578P8Aixl8feD7XRfi5c+PUvp3F5pdutnJsgELt5zmKLKIrhFIJB+cYPGD9C0AcP4W+F3/AAier32oweJtavLi/uftN4bxbR2uOyoziAPsUfKqhhtHAxUt78NBfLpEz+JNaTVtL+0LDqytbm4aObG+NgYTGV+VMfJkeWvPXPZ0UAeexfBXTNN03SbXRtX1jQZtOsZdPW9sZYjPNFIQzmRpI3BbeN4YAEMTjGSK7Dw34fsfCfh/TtF0yLyNPsIEtoI85IRQAMnueOtaVFABRRRQAUUUUAMl/wBW/wBDX4T1+7Ev+rf6Gvwnr6jJP+Xny/U+J4k/5c/9vfoFFFFfUHxR9Uf8E4f+S+X/AP2Abj/0dBX2N+1x/wAkxs/+wpF/6Llr45/4Jw/8l8v/APsA3H/o6Cvsb9rj/kmNn/2FIv8A0XLX5hxf/Arf4T9o4B/3jD/4z4+ooor+cz+sAr69/ZC/5JrqX/YXl/8ARMNfIVfXv7IX/JNdS/7C8v8A6Jhr6zhn/kYL0Z8Txh/yKpf4ke40UUV+vn4QFFFFABRRRQAV+a3/AAUq/wCS6aF/2LcH/pVdV+lNfmt/wUq/5LpoX/Ytwf8ApVdV7OU/7yvRnz2ff7k/VHybRRRX25+ahX7RfAr/AJIj8Pf+xd07/wBJo6/F2v2i+BX/ACRH4e/9i7p3/pNHXzmdfw4ep9fw5/Fqei/M+LfiR/yUTxT/ANhW6/8ARzVzldH8SP8Akonin/sK3X/o5q5yv5PxH8afq/zP7fwv+70/RfkFehfs/wD/ACWHw3/12k/9FPXntehfs/8A/JYfDf8A12k/9FPXRl/++Uf8UfzRzZp/uFf/AAS/9JZ92UUUV+9n8yhRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAY3i3/kCyf76fzFbC/dH0rH8W/8AIFk/30/mK2F+6PpQAtFFNYFlYKdrY4OM4oA8j+J+upp/igxN8ZU8DHyUP9lNbWMhHX58zRs3P1xxWx8C/EV/4k8KahcXuvT+KEi1O4gtdYmtI7YXUK7drIsaKpUEld2OSrdsVxtxq3i2P4g6r4e1H4jaFpf2K0guUlvtEjR7jzS4+QNMAVTYAWBPLYwMAnvvg/4iv/EXh/UW1DVLfXJbPUp7NNSs7cQ29wibcNGAzAgZ2k5PzKw7UATfFT4e3PxK8PxaTD4gutAh89ZZzbQRTC4UA/upFkBDITglSMHGDkcVL4b8H65pNrp0WoeMb7V3tLlpmdrO2txNEYyiwMscYAVSdwIAOQOcV19FAHm3h/4I2Gh69ZXT6pd32kaXLczaTosyRiCxacMJMMqhnAV3VQ5O0OevBEvgn4N2/g/WrC+l1q+1eHSbSSw0e0uljCWEDspZQVUF2wiKGYk7Vx3Jr0SigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigChrn/IHvP8Armav1Q1z/kD3n/XM1foAoWP/ACENS/66p/6LWr9ULH/kIal/11T/ANFrV+gArl/ib4gvfCvgfVNUsNq3MAj/AHskZkWBGkVZJio5YRoWfHfbXUVy3xO1y+8N+BdU1HT+LmER5k8kzeTG0irJLsH3tiFnx320AeN/Cz4gxnWtD0vRvFWk+KtSn1i+tdTXT7S1WS4s1DmO7c26gR7SqAE/K3mkcnBH0bXzp8MPHkLa5oel6D4m0/xLfyaze2upLY2drumsVEhjuZWt0UR7SqBTwH8wjBOCPougAorzfw/428UXnxi1Hw5q+nWOnaSmlNfWawTmeaTFwYxI7bVC7l52AHH941F8YPGmseGtT8PWOlaiukrfLdSTXR0SfVmAiWMhRDCwYZ3klugC0Aem0V5NH4813xVNoek+E9d0i8uptE/tmbWJrBzBdfOI0RIhKDEGbfuJZim0DBJOO2+HPi0+PPAeg+ITb/ZX1KzjuHgzkRsR8yg9wDmgDo6KSmfaIv8Anqn/AH0KAJKKj+0Rf89U/wC+hR9oi/56p/30KAFl/wBW/wBDX4T1+6ktxF5b/vU6H+IV+FdfUZJ/y8+X6nxPEn/Ln/t79Aooor6g+KPqj/gnD/yXy/8A+wDcf+joK+xv2uP+SY2f/YUi/wDRctfHH/BON1j+Pl+WYKP7CuOpx/y2gr7E/a2mST4Z2YV1Y/2pF0Of+WctfmHGH8Ct/hP2jgH/AHjD/wCM+QaKKK/nM/rAK+vf2Qv+Sa6l/wBheX/0TDXyFX11+yLMkfw21IM6qf7Wk6nH/LGGvrOGf+RgvRnxPGH/ACKpf4ke6UVH9oi/56p/30KPtEX/AD1T/voV+vn4QSUVH9oi/wCeqf8AfQo+0Rf89U/76FAElFR/aIv+eqf99Cj7RF/z1T/voUASV+a3/BSr/kumhf8AYtwf+lV1X6S/aIv+eqf99CvzZ/4KUOsnx00IqwYf8I5B0Of+Xq6r2cp/3lejPns+/wByfqj5Oooor7c/NQr9ovgV/wAkR+Hv/Yu6d/6TR1+Ltfs/8C7iJfgj8PQZFB/4R7T/AOIf8+0dfOZ1/Dh6n1/Dn8Wp6L8z4w+JH/JRPFP/AGFbr/0c1c5XR/EchviH4oI5H9q3X/o5q5yv5PxH8afq/wAz+38L/u9P0X5BXoX7P/8AyWHw3/12k/8ART157XoPwBYL8YPDZJAHnScn/rk9dGX/AO+Uf8UfzRzZp/uFf/BL/wBJZ920VH9oi/56p/30KPtEX/PVP++hX72fzKSUVH9oi/56p/30KFnjYgCRSfQEUASUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAY3i3/kCyf76fzFbC/dH0rH8W/wDIFk/30/mK2F+6PpQAtFFFAHzz8ePElpceMrfTNduPDVro+mT6bKthr2nQ3EmpJPcCKZ43mO1FjQkkoCwwSSBivTvhH4gOv+H78xPaXGm2eoz2mn3lhCIoLi2UgoyKDjC5MeRwTGSOteZ/HjxpBb+LINM13V9G0fR7C402eKx1exglGqRzXAiuGEk4KqsSEk+WAwwSTjFem/CXxJJ4m8P386zw32mwajPb6bfW8QjiubVSNjIBwVUloww4by8jrQB29FYnjLVdV0bw/c3Wi6XHq2orxHBN
cCCNfV3Ygnao5IUEnoBR4J16XxR4L0DWZ4khm1HT7e8eOMkqjSRq5Az2BNAG3RXk3gP43WPi3VtQjl8QaGq2zXb/ANlwxv8Aa1hhkdQxYvtY7VDMFXI3DpUngn4qazq2teEodZsbK3s/FmnTahp32Uv5luYxG/lSkkhyY5N24bcFSMHrQB6rRRSZoAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWik3D1o3D1oAWiiigAooooAKKKKACiiigAooooAKKKKACiiigChrn/ACB7z/rmav1Q1z/kD3n/AFzNX6AKFj/yENS/66p/6LWr9ULH/kIal/11T/0WtX6ACuO+L3iW58H/AA71jVrW9ttNlt1jBvbwBo4FaVEeQqSAxVWYhf4iAO9djXnfjzxlPo3jjQNMl1Wx0TRPslzqeoXF9GrfaY4ii+QhYgKf3m8nkgKMDk0AeZWXxQ8N+AfFXh0WPxe0jxLZ6ldPBqcN5Lp0aQwiF384SwJHsIdVXa+d2/A6E19EadqVrq9jb3tjcw3lncIJIbiBw8cikZDKw4IPqK8H8L+LPFP9reBdf1PUrObS/Gd7JFH4ZfTo45bK2aGWWGRZAdzMojTeSMHzO3Br6AVQihVAUDoAKAOEj8B68vxVPi59e09rT7EdO/s5dKcSeR5hkH777QRv3Y+bZjH8I61ZuPDfjNrHTjb+MbRNSg85bmSbRg9vcq5yh8oTKyMgwARJg85U5GOzooA8psfgrqPhmz0l/DPiaPTtYtbGexub+900XK3Ilk84uIxImxlkLMvJADEEHrXe+DfCtp4H8KaToFi0j2mm2yW0bynLsFGNze56n61s0UAJUf2WH/njH/3yKlooAi+yw/8APGP/AL5FH2WH/njH/wB8ipaKAIJbWHy3/cx9D/CK/Cuv3Yl/1b/Q1+E9fUZJ/wAvPl+p8TxJ/wAuf+3v0CiiivqD4o+pv+CccayfHu/DKGH9hXHDDP8Ay2gr7F/a2hjj+GdmVjVT/akXKgD/AJZy18ef8E4f+S+X/wD2Abj/ANHQV9jftcf8kxs/+wpF/wCi5a/MOL/4Fb/CftHAP+8Yf/GfH1FFFfzmf1gFfXX7IsMcnw31Iuisf7Wk5YA/8sYa+Ra+vf2Qv+Sa6l/2F5f/AETDX1nDP/IwXoz4njD/AJFUv8SPbPssP/PGP/vkUfZYf+eMf/fIqWiv18/CCL7LD/zxj/75FH2WH/njH/3yKlooAi+yw/8APGP/AL5FH2WH/njH/wB8ipaKAIvssP8Azxj/AO+RX5s/8FJ41j+OehBFCj/hHIOFGP8Al6uq/SuvzW/4KVf8l00L/sW4P/Sq6r2cp/3lejPns+/3J+qPk2iiivtz81Cv2e+BdvE3wR+HpMSEnw9p+TtH/PtHX4w1+0XwK/5Ij8Pf+xd07/0mjr5zOv4cPU+v4c/i1PRfmfFnxHAX4h+KABgDVLr/ANHNXO10fxI/5KJ4p/7Ct1/6Oaucr+T8R/Gn6v8AM/t/C/7vT9F+QV6D8AVDfGDw2GAYedJwR/0yevPq9C/Z/wD+Sw+G/wDrtJ/6KeujL/8AfKP+KP5o5s0/3Cv/AIJf+ks+5/ssP/PGP/vkUfZYf+eMf/fIqWiv3s/mUi+yw/8APGP/AL5FKtvErArGgPqFFSUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGN4t/5Asn++n8xWwv3R9Kx/Fv/ACBZP99P5ithfuj6UALRRUdxN5EEku0vsUttXqcDOKAPGviFrq618Qr/AMNat4x0nwfpdnZ293arcQWks127tIHbNyGRQu1RtC7vnByARnoPhB8RLbxJZ3el3XiHRtW1CzvZrW1l0+WJGvLeMDEohVjt53KccHZuHBFcZb+N/Evjrw74RtrC90GDxL4qeS+F1HZi5XStORNxDIzfPIGKRkkgb3bgY47z4P6jd6pperQaqbK91TRtUn0uTU7O1WBLvYFYSBATtOH2sASNyNQB0njSDXbrw3dw+G/7O/tWQbE/tQyCDaeGzs+bOM496534c+H/ABl4a8I+GNH1W40MvpgW0uWslmYS2scGyPaWxiTeFJyMYziu+ooA8mh+FviXWtb0SPxPqml3+iaDcT3FrNZ2xhu7wyQyQ7ZlUCOMBZmz5f3iAcL0pfAfwo13Rde8Oz69qVhead4VsZtP0dbOJ1lmWQIpln3cBhHGq7VyCSxyOlesUUAFQXFjb3ZBmgjlK9N6g4qeigCn/Y9h/wA+cH/fsUf2PYf8+cH/AH7FXKKAKf8AY9h/z5wf9+xR/Y9h/wA+cH/fsVcooAp/2PYf8+cH/fsUf2PYf8+cH/fsVcooAp/2PYf8+cH/AH7FH9j2H/PnB/37FXKKAKf9j2H/AD5wf9+xR/Y9h/z5wf8AfsV5df8A7UXgvTb64tJv7R82CRon22wI3KSDj5vUVB/w1d4H/wCol/4DD/4qvJebYBOzrR+89xZHmcldYeX3HrH9j2H/AD5wf9+xR/Y9h/z5wf8AfsV5P/w1d4H/AOol/wCAw/8Aiq2PCH7QnhTxt4itNF037d9tut3l+dAFX5UZzk7vRTVQzTA1JKEK0W3otSKmTZjSg6k6Eklq3boj0D+x7D/nzg/79ij+x7D/AJ84P+/Yq5RXqHjFP+x7D/nzg/79ij+x7D/nzg/79irlFAFP+x7D/nzg/wC/Yo/sew/584P+/Yq5RQBT/sew/wCfOD/v2KP7HsP+fOD/AL9irlFAFP8Asew/584P+/Yo/sew/wCfOD/v2KuUUAU/7HsP+fOD/v2KP7HsP+fOD/v2KuUUAU/7HsP+fOD/AL9ij+x7D/nzg/79ii81iw02RUu762tXYZCzSqhI9eTVf/hKdF/6C9h/4Ep/jWbqQTs2jVUqkldRf3Fj+x7D/nzg/wC/Yo/sew/584P+/Yqv/wAJTov/AEF7D/wJT/Gj/hKdF/6C9h/4Ep/jR7WH8yH7Gp/K/uLH9j2H/PnB/wB+xR/Y9h/z5wf9+xVpWWRQykMrDIYHIIp1aGJT/sew/wCfOD/v2KP7HsP+fOD/AL9irlFACKoVQAMAcAUtFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAFDXP+QPef9czV+qGuf8ge8/65mr9AFCx/5CGpf9dU/wDRa1fqhY/8hDUv+uqf+i1q/QAVieKPBPh/xtDbw+INEsNait38yJL63SYI3qNwOP61t0UAZLeFdHk8RQ6+2m2zazDbfZIr5owZY4ck7FPYZJ6eta1FFABRXEaP8SJdW+Jl/wCE30O7sYrfT/t0d/dMq/aAJfLOyPkhc9GOM4PGME2vGnjDUND1TSNI0XSYtY1jUlnmjiubs20SRQhd7M4RzndJGoAXkv2AJoA62ivK7P40X3ibTtGbwv4bGpare2E
2oXFhe3wtfsqxP5bRlwj7nMu5F4AO0kkCu78G+KbTxv4V0nX7ASLZ6lbJcxLKAHUMM7WA7jofpQBs0UlVP7Z0/wD5/rb/AL/L/jQUouWyLlFU/wC2LD/n9t/+/q/40v8Aa1j/AM/lv/39X/GldD5JdixL/q3+hr8J6/c+TVLLy3/0yDof+Wq/41+F+4eor6jJGv3ny/U+J4khL9zp/N+gtFJuHrRuHrX1HMu58VyS7H1T/wAE4f8Akvl//wBgG4/9HQV9jftcf8kxs/8AsKRf+i5a+Nv+Ccs0cPx7v2d1Qf2FcDLED/ltBX2J+1tdwP8ADOzCzRsf7Ui6MP8AnnLX5hxf/Arf4T9n4Ci1iMPdfbPkOim+cn99fzo81P76/nX86n9Xjq+vf2Qv+Sa6l/2F5f8A0TDXyB5if3l/Ovrz9kOaNfhrqWXUf8TaXqR/zxhr6vhn/kYL0Z8Txh/yK5f4ke6UVH58f/PRf++hS+dH/wA9F/MV+vn4RZj6KZ50f99fzFHnR/31/MUBZj6KZ5yf31/OjzE/vr+dAWY+vzW/4KVf8l00L/sW4P8A0quq/SfzE/vL+dfmv/wUpYN8dNCwc/8AFOQdP+vq6r2cp/3lejPnc+/3J+qPk6iiivtz82Cv2i+BX/JEfh7/ANi7p3/pNHX4u1+0XwKYf8KR+HvP/Mu6d/6TR185nX8OHqfXcOfxanovzPi34kf8lE8U/wDYVuv/AEc1c5XR/Eg/8XE8U/8AYVuv/RzVzma/lDEfxp+r/M/uDC/7vT9F+QV6F+z/AP8AJYfDf/XaT/0U9ee5r0L9n8/8Xh8N/wDXaT/0U9b5f/vlH/FH80cuaf7hX/wS/wDSWfdlFJmjNfvZ/MotFJmjNAC0UmaKAPM/2kNEj1X4M+K7s3mpWF1pOm3Wo2k+mahPZus0dvIUJaJ1LKCc7WypwMjivFfhv481zwT8Ofg5onhuV9T8XfEO2N1Pqniq/ur+3tzFAryPsaXdltwARGUdSa+qNf0Ox8UaHqOj6nB9p03ULeS1uYd7J5kTqVddykEZBIyCDXIap8CfA+seFvD/AIeuNFYab4fCjSvs97cQz2eF2/JOkglGV4Pz8981w1aM5T54aO36/wCWh9Tl+Z4ejhVhcUnJczfdfC0tLq9p2lbZ23PEdH/am8b+Nr74a6ToGj6BZax4iu9b03UjqRnkt4ZrDyv3kJRgxQh2O1hk8DcvLV3/AMM/ip47+J3ibVr6ysvDdt4N03XbrQpre5lnTUG8htjTo4DISW6RFRx/HXZ6P8D/AAR4fuPCc+n6FHay+FRdDSGjnl/0f7QAJyfn/eM+AS0m45yc5JqrD+z74At/GX/CUx6AE1j7adTBF3P9nF2Rg3At/M8kSn+/s3Z5zmojSxCacpX2/JeXe/qdGIx2T1IyjRoOOkrOyernJr7a+y4q924tPR7v58/Z9+NnjbR0+Hmm6w9lrGgeKNe1fTFubiaeXUY3ieV0cuzbdg27NmDgAEN/CND46SeJNY/ash0TS9F8Q+LLBPBUN6dG0fxS+ipFJ9umQ3BYSoGOMJjk8j0r3nS/gT4G0VfDy2Wh+SPD99cajpv+lzt5FxNu81+XO7dvbhsgZ4ApvjX4D+C/iB4qj8SazYXx1yOyXTlvbDWL2xf7OsjSCM+RMgI3ux5GefYVn9WreyVNyvqvy16Pr5Hb/bWXLHyxdOk4pxmnp1cm4uynH7Nk7SWp5L8YP2j9X+D90lvpSaTf6fokVhDqekzR3t5fxtMVG2W7X9zCwVlIMhcycnjvW8cfH7xvqHxgufAnh97HSjba9Z6bNbxWEl1qgsnSOWS/DFvJWEBtpDISAeobAr1PXf2Z/hz4luL2bUdCnuHvltluwNVvEW58hQsLSqsoDuoUDewLHHJNed6x+zf43uvihqmuad4qtNMs9Q1lNVOpWlxdwXUMSlc2v2RHFtLlUCmWT5iDkg4FKpDEp6PRvp21/wCB5GmCxORziuaNpxi7uaVnJ8ltFzXSfPv7zTVr7LN+Gf7WHin4ga/aTp4WZ/DWozXsETW2lXu7TxCJPLlnumHkSq5TaVTaULDlua9N/Zo+IXi74sfDPTvF3imHRbVNUiWWzt9HSYFQGdXMnmMeSVGAOgzkmtbRP2f/AAH4c1xtW03RJLW53zyrEt/cm2jeYESsluZPKQsCeVQHk4xXUeC/BmjfD3wvp/h3w/Z/2fo1ghjtrbzXk2KWLEbnJY8k9Sa6KNOvGSdWV9/0/wCCePmWNyutSlDA0HFtxs2lolzXV7vXWCvbVJ311e3RRRXcfKmN4t/5Asn++n8xWwv3R9Kx/Fv/ACBZP99P5ithfuj6UALRRRQBzOkfDPwpoF9qt5pvh7T7C61UMt7Nb26o04P3gxHY5yfU81p+G/DOleD9FttI0Wwg0zTbcERWtum1FySTx7kk/jWnRQAUVj+KvEieFdGl1B7G+1IqQq22nwGaV2PTgcKPVmIUdSRUnhfXovFPhnSNahieCHUbOG8SOTG5FkQOAccZANAGpRXm+g/G7T9a1jTIn065stH1iW4h0nV5XQxXbQB2kyoO6MFY3ZS3UKehwDP4P+L9v4q1fS7OXSLzSodatJL/AEe5uHQi8hTZuJVTlGxIrBT/AAnPXIoA9BooqtcalaWcmye6hhfGdskgU49cE0DSb0RZoqkutae5wt9bMfaZT/WnjVLMnAu4Cf8Arov+NK6K5Jdi1Xhv7Rn7U+m/s63+iWt/oN1rDapFLIjW86xhNhUEHIOc7v0r2v7db/8APeL/AL7FfA3/AAU4kWTxB4CKMGH2W75U5/jjr0MDShXxEac9nf8AI8nNK1XC4WVWno1b8zrP+HnHh7/oSNT/APAyP/4mj/h5x4e/6EjU/wDwMj/+Jr8+KK+r/svCfy/iz4X+2sd/N+C/yP0H/wCHnHh7/oSNT/8AAyP/AOJr688H+Io/F/hTR9ciha3i1K0iu1hc5KB0DBSe5Ga/Dyv2m+CjD/hT/gnn/mDWn/ola8XM8JRw0IukrXPosmx+Ixk5xrO9l2Phzxb/AMjVrP8A1+zf+jGrJrW8W/8AI1az/wBfs3/oxqya/lmr/El6s/tKj/Cj6IK9K/Zx/wCSz+Hfrcf+k8tea16V+zj/AMln8O/W4/8ASeWuzLf99of44/mjgzb/AJF+I/wS/wDSWfclFFFfvR/M4UUUUAFFFFABXMfEr4haZ8KvA+qeKtZS4k0zTlRpltEDykNIqDaCQDy47109eI/tq/8AJsPjj/rlbf8ApVDW1GKqVYwezaRz4mo6VCdSO6Tf3I4b/h458K/+fLxJ/wCAUX/x6j/h458K/wDny8Sf+AUX/wAer8y6K+w/sjDef3n5/wD29jPL7j9NP+Hjnwr/AOfLxJ/4BRf/AB6vXPgj+0B4a+Punape+GodQhh06VIZhqEKxkswJG3a7ZGBX431+g3/AATH/wCRP8cf9f8Ab/8Aotq4Mdl9DD0HUhe6seplubYnFYmNKpazv08jov2wP+R10X/rw/8AajV4JXvf7YH/ACOui/8AXh/7UavBK/mrPP
8AkY1vX9Ef19w7/wAiqh6fqwooorwj6M/Rvwz/AMi3pP8A16Rf+gCtOszwz/yLek/9ekX/AKAK06/oen8EfQ/ler/El6sKKKK0MjxD9rTx544+FPw0ufGXhHUdKt4NLMS3dnqNi07XBmuIYUKMJF2bfMYnIOeOlHib9oA/BuCLRPFlpqXjPxJY6XJrWsXnhfTkjt7SyErKszpLMCowCMAsTsY4Arsfjt8K/wDhdnws1nwadT/sb+0Wtm+2/Z/P8vyriKb7m5c58vb94YznnGK5P4vfs96h8RPEmtato3i1fDh17w83hrVoJtM+2LNbF3YPGfNj8uQCSRcncMN0yK8+rGspylT7L9b6P5H1+X1ssqYajQxtk1Kd3ytO1oct5RTb3qWtfVK+ljYh/aI8OXfiu90a2stUubax0uDWbrWYoFNjBaSwSzJK77sjiEjG3JLDAIDFeJ8UftSXd98HfF3inQPCHiLQbiw0RdZ0u+8RacosryNz8hV45GBOMNsYq2GBxXX+Hv2etM0G48WxDUJJ9I1/w7Y+HDZeVteCG2hmi3eZuO4ss390Y2988c0P2b/FeofCvVvAOs/Esahok2gxaDp8UOhRwLarHgLPJ+9ZpZNgVeGReM4yc1MvrLVvXa3y6/11NqP9hxqKV9E6fxc2q0c9FFq97pp2Vvhud18N/jXpnxJ8T+I9Ah0fWdE1PQxDJLDrFssLTQzBjFKgDsdrBc4YKwzyAcgeX2fxT+LvxOuPGWtfD6Dw6NI8N6zNpFroupQOZ9WaEqJWNx5qrFncdo24BHJNer+GfhZ/wjnxW8V+Nf7T+0f27ZWVn9h+z7fI+zqw3b9x3bt3TaMY7157qf7M/iG0k8Yaf4S+Iknhfwz4q1BtRvrNdL867gkcjzvs1wJl8sPtxyjFexqpqu4q9+u1k/IzwtTK6dabi4pONO3OpSinp7RWSvfez7Xs07M1/GP7Umg+BNfvdK1PQddn/siGyk13UbCCKW00g3RxGszmQMfUlFbA5qr4l/a28N+F/EmuaVceHPE1zBomqW+k6hqdpZxy20MswXyjxJvYMXAwqls9umc/xz+ynceKtV8VLYeMn0rw/wCLINNt9b0+XTvtM8y2mApinMq+WXQBWJR+561e1v8AZhGsP45b/hJPJ/4SfxBp2u/8eO77N9leJvK/1g37/L+9xjPQ4qJPF3dl+Xn/AMD8TenDh9Rg6km3ZX+Ld+zu9t1epa11ZR3e8y/tX+Ho9E1C9vPDfiXT7yw1618O3OkT2kRvEuLgKYmCJKwZSGHQlvRTxVS+/aattR0/RZbGw1Lw/ft4xtfC+o6ZrFhHJcQySYOCEnCqGVlIkDPjP3TXM/Gb4F61YzX2qaBeXOoX3iTx9oerkW2nmQ6WkJSNpWG4h1XZvJIUAcH1roY/2XLu7MV7q3i9L7XJPGdr4vvLyHS/JimMCqqWyRec2xdqAbizHvg1PNinJxtt6ef/AADeNHIoU413K3N09562jdWtsm5avV2Vrq7IvBv7TjR+AtJ1DxBp1zrPiLWNf1HSNO0vQoUEs4t5pRn95IqgLHHlmLCvYPh548034meD7DxHpSzx2d35i+TdIEmhkjkaOSN1BIDK6MpwSMjgkV4hefsdm50/TbU+J7K6j0fX7zWtMi1LQxcwpHdFzLb3EZmAmGXyrgoRgcGvavhn4Lb4f+C9P0OS5tb2a33tJcWenx2MLszsx2wR/KgGcdSeMkkkmt8P7fmtVWlvLfT/AIJ5ebLKZUnPAyvNyelpJKN5WtdJWty+d77rbqKKKK7j5Qoa5/yB7z/rmav1Q1z/AJA95/1zNX6AKFj/AMhDUv8Arqn/AKLWr9ULH/kIal/11T/0WtX6ACiiigAooooA8sh03xe3x2OvSeHbVPD39mnSftY1NTLtEplE3lbO/C7c8ZzntRrmm+O57jwx4pg0XSbnxFpqX1ncaR/aDRQvDO8e11mMbfMvkRkgrzuYZ6V6nRQB4fo/w18Z/DmDQtS0O30vX9e/syfT9Sjubt7aBZZZzcCaNtjFkWR5AVIBYEHgjFem/DnwmfAngPQfD7Ti5fTrOO3eYAgOwHzMPYnNdHRQBHNClxDJFIN0cilWGcZBGDXz9J+wD8B5GLHwMQScnGr33/x+voWisqlGnVt7SKfqrnoYTMcbgOb6pWlT5t+WTje217NXPnZv+CffwIP/ADJcg+mr3v8A8eqNv+Ce/wACj/zJ8w+mrXn/AMdr6NorH6nhv+fa+5Hpf6x51/0GVf8AwOX+Z83S/wDBPX4GeWxHhS4Bx/0Frv8A+OV+U39iWf8AzzP/AH0a/eeX/Vv9DX4T19FlGW4Kr7T2lGLtbdLzPks/4s4goey9lj6sb32qS8vMof2Haf8APM/99Gj+w7T+43/fRq/RX0P9j5d/0Dw/8BR8n/rrxN/0Ma3/AIMl/mezfsb/AAI8JfGf4r3eg+J7W4uNOj0qa7VILhom8xZIlByO2Hbivon9oL9hv4VfD3wTbano2nahDdPfRwFpL93G0pITwfdRXnH/AATh/wCS+X//AGAbj/0dBX2N+1x/yTGz/wCwpF/6Llr874owWGwtGq6FNRaj0Vj9a4L4nzvGV6EcTjKk052d5N6fefnx/wAKD8Jf88Lr/wACDSf8KB8J/wDPK7H/AG8H/CvR6K/CfrFb+Zn9L/XcT/z8f3nm3/DP/hP+5eD/ALeP/rV798Af2Efhj8SPBt5qerrrH2mK/e2X7PehF2CONhxsPOXNcVX17+yF/wAk11L/ALC8v/omGvpOH5PEY5U63vKz0Z8nxRm2PwmXOrQrSjK61TOE/wCHanwfHRvEI+mpD/4ilH/BNj4RL0l8SD/uJj/4ivquiv1D6jhv+faPxv8A1qzz/oLn958rD/gm58JV6XPiYfTVB/8AEU7/AIdw/CkdL3xQPpqo/wDiK+p6KPqOG/59oX+tOd/9Bc/vPlr/AIdy/CwdNQ8VD6at/wDYU4f8E6fhgvTVPFg+mr//AGFfUdFP6lhv5EL/AFozr/oKn958uj/gnb8Mx01fxeP+4x/9hXxl+2h8CfD/AMGfijpeiaHeardWk+jRXjPqV358gdp50IDYGFxGOPUmv1vr81v+ClX/ACXTQv8AsW4P/Sq6r1csy7CVMQozppqzPCznizPaOEcqeLmnddT49/sWD+/L/wB90f2LB/fl/wC+60KK+v8A7Hy//nyj8/8A9duI/wDoNn95Q/sWH/npN/33X6U/Cv8AYe8F+JPhj4Q1i48S+NILjUNHs7uSO21opErSQIxCLs4UE8DsK/OWv2i+BX/JEfh7/wBi7p3/AKTR14WbZZg6UIOFJLU+nyHi7Pa9Saq4ub07n5xeNPgJomk+MNdsYdW154rW/nhR5b/c7BZGUFjt5PHJrG/4UrpP/QV1r/wN/wDrV7J8SP8Akonin/sK3X/o5q5yv5pr1qkas0n1f5n9f4bG4iVCEnN3aX5Hnv8AwpTSv+gtrX/gZ/8AWrrfhL+zno3in4iaLpc+v+IbaK5kZWltb/ZIuI2Pynbx0rVr0L9n/wD5LD4b/
wCu0n/op62wNac8VSjJ6OS/NHPmOPxNPBVpwm01GT/Bnph/YD8KdvHXjwf9xr/7Cmn9gLwx28e+PB/3Gf8A7GvqKiv2n6lh/wCQ/n3/AFmzj/oIf4f5Hy2f2AfDfb4gePB/3GP/ALGmH/gn/wCHu3xD8eD/ALi//wBjX1PRR9Sw/wDIP/WfOP8AoIf3L/I+Vj/wT/0Ht8R/Hg/7i3/2NLH+wDosMySD4k+OztYNg6pkHB/3a+qKKX1HD/yD/wBaM4/6CH9y/wAgrA+IGtXPhvwH4k1ez2fa9P025uofMGV3pEzLkdxkCt+sjxhoH/CV+Etb0Tz/ALL/AGlYz2fn7N/l+ZGybtuRnG7OMjOOtdkr8rtufO0HBVYOp8N1f0vqfLv7Nv7UPib4lfEDTNG1DWdL8SWN14a/tm+FlpMtjPp9yCoNugdz9oA3Y3IMcivUIf2rPC9vH4i/tjSNd8PT6LpI1yW11G3i86W0L7AypHI5VtxAKSbGXPzAYNcn4a/ZD1i1k0b/AISH4jSarHoPh268O6MdL0hdPktEmhEJlaTzZDIyqAR0wQD65sfD/wDZJvPAeqXuoxeLNNa8m8Mjw6ptPDcUEZIcN9olRpXEztj5w33sk5FeTT+txik199u3r38z7/HPh3EVZ1ITSVlZRUlrzO+vIl8Nr+5p0u1d9hdftLaDpng99f1TRNc0vdfwabaWVxBE0l9PMMxrBIkrQuCASSJMLtO7Fc7qX7Sy69qvgKLw0htP7Q8Wt4d1yw1KJGuLVlt3lKZR2TPCEOrMpB4NYX/DEllceFtbsLvxBafbr7WrXW7VLPRI4dKs5YEZAosDIysrh33jeN2V6Y53v+GXLmLQ/DaWXiPSdJ13RfEI15LzS/DEFpaP+7aLyfs0br/C332dmz7YAu+Le6/Lv/l8jmUOHqd3Gpd3e6lbWOmlnopP/FdX2HaH+05ZWlkPt1prHiK8vfFd94csodM0uOKQSQqXCFftDblABHmZXPVlQAmr9v8AtZ+E7/w3oGo2Wmazd6hrVzeWltoYihjvFktf+PjeXlWNQny87+dygZJxUPhv9mP/AIR7V9Gvv+El+0f2d4uv/FXl/YNvmfaYWi8jPmHG3dnfznGNo61zM37FdsdI0dE8QWF1q2larqeoQTatoMd7ZvHesGeGS2eXDFSqlXDDBBOOcUf7Wlovy8v+CHLw9Un702vO0rfb0sktLqF7a6u3l7t8PPH+j/FDwfp3ibQZ2n0y+VmjMibHVlYqyMvZlZSD9O4ro65r4ceDF+H/AIM03QhLa3D2qHzJrOwisYpHJLMywRAIgJJ4Gfck5J6WvShzcq5tz4vEqkq81Q+C7t6X06Lp5L0CiiirOYxvFv8AyBZP99P5ithfuj6Vj+Lf+QLJ/vp/MVsL90fSgBaKKKACiiigDF8YXd/Z+Hbx9M0mbWrxk8tLOCaOJm3cZ3SMqjGc8mua+GEniLSPAvhHSdR8MTWFzaQRadeeZeQP5KxW4HnDYzBlZ1ChQcjdk8Cu/ooA8A8O/CPxFJB4M8K6lYraaH4UmvHOrC4jb7ckkM0MIjQEuhCz5beByuBnOa1vAPgXxP8A294EGt6XHplp4M0u4sVuluUkF/K6RxK8aqSVTZGzHftOWAxxmvaaKACvGvi9+yX8P/jh4pi8QeKLfUJdRjtUtFa1vXhTy1ZmHyjvl25r2Wis6lOFVcs1dHZhMZicDU9thajhLa6dmfMi/wDBO34PRnKWutofVdVkFO/4d5/CMciLXgfUavJX0zRXP9Tw3/Ptfcez/rNnX/QXP/wJnzT/AMO+fhR/1MX/AIOZa+Sv24v2evCnwY1jwlb+Hn1No763uHlF9fPPgqyAbd3QfMa/UuvgD/gp3/yMXgL/AK9bv/0OOvSy7AYWpiYxlTTWvTyPFzfijO6eDnOGLmnp9p90fCn9i2/rJ/33Sf2LB/el/wC+60KK+y/sfL/+fMfuPz5ca8Rr/mOqf+BGf/YkH9+X/vqv0e+G37A/gjxN8PfDWr3HibxhDPfadb3MkcGqKsas8asQo8vgZPAr88a/ab4J/wDJHvBP/YGtP/RK18/m2V4KlCDhSS1PrMh4xz+tUqKpjJuyW7PzK8Qfs96LYa9qVtHrGtmOG5kjUtdKSQrEAk7evFUV+BOlx/c1rW19cXSj/wBlr2Dxb/yNWs/9fs3/AKMasmv5pqYiqpySl1P7Ao47Eypxbm9kecL8D9PU5XX9dU+oux/8TXbfBv8AZzsPFnxI0fSpfF3iqwS4Mubix1ARzJthdvlbYcZ24+hNaNelfs4/8ln8O/W4/wDSeWurAVp1MXRhJ3TlH80cOZ5hiaeBrzjPVQk1t2Z6Z/ww3Yf9Fd+KX/hQj/41U0f7FMcKhI/jP8WI0HRV8SYA/wDIdfSVFftH1Oh/L+Z/Pf8ArHmr3rfhH/I+abj9iy4O37P8cfitF/e8zxCXz6dFFV5v2KtW8s+T8efihHJ2Z9bdgPwBH86+nqKPqdD+X8X/AJjXEmaL/l7/AOSw/wDkT5Y/4Yp8S/8ARwfxJ/8ABpL/APHKqt+xP42ycftHfEEDsDez/wDx+vrGip+pUO34v/M1XFGar/l4v/AKf/yJ8jTfsTfETf8AuP2l/Hccf9157lj+Yuh/KvNv2hv2V/HvgH4MeJtb1L49eKvFWn2scJm0jUTM0NwDPGoB3XLgYLBvunlRX6BV4j+2r/ybD44/65W3/pVDW+Hy/DyrQTT1a+1Lv6mGL4szeOGqNTjpF/8ALqk+nnA/ID+y7n/n/k/L/wCvS/2bdf8AP+//AHz/APXrRor7z+xsGtlL/wADn/8AJH5Y+Ns6lvKm/wDuBQ/+VlD+z7r/AJ/2/wC+B/jX1z+xF8KfiD448O+KJvCfxZuvAkFvdwpPBDpEd39oYoxDEtIu3A4x718q1+g3/BMf/kT/ABx/1/2//otq8/Mcsw1PDSlHm6fbm+vnI9TKeLM0rYyMJqlbX/mHoLp5Uzzz9oj4SfEzwz4k02DWPjDceJriS03pPNokUHlrvYbcCRs88/jXk58FeN+3jwk++nR/419e/tgf8jrov/Xh/wC1GrwSv5wzibpY+rCOyfVX6Lq9T+t8hx9atltGpNRu10hBLd9FFJHnX/CG+PP+h5T/AMAI6YfCHxCzx41hI97JP/ia9Iorx/by7L7l/ke/9bn/ACx/8Bj/AJHtml/A39pmTSbB7L462KWz28bJHJpaAoCowv8Aqz0GKtr8Dv2o42z/AMLz0yT2bTVx+kVfUnhn/kW9J/69Iv8A0AVp1+4U8JBwT5pf+BP/ADP5zqcRYhTkvY0d/wDnzT/+RPksfBX9qMf81s0c/wDcOH/xqpf+FO/tRf8ARY9B/wDBef8A43X1fRWv1SP80v8AwJ/5mP8ArDXf/Lij/wCCaf8A8ic78PdO8RaT4L0qz8Wanb6z4ihi23l/ax+XHM+44KrgY4wOnauioorsiuVJHzdSbqTlNpK7vorL5LovI434wfEiD4Q/DbXPGF1ZSajBpUSytawuEeTLqmATwPvfpXJ/Dv8AaItPFmseJ9I8
RaFceCNS8O2UGo3y6jdwTQLbyqWWTzo2KjgZIOMfnVz9qDwTrPxG+Avi/wAN+H7MX+sahbJHb2xlSIOwlRiNzkKOFPU14D4J+BvxL8K6f8Rr3wh4Msvh3FqWg29lZeG59Shv1u75HJe56tGhMZZRuOMlSRjNedWqVoVkoJuNu3r5enVfM+xyzA5bicsnLETjGtzWTcrWV6dtObbWevJLbVxtr9OWvxo8CXuh3msweLdIk0uzljhuLpbpNkUj42K3PBbIwO+eKluPi/4Is/Ddp4gm8V6THot5K0FvfG7TypZBncinPLDa2R1GDmvmXTfgT4yvI/iNqGueDtavn1S+0e80yN/EdrFqyPbxFGuI7iM+Ws8bHIB2oeQCR1fe/Bf4uXml/DzxJrMV7ret+H7vVY5NMsdTtbXUhaXQQQs1xgQNKojw5B5DjBJBqfrNe3wfg+9vy1sbvJcr5rfWla/88OtPmsv+3vdcnZLs9bfQWofGrSbfxhpeiWaR6lb6loN1r0GpwXKfZ2ihZBjd0+bzAd2cDFVrH9ojwND4b0bUvEHiXRfDdzqWnx6itndapC5WJ2KhldW2uu5WG5cg7TXjtp8DPF3hFPA76L4WD2qeGdX0W+0/+2Y5pNNku5BMjGVwglAbKkIOCeMgAnC8M/s5+OrHw61tdaDGtyPhHJ4WUG7gbGom4lfyQd/91lO/7vvS9viL/D+D7L/gmiyvJ5QTddLX+aPM/emtrtLRRez7p2ev1Br/AMWvBfhWbT4tX8U6TpsmoRLPaLc3aJ50ZIAdcnlSWA3dORXVqwZQykFSMgjoa+MfE37PnjmNtKJ8NX3iKz1HwVYeHNQsbDXbew+yywlt6TM+d8TCQnMe45XpyK+wtEsP7K0awssY+zW8cON5f7qgfeIBPTrgZrqo1alSTU42+8+fzPA4TCUqcsNV53K99Yva3RO66rXfdOxdooorrPnwooooAoa5/wAge8/65mr9UNc/5A95/wBczV+gChY/8hDUv+uqf+i1q/VCx/5CGpf9dU/9FrV+gAooooAKKKKACiuF0f4xaJrniSLSra21AW9xPLaWmryW+2yu7iLf5sMb5yWXy5OSoU7GwTVzx18RE8D3GmWyaDrHiG91DzTFa6NFFJIFjCl2bzJEAA3qOp60AddRXD638VrXQrLS3m0HXJ9RvrY3jaTa2qSXdrCuN7yqH2gKWAIDMSeFDYNdVoetWXiTR7LVdNuFurC8hWeCZc4dGGQeeeh70AXqKKKACiiigBkv+rf6Gvwnr92Jf9W/0NfhPX1GSf8ALz5fqfE8Sf8ALn/t79Aooor6g+KPqj/gnD/yXy//AOwDcf8Ao6Cvsb9rj/kmNn/2FIv/AEXLXxz/AME4f+S+X/8A2Abj/wBHQV9jftcf8kxs/wDsKRf+i5a/MOL/AOBW/wAJ+0cA/wC8Yf8Axnx9RRRX85n9YBX17+yF/wAk11L/ALC8v/omGvkKvr39kL/kmupf9heX/wBEw19Zwz/yMF6M+J4w/wCRVL/Ej3Giiiv18/CAooooAKKKKACvzW/4KVf8l00L/sW4P/Sq6r9Ka/Nb/gpV/wAl00L/ALFuD/0quq9nKf8AeV6M+ez7/cn6o+TaKKK+3PzUK/aL4Ff8kR+Hv/Yu6d/6TR1+LtftF8Cv+SI/D3/sXdO/9Jo6+czr+HD1Pr+HP4tT0X5nxb8SP+SieKf+wrdf+jmrnK6P4kf8lE8U/wDYVuv/AEc1c5X8n4j+NP1f5n9v4X/d6fovyCvQv2f/APksPhv/AK7Sf+inrz2vQv2f/wDksPhv/rtJ/wCinroy/wD3yj/ij+aObNP9wr/4Jf8ApLPuyiiiv3s/mUKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDG8W/wDIFk/31/nWwv3R9Kx/Fv8AyBZP99P5ithfuj6UALRRRQAUUUUAFFYvizxdp3gvShf6k03lvKlvFDbQPPNNK5wsccaAszH0A7E9BS+E/F2meNtGTU9KmkltjI8LrNE8MsUiMVeN0cBlZWBBBFAGzRXJ6B8StL8Ta5caZY2ersYJZoWvZNLnSzZonKOFnKeW3zKQMHnHFR+E/ix4c8aas+naZcXBuPKa4ga4tJYY7uFWCNLA7qBKgYgblJ6g9CDQB2FFFFABRRRQAV8Af8FO/wDkYvAX/Xrd/wDocdff9fAH/BTv/kYvAX/Xrd/+hx16uV/71H5/keFnf+4z+X5o+I6KKK+7PzEK/ab4J/8AJHvBP/YGtP8A0StfizX7TfBP/kj3gn/sDWn/AKJWvm86+CHqfYcOfxanoj4c8W/8jVrP/X7N/wCjGrJrW8W/8jVrP/X7N/6Masmv5Nq/xJerP7io/wAKPogr0r9nH/ks/h363H/pPLXmtelfs4/8ln8O/W4/9J5a7Mt/32h/jj+aODNv+RfiP8Ev/SWfclFFFfvR/M4UUUUAFFFFABXiP7av/JsPjj/rlbf+lUNe3V4j+2r/AMmw+OP+uVt/6VQ104X+PT9V+Zx43/dav+F/kfklRRRX6OfkAV+g3/BMf/kT/HH/AF/2/wD6Lavz5r9Bv+CY/wDyJ/jj/r/t/wD0W1eTmn+6y+X5nvZJ/v0Pn+TOi/bA/wCR10X/AK8P/ajV4JXvf7YH/I66L/14f+1GrwSv5Xzz/kY1vX9Ef2vw7/yKqHp+rCiiivCPoz9G/DP/ACLek/8AXpF/6AK06zPDP/It6T/16Rf+gCtOv6Hp/BH0P5Xq/wASXqwooorQyCiiigAooooAKKKKACiiigAooooAKKKKACiiigChrn/IHvP+uZq/VDXP+QPef9czV+gChY/8hDUv+uqf+i1q/VCx/wCQhqX/AF1T/wBFrV+gAooooAKKKKAPCfBvhTxBaR+D/DN3olzbDQdfvdQn1RthtpIC1w0RQhsln89ARjjD57Z6bxQ0fiiTQNT1bwHrN1Ekd5HtjuAtzaMSoAaOOUKyyBMhtx27V4Gcj1CigDwbwfo/i3wHJ4f13U9H1TX7o6BPp0ltFMk1xA63Blt0ldmG4lG2M4zgoM5zmvS/hH4Yu/Bfwx8MaJfhRfWNhFFOqHKq+3LKD3AJI/CuuooARl3KRkjIxkdaz/7HP/P/AHv/AH9H+FaNFAGd/Y5/5/73/v6P8KP7HP8Az/3v/f0f4Vo0UAZcujny3/0+96H/AJaj/Cvw4r92Jf8AVv8AQ1+E9fUZJ/y8+X6nxPEn/Ln/ALe/QKKKK+oPij6i/wCCddv9q+PF8nmyQ/8AEiuDuibB/wBdBxX2D+1dp5tfhtZv9quJv+JnENsrgj/Vy89K+RP+CcP/ACXy/wD+wDcf+joK+xv2uP8AkmNn/wBhSL/0XLX5hxf/AAK3+E/aOAf94w/+M+PqKKK/nM/rAK+sv2TbA3Xw51FxdXEGNVkG2JwB/qYeelfJtfXv7IX/ACTXUv8AsLy/+iYa+s4Z/wCRgvRnxPGH/Iql/iR7B/Y5/wCf+9/7+j/Cj+xz/wA/97/39H+FaNFfr5+EGd/Y5/5/73/v6P8ACj+xz/z/AN7/AN/
R/hWjRQBnf2Of+f8Avf8Av6P8KP7HP/P/AHv/AH9H+FaNFAGd/Y5/5/73/v6P8K/OH/gpBbG1+OGhoZpZ8+HYDulbJ/4+briv0vr81v8AgpV/yXTQv+xbg/8ASq6r2cp/3lejPns+/wByfqj5Nooor7c/NQr9k/gdpJk+Cvw/f7ddpu8PaedqyAAf6NHwOK/Gyv2i+BX/ACRH4e/9i7p3/pNHXzmdfw4ep9fw5/Fqei/M+K/iIvl/EDxOu5m26pdDc3JP71uTXPV0fxI/5KJ4p/7Ct1/6Oaucr+T8R/Gn6v8AM/t/C/7vT9F+QV3/AMBovP8Ai54dTe8e6WT5ozhh+6fpXAV6F+z/AP8AJYfDf/XaT/0U9dGX/wC+Uf8AFH80c2af7hX/AMEv/SWfbf8AY5/5/wC9/wC/o/wo/sc/8/8Ae/8Af0f4Vo0V+9n8ymd/Y5/5/wC9/wC/o/wp8OlmGVX+23Um052vICD9eKvUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAGN4t/5Asn++v862F+6PpWP4t/5Asn++n8xWwv3R9KAFooooAKKKKAOE+K3m2h8JauLW4u7TSdbS7u1tYWmkSFre4hLhFBZtrTKSACcA8VmfCy+lsY9Tu7nS9QtIvEniG7nske1cFItnyyyqRmIP5RYbgDl1BwTXp1FAHzxoOm6i2qaJpXhtPFOlTzy30evWesPK9raxPHMQysVERYztGVaHkgnPGas/Dex1DVvEXwut/7Kv8ATn8I6JdW2qvdWrwxpM0cMKwqzACTJjZ8oSMKDnkV79RQAVVu4bmZlMF0LcAcgxhs/rVqigDO+x6h/wBBFf8AwHH+NH2PUP8AoIr/AOA4/wAa0aKAM77HqH/QRX/wHH+NfBX/AAUwhuIfEHgTz7gXBNrdYxGEx88fvX6DV8Af8FO/+Ri8Bf8AXrd/+hx16uV/71H5/keFnf8AuM/l+aPiOiiivuz8xCv2Y+C9pft8IfBZW/VFOj2mF8gHH7leOtfjPX7TfBP/AJI94J/7A1p/6JWvm86+CHqfYcOfxanoj4b8VAr4o1gMdzfbJsnGM/Oayq1vFv8AyNWs/wDX7N/6Masmv5Nq/wASXqz+4qP8KPogr0b9nlJJPjF4fWKTypCZ8Pt3Y/0eTtXnNelfs4/8ln8O/W4/9J5a7Mt/32h/jj+aODNv+RfiP8Ev/SWfaf2PUP8AoIr/AOA4/wAaPseof9BFf/Acf41o0V+9H8zmd9j1D/oIr/4Dj/Gj7HqH/QRX/wABx/jWjRQBnfY9Q/6CK/8AgOP8aPseof8AQRX/AMBx/jWjRQBnfY9Q/wCgiv8A4Dj/ABrxX9s61vY/2Z/GzS3qyxiK3ynkhc/6VD3zXvdeI/tq/wDJsPjj/rlbf+lUNdOF/j0/VfmceN/3Wr/hf5H5JUUUV+jn5AFffv8AwTRhuZvCPjYwXQtwL63yDGGz+7b3r4Cr9Bv+CY//ACJ/jj/r/t//AEW1eTmn+6y+X5nvZJ/v0Pn+TNz9raKeLxlo4nnE7fYOGCBcfvGrwqve/wBsD/kddF/68P8A2o1eCV/K+ef8jGt6/oj+1+Hf+RVQ9P1YUUUV4R9GfoZ4bs78+HdLI1BQPssWB5AOPkHvWj9j1D/oIr/4Dj/Gm+Gf+Rb0n/r0i/8AQBWnX9D0/gj6H8r1f4kvVmd9j1D/AKCK/wDgOP8AGj7HqH/QRX/wHH+NaNFaGQi5CgE5OOT60tFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAFDXP+QPef8AXM1fqhrn/IHvP+uZq/QBQsf+QhqX/XVP/Ra1fqhY/wDIQ1L/AK6p/wCi1q/QAUUUUAFFFFABRXLad8TvDOreJG0G01RZtSV5IgohkETyJnzESUr5buuDlVYkYORwaueK/G+jeCbeCXV7p4PtDFYo4beS4lkIGW2xxqzEAckgYHegDdorkdV+LHhTR9K03Up9XSSy1GIz2slpDJc+ZEAC0mI1YhBuGWIAGRkiunsb631OygvLSaO5tbiNZYpomDK6MMhgR1BBoAnooooAKKKKAGS/6t/oa/Cev3Yl/wBW/wBDX4T19Rkn/Lz5fqfE8Sf8uf8At79Aooor6g+KPqj/AIJw/wDJfL//ALANx/6Ogr7G/a4/5JjZ/wDYUi/9Fy18c/8ABOH/AJL5f/8AYBuP/R0FfY37XH/JMbP/ALCkX/ouWvzDi/8AgVv8J+0cA/7xh/8AGfH1FFFfzmf1gFfXv7IX/JNdS/7C8v8A6Jhr5Cr69/ZC/wCSa6l/2F5f/RMNfWcM/wDIwXoz4njD/kVS/wASPcaKKK/Xz8ICiiigAooooAK/Nb/gpV/yXTQv+xbg/wDSq6r9Ka/Nb/gpV/yXTQv+xbg/9KrqvZyn/eV6M+ez7/cn6o+TaKKK+3PzUK/aL4Ff8kR+Hv8A2Lunf+k0dfi7X7RfAr/kiPw9/wCxd07/ANJo6+czr+HD1Pr+HP4tT0X5nxb8SP8Akonin/sK3X/o5q5yuj+JH/JRPFP/AGFbr/0c1c5X8n4j+NP1f5n9v4X/AHen6L8gr0L9n/8A5LD4b/67Sf8Aop689r0L9n//AJLD4b/67Sf+inroy/8A3yj/AIo/mjmzT/cK/wDgl/6Sz7sooor97P5lCiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAxvFv/ACBZP99P5ithfuj6Vj+Lf+QLJ/vp/MVsL90fSgBaKKKACiiigAoqnq+s2GgafLfaneQafZRffuLmQRouTgZJ4pdL1Wy1uwhvtPu4b6zmG6O4t5A6OM44YcHmgC3RWVY+K9F1LWLrSbTVrO51S1GZ7OGdWliwQDuUHI5I/Ok0nxZouvX13Z6bq1lf3dmdtxBbzq7xHOMMAcjkEUAa1FFFABRRRQAV8Af8FO/+Ri8Bf9et3/6HHX3/AF8Af8FO/wDkYvAX/Xrd/wDocderlf8AvUfn+R4Wd/7jP5fmj4jooor7s/MQr9pvgn/yR7wT/wBga0/9ErX4s1+03wT/AOSPeCf+wNaf+iVr5vOvgh6n2HDn8Wp6I+HPFv8AyNWs/wDX7N/6Masmtbxb/wAjVrP/AF+zf+jGrJr+Tav8SXqz+4qP8KPogr0r9nH/AJLP4d+tx/6Ty15rXpX7OP8AyWfw79bj/wBJ5a7Mt/32h/jj+aODNv8AkX4j/BL/ANJZ9yUUUV+9H8zhRRRQAUUUUAFeI/tq/wDJsPjj/rlbf+lUNe3V4j+2r/ybD44/65W3/pVDXThf49P1X5nHjf8Adav+F/kfklRRRX6OfkAV+g3/AATH/wCRP8cf9f8Ab/8Aotq/Pmv0G/4Jj/8AIn+OP+v+3/8ARbV5Oaf7rL5fme9kn+/Q+f5M6L9sD/kddF/68P8A2o1eCV73+2B/yOui/wDXh/7UavBK/lfPP+RjW9f0R/a/Dv8AyKqHp+rCiiivCPoz9G/DP/It6T/16Rf+gCtOszwz/wAi3pP/AF6Rf+gCtOv6Hp/BH0P5Xq/xJerCiiitDIKKKKACiiigAo
oooAKKKKACiiigAooooAKKKKAKGuf8ge8/65mr9UNc/wCQPef9czV+gChY/wDIQ1L/AK6p/wCi1q/VCx/5CGpf9dU/9FrV+gAooooAKKKKAPnzwPp96lr4G8Ly6VfW2raJ4kv728kktJFhSHddsJRLt2MJBNHgAkneeODjr9e8SQyeM/Bfi0WOqf2LHaapYyv/AGbOZoZHe3KFogm8Bvs8gB245X1FeqUUAfOfw4tr34byaDrOt6NqUNjdaDeRpZW9lJcSW8hvGuEhdUU7WaOQAZ4yhBIOK9X+CuhXvhn4S+E9L1GE299a6dCk0LdY2252H3GcfhXa0UAI2dpwcHsapfZ7/wD5/Iv/AAH/APsqvUUAUfs9/wD8/kX/AID/AP2VH2e//wCfyL/wH/8AsqvUUAUWtb5lIN7Fg8f8e/8A9lXy7/w7f+HH/QU1z/v+n/xFfWFFb0q9Wjf2crXOathqOIt7WKdu58n/APDt/wCHH/QU1z/v+n/xFH/Dt/4cf9BTXP8Av+n/AMRX1hRW/wBexP8Az8Zzf2bg/wDn0vuPCPg7+yF4W+B/iqXxD4d1HUHv5LV7NhessibGZWPAA5yg7+teieP/AIbxfEjRY9L1a+ZbaOdbgG3jCNuCsBySeMMa7OiuHEf7WnGv7ye9z0cL/sLUsN7jTurdzwr/AIZF8L/9BHUPzX/Cj/hkXwv/ANBHUPzX/CvdaK8f+x8v/wCfMfuPf/t7NP8AoIl954V/wyL4X/6COofmv+Feg/D74aQ/DXRZtM0i+draa4a5b7TGHbcVVTyCOMIK7Siuijl+Ew0/aUaai+6ObEZpjsXT9lXquUezZR+z3/8Az+Rf+A//ANlR9nv/APn8i/8AAf8A+yq9RXoHlFH7Pf8A/P5F/wCA/wD9lR9nv/8An8i/8B//ALKr1FAFH7Pf/wDP5F/4D/8A2VH2e/8A+fyL/wAB/wD7Kr1FAFH7Pf8A/P5F/wCA/wD9lXjXxm/ZI8MfHXxRa694k1HUEvrezWxQWJWNPLV3cZBB5zI3OfSvc6K0p1J0pc0HZmNWjTrx5Kiuj5P/AOHb/wAOP+gprn/f9P8A4ij/AIdv/Dj/AKCmuf8Af9P/AIivrCiur69if+fjOP8As3B/8+l9x8n/APDt/wCHH/QU1z/v+n/xFfR/hfwrL4R8M6RoVjeg2Wl2cNlAZodz+XGgRdx3DJwozxXQ0VjUxFWskqkmzoo4Whh23SglfseLaz+yz4e1zWL7UrnUb4XF5PJcSeXtC7nYscD0yap/8Mi+F/8AoI6h+a/4V7rRXgyynASbboxv6H00c8zOKUY15WXmeFf8Mi+F/wDoI6h+a/4Vr+E/2bND8G+IbPWbDULw3loxaMTBWTJUryBjsTXr1FOGVYGnJTjRSa8hVM6zGpBwnXk09Hr0KP2e/wD+fyL/AMB//sqPs9//AM/kX/gP/wDZVeor1TxCj9nv/wDn8i/8B/8A7KnxQ3iyKZLqN0zyohwT+O6rdFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBjeLf+QLJ/vp/MVsL90fSsfxb/yBZP8AfT+YrYX7o+lAC0UUUAFFFFAHnfxiu7PTZPBF9qksVvpNr4ihlu57hgsUQ+z3AiZyeABMYcE9DtNZfwZ8QaNDa649ve28Wm6x4nvhpGHAS6ON0hh7MC6TNxwcE16ndWsN9byQXEMdxBINrxyqGVh6EHg02Oxt4Y4ES3iRLf8A1KqgAj4x8o7cEjjtQB8teELiW80/4WaTpdwjeONOvNXbVoUcGe3Yw3KzNOOqhp2h5bqSvtW38K77TtS8S/B220KaGW80vw5ew61FAQZLdSsC+XMByrGdT8rc5VvevoiHTbS3u5rqK1hjupgBLMkYDvjpubGT+NFtptpZzTzW9rDBLO26aSOMK0h9WIHJ+tAFmqM91c/bmt7eOJtsayM0jkdSwwMA/wB2r1N2LvL7RuIwWxzj0/WgCnu1L/nla/8Afxv/AImjdqX/ADytf+/jf/E1eooAo7tS/wCeVr/38b/4mvCf2kv2WZv2jNQ0O6utcj0M6XFLEqwxGbzN5U5OcYxt/WvoSitaVWdGSnTdmYVqNPEQdOqrpnwr/wAOxo/+h5b/AMAR/wDFUf8ADsaP/oeW/wDAEf8AxVfdVFd39pYv+f8ABf5Hm/2Pgf8An3+L/wAz4V/4djR/9Dy3/gCP/iq+xPBvh/UPB/hLRtCRra5TTbOK0WZmZS4RAu4jHGcV01Fc1bFVsQkqsr2OvD4LD4Vt0Y2v6nzfqv7I8uqapeXp8QJGbiZ5ighJ27mJxnPvVX/hjmT/AKGNf+/B/wAa+mqK+WeQ5bJ3dL8X/mfax4mzaKSVfbyj/kfMv/DHMn/Qxr/34P8AjXRfD39mufwB4w0/Xo9ZjvHs/MxA0ZQNujZOvOPvZ/CveKKunkmX0pxqQpWad1q918zOrxFmlanKlUrXjJNPSOz36FHdqX/PK1/7+N/8TRu1L/nla/8Afxv/AImr1Fe4fOFHdqX/ADytf+/jf/E1BdX2oWgiLQ2x8yRYxiRupOP7tatNeNZMblDbTuGR0I70AU92pf8APK1/7+N/8TRu1L/nla/9/G/+Jq9RQBR3al/zytf+/jf/ABNcZ8Y/hze/F74ba14QnuYNMi1JY1a6j3SNHslSThSBnOzHXvXoNFVGThJSjuiJwjUi4S2eh8K/8Oxo/wDoeW/8AR/8VR/w7Gj/AOh5b/wBH/xVfdVFej/aWL/n/Bf5Hk/2Pgf+ff4v/M+Ff+HY0f8A0PLf+AI/+Kr3n9m79nW4/Z10nWrG11ePWxqc8c7PNGYfL2qVwAM5617jRWNXG4itHkqSuvkb0ctwuHmqlKFmvN/5nj3xa+Bd18VtYs9Qm1KHTmt4PIEcamQN8xOcnHrXDf8ADHMn/Qxr/wB+D/jX01RXzFbJsBiKjq1ad5PfV/5n2WHz/MsLSjRo1bRWysv8j5l/4Y5k/wChjX/vwf8AGj/hjmT/AKGNf+/B/wAa+mqKx/sDLf8An1+L/wAzo/1nzf8A5/8A4R/yMjTbXUdO0+1tAlq4giWIMZGGdoAz932qzu1L/nla/wDfxv8A4mr1Fe+kkrI+YbcndlHdqX/PK1/7+N/8TSGTUgCfKtf+/jf/ABNX6KYiGzuPtVnBPt2+Yivt9MjOKmpqqI1CqAqqMADoKdQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQBQ1z/AJA95/1zNX6oa5/yB7z/AK5mr9AFCx/5CGpf9dU/9FrV+qFj/wAhDUv+uqf+i1q/QAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFcOuoXV/8apdOLFbDS9BjugqsRvlubiRMsM4O1bU4443t613Fc3J4VaP4iQ+JYGVRLpjaddoxOWCy+ZCVHTgvNn/fHpQBxHwjbWLX4ofE3S9W127137JJpzxyXICLH5luzMI41+VFzjgenJJ5r0jxHp97qmj3Frp+pyaPcyDAvIYkkeMd9ocFc47kHHpXBXXwD0pV8UT2Gta5b6l4htXtbq6uN
SnnXldqvs3j5kHC88DjpkHt7jwzFdeFl0Jr3UIoVt0t/tcF08d1hQBu81SG3HHJ75NAHk+reIb/AMP/ALOfhnxML26udQ0o6ddPNLLue5UzxxyrIx6745HBPqQe1e4V5ncfBW0tfB1r4U0++vptEfULW4uk1S9kumEELrJ5Ue/O0M0aKQMYDMetem0AFFFFABRRRQAUUUUAFFFFABRRRQAVxHxs8WXHgf4T+KNas2Md7b2Ti3kAB2SvhEbB64Zgfwrt6xPG3hW18c+EdY8P3pK22pWslszqBuTcMBxkEZU4I9wKAOC+KGk6p4f0rwWdO8Q3tnp9jrOj2UlpEADdhryGJjLIcsVKscqMAnrnpXrFcR4o+GcPxE0fRIvEd9qNveWPlzuNH1GW2ja4Uowf5Nu7a6BlJGV6jFaPhnwSnhvXNU1FdSvb03ttaWojupmk2Lbo6hiWJLO5clm4zheMgkgHkt38UX8UfGjwTc2HiG1g8KjU7zTYrWC7X/T2WznLTyDP3PNVEjHcgt/Etek/DXU7iTUPGej3D+aNI1uSKB8sx8qaGK5UEk9VM7LxwAqitK++G/hy+1jR9TfSbWO70qZ7i2aKJUAdo2jJYAfNw5xnocHtR4J8KyeGU1ua4kEt7q2qTajOysSo3BY41GfSKKIH3BoA6WiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDG8W/8gWT/AH0/mK2F+6PpWP4t/wCQLJ/vp/MVsL90fSgBaKKKACiiigAooooAKKKKACiiigAooooAKKKKACsLx5r0vhfwVr2sQR+bPY2M1xGh6M6oSo/MCt2sbxl4f/4SvwlrOjeb5B1CzlthL/cLoVB/AmgDh/iFN4s8IeF/DEmn+II/Ltb3TbTUp7i1EtzfGW7t4GwxO1AwdySFJ6Abeteo1wXiLwTqXxK8KaLFq2p3/he7QW93d2WnG3lUXMbxyqN7xvkJJGMbcAjrW/oukatY65f3F5qsl9YPbW0FvDJtyJED+bKdqKAzllyBkfICMZwADmLLVvFFt8Zl0vUtStZdFutMurq2sbW32+WI5oVRnkYlmciRs4wvseta/wAO9an1IeI7C5uXu5tH1iey86QYYoVSeNT67UmRc99tRah8Opr74gW3ipfEuqW0lvF9nTT447c2/klo2ePJiL4ZowSd2Rk4Iq14A0K50m31u7vbYWl7q2qz38kIYMVX5YoskEjd5UUWcHGc0AdTRRRQAUUUUAFFFFABRRRQAUUUUAFFFFAHA6RqGv8AiS18YXOk6nax3Q1RrDTzeRNJBaxw7I5SUUqWff5x+8M/IOAK0vhVq1/rnw/0e91O6N7fyRsJrgoqGQh2XO1QAOAOgqHQfDupafJ4t09XfTbW8vWvbDUbXyy6+cimQbW3fOsoc5ZcEOuM4OKXh/4b6p4T03RtO0/xTqN1Z2d+txKLwQKWg2ybof3cQ3BmdTyQRgHPG0gEHxk8aanomh3mmeGp44fEkljPei4eLzFsreJSXmYYxknCIG4LNnBCtWna6nqMPjLw/vunmsNV0iQywPtCpPEY2V1GM5ZZZA3OPkTjrUXj34K+DPiVJPca9oVreahJbG1W+ZAZolwcbSe4LEjjrTdB8CL4c8S6Ja6VZJpvhnQ9Lmgt0jZcTTTyKWG0cjYIiSTgEzcdDgA7qiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigChrn/IHvP8Armav1Q1z/kD3n/XM1foAoWP/ACENS/66p/6LWr9ULH/kIal/11T/ANFrV+gAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAoorg/jnrk3h/4Va9cW7PHPOkVgksZAaNriZLcOM9183d+FAHeUV8w/ErwPpM/wAarfS1+HTeNrTTfB1rHZ2FtPBbi0C3M6qQZJEwMAAbckY6V0fgvxlq1n8LvAmnReKra31gwTQ6g99p9xqF8zwMY5ESFCGOyQFGduOBjrQB73RXzpD408XeMvHXweurfXV0mLU7fVVvrOK1k8iaS3ZFdmjMikhsfKGGY8k8kkV23w18W+MviJDB4ohudIg8OXGoXNsNIktpBcJBDNJD5nnhyDIWj3bdgXBxkdaAPVaK+YPhn4u8T+CfDPha6S406bw/qvi+50ZtP+zP5+Jrq4/fedvwCrD7uzBHfJ4+n6ACivmjwj4Xtfh74m0GbxRompW+u3GoJCPH2k3qzQavLIxVI7obt6rIWVdhQqDtCsMA1Z8L+PfF3hH4f313qGvx63f6h4pn0ezmuNPml+yYuJhI+xHZ5F2IdkS427QuSOaAPo6ivA9U+LnjHTfD9rHCLefUJPFNho0Go32k3NjDd29xjLGGTDKysWUlSR8oPetK7+I3jjSU8eaELe013xDoS6fc2l5ZWLqstvcuVkZrcSMS0SpK20PlwAAPUA9qorwTxd4yn1D4LeIPEk2qad4h1HwrqCX9pcWtnJZyRvCY38uaGXLRSFWkRsdUk7ZNe90AFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAY3i3/kCyf76fzFbC/dH0rH8W/wDIFk/30/mK2F+6PpQAtFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRRRQAUUUUAFFFFABRXzd8VNQ1S6+Dfxm8QS3cyvJqBsrDyZWXyba3eKIBeflJk89jjrurT+H96fB/xL8R2FrZ6j4V006CLy20TxDeNLFcTRu2+4jlMkiRIoZFcBsncGK4XNAHv1FfOXiL4xeJbzw7470m01nR7rULLw0NZttY0mCWOOFt7LJEN0h3kBcpIpAyQSvau/vPEfjb+2dE8LWVxoh1uTT5tTvtTntZfs4jWRUjRIRJu3MXGSXwArHHIFAHp1FeC6vf+Nbj466HBZNpun6zJ4LllvIbh5Z7SKQXUW4oqlS5zhQSRgMTzjB9T+Gfi5/HngLRNelgW2nvbcPNChJVJASrhSf4dwOM9sUAdPRXi3xe8LXF740OsavoOseKPCcekrAtrod66XFlciWRpJhCsiNIWRowCm5hs4GDmqPhnxFreofEbwHa+HvFP9p+FZvCsl3JJqkDvNdrHc26PI+GXE218BivynfkHPAB7vRXiPgX456l4q17RnlFg+l6xdz2yWFtbzG5sUUSGGWSXJRw/lgEALtMi8nBqj4F+Nni/VdD+HviPWrTSP7H8V6h/ZRtbFJRPbyMsxjm3sxUqTCQVxwCCCelAHvlFeS+BvibrXibx1d6PqM2kWSx3F3BJok0U1vqEUcbkRTIznbcI6gMSigLvHJwRWv8AA3XJ9U8I32n3Ejzy6Bq99ognk6yR287JET7+XsBJ5JBPegD0OiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKAKGuf8ge8/wCuZq/VDXP+QPef9czV+gChY/8AIQ1L/rqn/otav1Qsf+QhqX/XVP8A0WtX6ACiiigA
ooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACuX+JvhVvGngPWNIiCm5miEltu6CeNhJET7CRENdRRQB5vrXwqPjXXrLxZ/b/iPwhrUulxWM9tpc9sAEDtLsffFJlg0jDKnHAx60q/AzR7NdFfS9W1rRrvTYriD7bZ3Sme6SeUSziZnRsl5FDllCsD0Ir0eigDz6P4J6Na2vhiOzv9Vsrrw7cXE9lfRTq0+2dy00Tl0ZWR8gHI3YUYYHJLtN+DGkaRr8d9a6jq8WmxXj6hFoAuh9gjuXYs0oTbv++zPt37AxyFHFd/RQB5/D8FdDh0HRNIW61A22k63/b8DGRN7XHmvLtY7MFN0jcAA4A571t+C/DeoeHpPEMmoanLqH9o6tNfW8ckrSC1hZUVIlJ6AbC2BwC5AzjJ6WigDzHRfgDouh3WnJHrWvXGh6bcpd2Ph+4vFaxtpEbfGVAQSEI2GVWcgEDjirjfBTSDZ65aDU9XS11LUV1aGJLhFGnXYkMhltiEypLncQ5deOgBIPoVFAHjPxE+Deo6l4b0LTbHVNX164Hiiw1O/v8AUL5FuEgjYB2jKhFTaoBCxqOecEkmun034N2Gn6fraHXdduNX1eSGW516S6Rb3MP+pCsiKgVOfl2YO5twbJrv6KAPJ/EnwfiTwbq2h2z3+vXfie+t01jVNQljMzwBkDsxUIqhYUKKEXqV4PJr1iiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigDG8W/8AIFk/30/mK2F+6PpWP4t/5Asn++n8xWwv3R9KAFooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooA8ruPhRP4j0X4heEdZnnj8P65f/brO8tXQSRpKEeSIAg4KzRu2SCCJB3zi5c/BGx1qXUJ9f17WPEF3dabPpUc128KfZYJgPM8pY41UM2FyzA/dA6cV6RRQB53H8F7O6udTudY1nUdauNR0Y6HO0wijUW5Zm+RUQBWG4881YufhbLK2kXkPinV7fXdNtZLEasFt2lngdlZkkQxbDyiEEKCCvfJz3lFAHHaT8MrHSfFWneIBf6hd31lozaIGvJhKZYjKkhkdiNzSFkHOccniofCvgK68FzeHdN0zUJz4e0qyuYpI55FLXMkkiMhZQg+5h8EEffIweo7eigDidf8Ahq2oeJLjXdI8Q6n4b1C8gjt7w2IhkS4RCdjFZUYBwGIDDt1BwKh034P6Xod54YutLv8AULCfQraWzVleNzeQSyJJKk+9Dnc8YYsu05J55rvKKAOG8KfC1fB91bx2PiHVv7DtZpp7bRmePyUaUuWVnCB3QFyVVmIBx1wMcH8FfgbfaX4H8DJ4rvtRW40GRr2LQpHhaC3usyqr70XcwCyEhd5ALH6D3WigDh4vhn/xUVhq1/r+qavHpc8t1YWV15JEMjq6nMgQO4CuwUM2ACM5wCH/AAj8J3XhPwiw1GJYdX1O+utWvo1Iwk1xM0pTgkZRWVMgkHZkda7WigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigAooooAKKKKACiiigChrn/IHvP+uZq/VDXP+QPef9czV+gChY/8hDUv+uqf+i1q/Wd9mvIby5kgMDJMytiTdkYUDt9KfnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8KAKni3/kCyf76fzFbC/dH0rH1TT9Q1S0a3draNSQdyls8HNWx/aQAGLX82/wAKAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8ACgC9RVHOpelr+bf4UZ1L0tfzb/CgC9RVHOpelr+bf4UZ1L0tfzb/AAoAvUVRzqXpa/m3+FGdS9LX82/woAvUVRzqXpa/m3+FGdS9LX82/wAKAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8ACgC9RVHOpelr+bf4UZ1L0tfzb/CgC9RVHOpelr+bf4UZ1L0tfzb/AAoAvUVRzqXpa/m3+FGdS9LX82/woAvUVRzqXpa/m3+FGdS9LX82/wAKAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8ACgC9RVHOpelr+bf4UZ1L0tfzb/CgC9RVHOpelr+bf4UZ1L0tfzb/AAoAvUVRzqXpa/m3+FGdS9LX82/woAvUVRzqXpa/m3+FGdS9LX82/wAKAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8ACgC9RVHOpelr+bf4UZ1L0tfzb/CgC9RVHOpelr+bf4UZ1L0tfzb/AAoAvUVRzqXpa/m3+FGdS9LX82/woAvUVRzqXpa/m3+FGdS9LX82/wAKAL1FUc6l6Wv5t/hRnUvS1/Nv8KAL1FUc6l6Wv5t/hRnUvS1/Nv8ACgC9RVHOpelr+bf4UZ1L0tfzb/CgC9RVHOpelr+bf4UZ1L0tfzb/AAoATXP+QPef9czV+su8t9QvLWWBvsqrIu0kFsj9K1KAP//Z" preserveAspectRatio="none" id="img0"></image><clipPath id="clip1"><rect x="0" y="0" width="5943905" height="2531076"/></clipPath></defs><g><g transform="scale(0.000157472 0.000157472)"><g clip-path="url(#clip1)" transform="scale(1 1.00107)"><use width="100%" 
height="100%" xlink:href="#img0" transform="scale(6343.55 6343.55)"></use></g></g></g></svg> | 4 |
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/sdxl_ort_inference/gremlin_example_image.svg | <svg width="293" height="293" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" overflow="hidden"><defs><image width="512" height="512" xlink:href="data:image/jpeg;base64,/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAMCAgMCAgMDAwMEAwMEBQgFBQQEBQoHBwYIDAoMDAsKCwsNDhIQDQ4RDgsLEBYQERMUFRUVDA8XGBYUGBIUFRT/2wBDAQMEBAUEBQkFBQkUDQsNFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBT/wAARCAIAAgADASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDy1I6lWOpFWpY491chFhiw5qT7NuxxVuOHOKtLbUAZ0VrjtWna2+MVNFbe1aFva+1FyRkMJ4rRtYTmnQ2vtWhb23TipAWOI7RUywmrcdv0qdLbJ6UgKsUJ4NX7eE8VLDa9Bir8Fr04pMYyC39qvQ25PFTw2vtV6GADtU3LsVFtTiporf5ulXhDxUkMOT0ouMiigOKuW8PNTx2/A4qzHb1FwIPLPpUkduTVtberENvRcRUW3IqaOD2rQFsPSnpa9OKm4WIYYeBVuOM1NHb1PHBSuMiWLin+XVhYeKesNMRV8ml8irywe1O8j2pXGZrW5p0VqdwzWiIKSRlgbmKZh/eSMsP0yf0rOUktWIYkOO1O8sVLDLFPjZub5tpBGCD7g4I/KrBh29OfrUc6exZTEdL5eKW4Xy5AzzsgYhQoIABJ4plxfJp8JkuGXyx/y1UHj6jt9elRKoo7k3HNBkVE1uTT4bh7r57aSOWE8h8ggfjmnecysyEAPjKDPDAe+Ov19qj28WBSe1LdqrvakVbs9YgvridCvlPF94SgZBzzxz/kirNwimQbfmDcDnOMDOPyq1UT1QGP9nPpSfZ60pLdo13MMD3pBB8oOMcZrWMr6AZjWtVLi161uGKoLi3+U1pzBY5x7brUTW/tWxJb1C1v7VorCMd7eq0lufSttrY46VC1t7UbBYwZLfrVaW2IWt6S368VWmt/ampCOcmtiaz5rM+ldRJa+1VZrOmyDkprMgmqzWp9K6W4tOtUpLY/3aIyAwZLc+lVJoTXQS2/tVGa39qpsDAlt6pzWp9K6CS274qvLbe1VGV0NHOS2vtVWS1roZrbrxVSa2qkwsczcW+M1TaGugubXrxWfLb7e1XzEmPLHVCWH5jW5LDVOWCquFjMK7VqrIK1JIfaqc0JppisOjj3Yq3DF7UlvHkVfgiqBrUdDHV2OL2pYYM9qvw2/TipsWQw29aMFv04p0Nrz0rRhteKQiOG34HFaFvByKdDb+1XobfpxSAWKHjpVuO3GRUkMPSrcUNQMjht+elXre2+bpT4Lf8AOtC3t+eRQ1oLqNgt+lXUtuOlTRW+O3FWUhrO5ZTFuasQ23A4q2tv7VZit8VPMVYrx254q5Hb+1WIbfpVuO39qm9hWKi2/tUscBz0q6tvUyW/PTNO4FaOLjpU6w/hVpIalWE1DYEEcNTLD7VOsJqVIsUXGRLDT1h9qsrGKdIp2EpwfX0pcwFfaFGTwKXyx3qKbUDb5DRs4HeJS5/IZNZUPjbR1kaJb2Jgpxt3gGNv7hBPHsDis5VIx3EaU91HDEziWIbeu9wAPXvx+NZS+IPOb5bm0MQ/ukOD7bt4FQ+IPiDouj2qy3UTK2QEa6gZVBPfcy4x9D+NeP8AxQ/aStvD9iyW98zTSg7YVI3kcfw/NjPqwAJ7ennV8TCP2iZSsjtvHHxih8I+SZ7dJZD9zynYbhzkHMZA6ZB3EcHBrh7z9qazaGKaCzumgkPDfZiBjjOHUnJHPQDtXzPq2t6h4ymP9pqfnw8duhYAKRnd13E8jn+dS23heY2piieSC1OS0e48t1zxx19a+bq5t7KTVzl5pvY+lF/ag01neC7sFTanm+XLMEJH0kVeenQtXN6p+1JFeMkdhp8EW5wrNlpMZIyTwoPHbP4ivFZPB7QsSYmkKhUIDEHae+cdc10mh+FLe32lkbnI8yIbmzzleSMfrXn1s9ny+6y7TfU6y0/aC1pdUWeOwSz7PJGpQOAMYODjJ+nbrXVt8cP7ahRjOdPkVsFLlQGUgfeXOPX1ye3WuPtPDqRsCIwyFsss+DhR6ZxtP0PesrXvDMpaWeEMnzDEbgnA7Bc98cda8inn1XnszRJpHo2ofHHTpL+CVgp1WIeWbqEcXCDj5hn8R1xjGea9Q8K+OovE0UVw00UXl7SI4ZVkZ88bgOgHI+v4c/K0PhI6lGDEGBiyu1SeO56nio49N1vSp8JO08SnKxuSBk++cg9Ohr26GfQTtNCu0fdEN4l1aggCQsMMUycHHIp0RLxKyjt0x+H8xXyz4B+PWp+H72Ow1re9vJhUnIBK8fxNnkDjrzx9a920T4iWNwV/0uKVZcMFyVOccj2Oe3IOeD6/VYXH0q8eaLNVK51rR7uQOfT1qNo968c1D/a0cm0pJHJ5vKbSOv59OCfwqzbjzELK25c/eyefwzXqRnzbFFRrXcelRNZ+1aMkTL0/MnP9P607YGrbmY7GNJbY7VWkt63JLeqrwVXMyTDlt/aq0lvW9Lb+1VJLc81QzEe368VWmtvat17b2qtNb+1aLVEM5q4t/aqMtv7V0Vxak9qoy2p9KZJgy2/WqMtvmuikt/aqU1t7UDRz0lt7VVkt63pbWq0lr14pp2Gc/Nb9eKpT29dJLae36VQuLX
GeKq4jmri368VmzwZrpbi1zms+a19qsRz0luearSW9bskHXiqskPWmmBgzW/XiqU0Fbk0PtVKaDrTQiraw8CtG3h6Uy1g6VqQQdKOYIxH28VadvBnFRW8PTitS3g6UrlhDb+1aEMNNhirQgh9qTuFgit/arkNv04p8MNXoYPasgsRwwe1XIYPapIrfpxxV2GCo5gsJBb+1X4YfaiGOrsUXSq5gEjhqzFDntT44qtQQjcahlIbHD7VKsdWUh9ql8jvis+YojgTJrRgh6VDBEOOK0IY6hyBgsPHSpFgqxHHu7VOsPtU3sIrLDUsdv7VZSHPap1hwKTkBWW3xQ0ICnAOcds1dEW7pzQ1vKyHyxtOPvMhI/LvWbkxlWJRMqsrfKehpqo7gtwQCQASe3esXxB5+jxG5nxErHBmiDo2fXkFT+NeLePP2kotD0mc6bqNvfTRrkgQEgE8gF1ZRk84AGeOwya45YyNP40TzI9e8Z+NLLwfp8l3fyxwRIOGkLDnt1T+RzxXxt4o/aY17UNbu7nTLxjZtyglnMY6AcDj0/hz+PbgPHnxM1/xkwkv7wJp5y32eIEK2T1YnBbPbJNc7Y26QxrcTJ1PyoRyB2yOMnnp0FePisU6mr0Rzylc6vWfG3iLxd+8klL3L7gzKuxUz3JGCT9fxzWfY+DYm4nuZHuCcEkHk9MBRn9a0NPs57W0a+jQqiruXJxxgkAHjv+Gea7r4c+HPtSRy3EjO7HeTn1r5PGY504twYoxcjD0nwXf2Me2ARhc7syHPGOFOM12OgaXdNlSRbOXzuhXLZ7kEDjt713U2n2FnFHCqJnoFDDJ+v0966Hw2+nWexY41LchW5P618dXx06mrWp0Rp2PPdY8ATLp5uCkkbZBWNjuzjJ6Y61e0Dw3DqVosrDdu5wyZC+1ereIpLX+x3ZCjSbdiHHVj0H9fwrmfD/hdLeTzIw0bJ94x8D8u9cTrSkuWTNuVJ2JtF8KWke1Hi83gLnBJA+nSt278O2LWyQw2hc4PUAEfgK0rGaeAiEyhnHBOzn+ddLp9n5ikyycZ44I/Osl73wmnLc8Z8UeCVhgSeJGgmyGDqMHr0PqPasibw2PLIureIE8BlcYz7g4Kn8MV7VrVhHdTLajG5m3FlPYd/aq914VP2cjzPMXbgq2Gz/jT5pp6dCOQ+erzwlbvNNDIf3UiZG05ZSPX1x1rkNN0++ju5o4jcIlu7Zw52fKeCF6Y+vvivd7jwmbfVJFij2weXu2ngKScYye3XiqV54fj02zup1VIpPvbQMKSeOD616VHHypL3HqYOmeYad8StU0K5UmVHA++r/uUKn72GB6kcg9+nFfQPgP4ueHtas133i2bKoCwTDYc9CM4CnHAGPX3rwPVvCbaxLcLBIFwRlZT5YORkgDnvnGf0rnfs+reG7wiBEmAG3aCVJ/HqD9K+3wOeyppKbv6mdnHY+37e8S7t2mijl2fwu8bKp/Ein+TKu0s3zeij5f8a+ePhl8Y9G0+S3sb6wuo9SPysZULvu54DMxx3GOO9fQUOoR3Mg/0yNeOAu1wfxyc/UfrX3eFx0MRG6ZpGVy20eagaHHarKzNu6CRfXy3T+YI/UU9ozIobBGex6ivTUk9jQzHhzUDW/tWqbf2qGSEVXOthGTJb1Vkt+1bLw5qtLb+1axkQYNxb9eKzZoea6G4h+U8VmSwZPSrchGNJB7VUkt+tbjQe1VZLfrxU3GjCktqqtb+1bk1v7VSmhqbjMia34rPuLfrxW9NHjNUJo+taRZJz9xbVm3Fv7V0k0FZtzbnniquxHOzW/Wqc0PXit6W39qpTW/FVcRz80PPSqc0NbctvyeKqTW/HSqQypa2+AOK0oYenFEEHAq/DDTEh1tDyK1beHgcVWt4cYrSjUKuTwAMkngD3rOTLQQw/NwK1LeHgcVz9/4gh09lWMxzSFtuS+FHr0BJ/D86IfGir80ktpAqN86kFmYe2WGPr7dO9eHXzrBYe6lO7XZX/wCAd8MFXqa8tjsIbf2q/DD0yMema4uX4qaTaxqA6tITgds/TA/xrD1j42WUASaKbABzu3bcYHQk9e3FfN1uJ5S0w1B+sv8AJHfHLJWvNnrSx4ZRtZi3TApXnaNSQiLzj53B4HfC/wCNfOGoftNXcLOkc0CHGMebuz74H+FcpP8AtD6vctIomZx91WVW/kccdeea4pZlnNde7FR+X+dyll69T6pudavIVPlhGbqBHHk/+hVBb+INSuJkZhJCi8sq4UdcYbAz+Rr5LX47eJLVmdfObPoG/PrV6P8AaW8Rx5EkUjr6jI/qP69a4qss5qK3P+Ni/qFvsv7j69hnurxTHJM4BzllmdT+B9aazskm2PUL9HZSP3Nw5GPXnPrXyEP2mtTEhS4guSC3LeX/APEmuk8P/tGi9mj8yf5unlZ+b8iM141ehnMY8zlL72Yzw8YaOJ9UaSl75yqdcuJPVZW3D25xWvC2t2rNwjjO4KSfmHuTkV4FpPx2t28j94oG4EhyRn0z1x+IruLH4vB4zJbzbfmynPA559q8v67mFGzlKRzulA9RaTW96yiNIIiPuho2G706f1q/b6pqVvGPOsxMw6+WVH9f61heG/iNBqEY+0S5ZhxuGCeeMiusj1SO4hAiCGVhgN3HH8vwrqhnuOTbVV38yPZR7EFn40t45xDexNayEdHUqPzyRXS293DcqGRgVbkYx+X1rmh4eS9tg7ASORkqOAD6A/4istrG+0O6keEvFFgF42Bwfrzx9RXt4LiivCShjI3j3XT1MJ07bHpEMYbkcipjFXHWfjW307YLx1hRjglmGAcevQitvT9eW+jMkMiSR+YyhgQQQD2x/n1r9DpYmlXSlCV7nPsWLq0luJCGtIZUH95ssfzXA/OsTxBNpGh6fPd32ieRFGMtIwjVB7lt2P689K6S41S3sYfNmkVMDOAefp06/Sviz9pr47DxBq0mj6FeeYYPllkVx5cTDIOCDjOeB1xg9SwAupUjCPMzKW2hx3x4+L3/AAlV9Jp2hQLYWsZKt5O0yE9CCwAwO2MmvG/O1KZVglnEjZ+VMkhc+/rVq3jFsjZxI0h3M7E9T1JP9P61s+EdJEsj38jb0k3KnrxwDj0J9vWvmcRi1Zza0RiYUegztG88jNM8Kb9qrxxz0PsO/vXTeD7MeIbiF23OpUmNZsE7ckZ9yTnn+VdP4X0v7ddTttG7d95gDnoOfc/1qrH4bn8NeIo/Jid7C6YrCM8hu6/jzgd8V89Wx3tVOlfXoCOv1KzLeGbiELhXRkHytkkqcHJH9ag+H9nqcsMUUUscZA/1kn3sdhiu70e409dH/wBM2BsDKR/eB9cnj/PQVwvh3xN/ZzMsUW5I5SscjD7wBOCcdeK+VhUqVac4xjsdKR6Avw/ubhlku7yW4V8L5YbywCT14HSu/wDC/wANNNHWWQEDBfzyuffjmuH0m917XGhCo0a4GNy4OOwx1Ar0bRPDZs0jbVtZMDEcRq4j/U15tSctFJ/I0iTeJvB+n2Nqk6Xsj+SuQk0xk59j61F4Xa1uJEWb5kxne0hH6
Ct/Ufh/Z6pb/LJLIMcHzmb8jnpXP6f8LHkmkT+1bmEKQo2sBj68c1zytfVG0dDsrPw/pl0pxJLH/tRykHFWrrTYrVUWK/mIxjDEHP6VHoXw+TTYwZtQup3x955MAD2Aq5P8Pru9/eWeoSFscRyjcB+NdMYyskoD87EFn4UgvwrNLKJOvmCQg/n3qHV/D+p6ZGz2souo16rIMMAPcdabNpXifw/kvafbI1/jt/mJHrjr+lVIfiJcTXDWwtJ5JF++iIfl9c56VnJQgrSi1IRBHopvod7bvPc7i2e/t6VyWvafLZXkUbbTz5pGMluoHGOufXgV2U3iyzjhmd7J4pFGMqMfh1xUOn6bB4iAmuZTFI4AWNT0H8OT/nmuK66PUh+RwOm6ELy41FpIzJGXjDhc/KwXrx3w1c/4j8GvMHRAtxGcgKze3QE9/wBa9Yj0n/hFVuI3XdBM+8uhzyeufypl5pltqEbvBjGOPfjv61oqs4yM2ro+XtU8O3lndGMtynKsM709+vHoa7fwH8btX8DQtaagFutPU/8AHzCh3L7SAcZ+v611PiDQ41uCroNrt8rHsRj+YzXAeItBGnLJIip5U37plYnawP8AC3f3B69O4r6bL81q4eacTG1j6k8J+NrDxZpsN1FOiSSjiMx8k54AxnJ5HGM8jiuk8lhGrMpyR34r4u8A/EK++GfiqIXjStotwFQ/P80SkEHB6lc54zz7Gvr/AEjxJpniDTY7zTruJ4ZCAGhkUgE9jg+9frWBzGniaSndXNY6l3y9wPcZ4I6Gs66cJMY8ZIrQ1LULXQ7F727uFS3T+Jj+QHqfavHdS8fy+JLyeDTELRk5Zyfk57Fu/wBBWePzahl8Oeo7volv/wAAryPRJtYs7VXaVyQvXZyM+mTgZrGbXr7VCRp1inlnpNPIQPrx/TNZOh6Q8kiTXMjX8/RQw+RPZRXf6XprS4Eq7BjqQd1fAy4oxuKqclFcq8v82bRo6anKWvh7WLqRZL3UP3OctHbx7R7DPBPb0qGbwLfXM8ghupY4mJxulct9cBgK9D1G5s9DtfMlK9OFPOa4vUvilb2jyKoSFc9eneuavmVajK1as7+pccPGS0RDa/D2W12G41K4dgc7WlOD7YyaivPC8LMwN4ynqQshz16cGvPfGH7Q9lp+77RfxxxqeAZApPTtkf1rxjxN+15psLSRW0r3MvZEVmyfTp/XFKOJx2K0w/O16s3jhltY+l7XQ7eYZluirITjbK7A89+fSpP+EdtZGcG7BP8A106e/U18La1+1r4iuWIsdNjt0XhWml2n64ANZa/tN+N5WD7lA7mNmAH5gV6tPCZylfnt/wBvf8E6Y4Fy2ifekvhBWjdU1KMyZ+XcAy9enG05wRWJeeF7+3kZS9nKBz8srIcdjgrj9a+NLX9qDxPayqCtw+D/AAjPPr96t3Tf2uPEkcgaQOyqQMyRNg46diP/ANdenRxOd4fR+8vkynlsn0Pp2fR7pVXdARu6YdOf/Hqzb7R7uGMu1rNt/wBlC3/oOa8p0X9spzhb+CEjgbtxHfJ4OP8AIr0TS/2oPDt4oJjXf24BP55rs/1gxdHSvR/D/JmEssqLZFZ/L3EFgD3DcH8qrzQgity+/aO8JRuIrt48yYULIp29PU5H6VVHxX8A6tkK1gjKNxZSi8fgRmu+nxNSaTnSkkc7y6t2OdmgwelUpoevFdfJq3g3UIw0GoRpJIMhYptwH5nn9O9YFxHE2WglWeLOA69K9vB5vhsbLlpt381Y5KtCpR+NFSCE1fhtzxxUtvbVoQ2/TivbMCG3tuRWN8QrPWW0GRdHtHu5mH3IyAc9v8+o5rrra35rVt4RxXNWh7SLg9ma0p+zmp22PlGTVNZ85rW/d9OuwMtDdKyv19xg/wAuazb7w/qF8pJvJMN3UAf0r6813wdpfiyxNtqNqs39yTHzxn1UjkV4h4q8B3vw9vkSZ2u9JmOI7kgZ+jH1+tfGYvBywr54LQ++wGOoYpcjVpfmeVWfgm7fInv2fHCJvI9+enbsKt2/w9U7yIo5WI5Lru/UmvRf7KyyT2u1kx6c/wAqtQ28jnawMbdCRu/yK8aWIqdD6KFCm9jyO58Gz2jDMISPOS+wD/AVEfDP2fEgUMpPbH9K9futEdhgsxT3PP8AKkh8H2c1u03l75D/AAyHIP8ALFOOLdveOmNGMXc8h+yoWAK7s9OeahvtLMEe9QrKe20Zr1C88DidXeBcyKC2z/6x5HpXOSeG55FcNHtxxtLE8+nIraOIjLW50qmmtDzebTYrhfl5bsRx+hrMk0mVmIPznOQG4Nd7qGhZXujf7Q5P+Nc/qFrJbDax+XpnGQf8DXoU6z+yzmqYdSXvI5p7jUbMGJbiQRjnyy3H5Gtjw78V9W0JzFOftluflKyHZIB7OP6ioTafa1JZvmz3P+NZOo6Zt3bkLH9RXT7KjXXLVimeLWy2D1se3eEfi9b3kkZgvZYWDBms5uCD0zjpznqpP0FfQPhD4uXF0oQykRkjo27rz17/AEzX533Ctp7ho3ZQpyrAkbT/AErvvh38V7rSNQt474lsuAJScK/sw9f9rv3r53MOHYzi6lDp0PmMRg3R1Wx+pHw+15dYRvMcMRkorgBj6cV2bWHnQvHKuVf+6Pu/h3FfNfwf8YR6ktvKZfLbbhSzdK+h7DXI1t/3kiu7p/D3OK+JwsYxqSpVtLHlVF1PMfGnhX+y7iVcb7YnI3McEZzyPr+XWvPlutS8OrJLpk7qp5ZZjuTI6EA981678RNaFvFIswLJIOOMjGPX0rxLUteg+yzwbnk5Kjdxz6nt2/lShUqUZtUZNJHDJI8n+Kvxr8WarE+nz6wtlauPMllgQK23IGCwGfXgfrXjFrdx6hIGU4iA38nk+7e59PpTPijqDap4m8uORlSZ+hJx17/571DoTIjuTGTFE6lhkjJBOBx34ye1fo9Pm+rxnVk3KxwSN28iMkawOrRzswRdx4Cn72cDrjHA6D0rsfDEPnwx2cLtjGeF3MRjkgdunXoM153ZyHUtSleEMcttiXAAPIySc+uK9X8M6kPDdopjUyX0g2sUcAk56A4xj3rycbzRgo9Q0N7Rbg6CtyYreY3LthUKduOMnp9TxVhrPVfEV5E7xxw/vCyxx5+XjHJPJOO9b/hxYrRhc3N3DcvKql1kjOMgcBSTngEjOOetStrNrpfiGSKJ/wB26hkHoO6/gc8elfKTm4yulqUdF4M+DcVxHGt7fF2dcNtbG4467sc1yXizwDL4L8YN5DNd2M8fmRmQAMpBwwJ78jr7iu80/wCI9ppsY2yDzFwoUck/gK43x144/tDWbTz4pIjsJUSIV646Z6jjt6VyU61eTatuavltod/4JR1i3SyNErAk7Dlznnr25/GvQtJn0q3ufM+xw/aj1uJFDOf+BHmvAdK+IkOnSJF5pOcnbGMknHTArtdHvtd15lls9GvgG4E00RhQA99z4/QGuOdOdP37GiZ7sviG2trfqMn2GMelcrfeNLS21AspCh2XIz0+v61HoHw81W+tQ19qEVvu/wCWduDI
f++jx+QqrrXwc0G4kQXuoXzXG7I23G3Jz6KBWcueSUp6I0OrsvFC3gDeduTOB1I4rpLDxFtjVVlCr07iuRtPhsY7dfI1WW3BX5V2o49uwqWH4a+IYld4ddt3ZuiSwFcfka1pxrQ96K1FzM7keJDJgGVXA9qpfblkZ2+RNxySo5ry/XrDxtoZUQWMeqhv4rebH6Nil0vxd4jt7cDUvDuqQOc/8uxfp2+XNVKrVqayTYc+tjrfEFrYapcRW7gK7HHnKAGC47VAvgeezi3WV2kpIz+/+U8e/IP44rm9F8UQXOseZqEckFyRiOCSMqVGeWwfeulvfGtrHbnY464wo61583HVzWoaS1Ob1yTV7cra3FpLtZtqyDlM+5GeKq2eg6ppkIkikimj5JhEh+vHGAf58VpR6+2q3UaLIY4c5dicAAc9PXOK145oL6xdPtEglxgdM9eeT2rnjr0J5U3c5W+httUsydpViDlWHKkE8EdjmvONfs5Iw0EyeajZAJGR07j+lei3mnPpz3SySeZJKd0BB4BxyDx3P865S8vI7yFozHz3B9R39iK66UrS1MmeParps18k1s0RiaMjYpcv1A5B9Mfyq18PfFV/4MvLuzs5tomjKNDcJvBPByPT/DNbmr6aY5hNEWR0bhjgANz8jZ7Ed64qa+ih8Q2kmNm98bSMFeR8uPUZIxX01CvNLmpuxm7ppnrd7dan4sYS6hcyXGG5V+EHOeADx/n8NDTLVre++xspiMP8Oz5fYjmtfwjYwXWn26o4cBAGI55PbJ9u9aGsQxpq1hCQGlMeJMddvY/Xnj16+leLVrSqtub3OpRaVzuPB1vK1ur+arDPXHUfUVra1rEWnwlQ22THLEdPTn1qhodwmn6fG2cBVzncK8z+JnjIWNlcTlwvlrlnY4C45J/Q12RqeypJR3Z2wi5Oxk/Ev4vWfh61mlvJwiR5O+U4GccAe/XAr48+Inx/1jxJPNFpVw1nascfaOsrDsF/uj361xXxG+Il/wDELXJriWWRNORj9mi/H75/2j+grn9PEoTMKNliQWYfnX3OX5FTo2xGKXNU7PZH1GGwcbK/UPsMt9cPNdTSTSynLTTHe2fU55P0q9a+F5OWEmc9+n4Vr6XoV1fMrFMKOenJxXZaP4NbmVixXvnv/nsBXv1cRyK1z6Cjl8bXaOM0/wAMqGx5Rl9ATx9a110TaRHGqg4ydo5ru18OtNhdmB/dQH+pP5/41pxeGBDHtEZUkZJY/nye1ebLFdWz1aeFhFaI89ttIit1wYld/c55/rWnDoZmYFYlBB6quOe54/8A1Cu203Q4o5GL5ZAPvqDjPTHua6nTfDUkkEjwxrEWAH78YPXAyM/korlliH0HUpwjY8ig+G4aPLKzOckB1I/D/PrTW+Hccy/IFOOu1efzr2aPwLDZBnuZ5biY8kEhYx+A6fnTJdBEY2qmF681g8RUk9GcE/Z9DxlvALQRvsuZIivPzMVAI7flmsVvDMrTEGaSUZ7jnrjHT8K9Zv7Nr68+ywrlTxlOc9scdu1aVj4R3XSWmnwh9SiYGS7kQGO1HTIweW/rwO+OvD+1rT5I7s5q3saMHOrseZ6R4T1K6uo7S3tWadvuwqNpYAZxnHHsTgcjJGc17T4V8ISaCoea5Mj7NpiX7g75ye/sAAOevWt/RfDlt4fs/Jg3SO/M1xJy8repPp6AcDsKvNGK+7wmXwoJSnrJH5vj8d9am1BWiLBF0q7DHSQRjir0MNepY8vUdbxbcVowJ0pkEHSr8MNQxktvHmpNS0Cz8RaXPYX0Qnt5htYN1HuPcVLDHV6FcVjOKkrM2jNxaaep88Xfh+78H69Nod8xaD79tcED97Hn5T9e2B34qKWE2t4Pkwqjgt3z07V6/wDFTw0NW8Pi9iwLmxbzQ2OSvG4H2xg/hXA6lb/bLKzkEXznh2zgD/Ir87zDDrDV+VbM/SstxjxFFSe60ZRhhe8t2wqk/wCyCAP05qO3svsUu2QbBjjPIP8AKtnT9Ffdt2OG68//AF66W38NfaISCgbjpzivFcl0Pc9pGKsziG095SWATnj5Ris2+0tCGVyA5GBkYb6Z712c+jz2Lorxt5XTf6e3Sql7ZrNGYyAQe69f/rU7hGs76bHjOtWFzayeXcwiaM9MLXN6lpWPup8rDJVucf8A1q9Y1q1kt5SjoZI8HaeePeuRvLcTK0ZIZ2zgdD+Hv+PNehSq2PShLmWp55cabEUO0iJuy5wPzrmdSRo3ZHXAX2/lXdahYukjRycH8efzrkdehlhTYy5jB+XjOPp6V7VCpdoJpWORvo45CTiub1G2MW5l6Z+8vHHvXWtau8gKrvHXGRWXqdsqxuRxzz7f4g+te7TqJOx87jaKlFux7v8As3fEieGzgWe6VmhfyXV2xwMY7Hqp79wa+3vBvil7y1hmNxtbaVODxn+tfln8N/EH9heJBC7+VaXgETtx8rfwPz6E4+hr7R+EnjCU25sru5aQBWEcmMAgHhWx0xX5txJl7pVvb0lvqfB1IuEnFns/xU1dW0qOTJeRiNwHfJ/nXzv401x9PsbibHyYCuWByD1B/L37V6T4+1Ua1oyrbyMLqFseWxxlx/CfQ4NeD/FjWpJdLeMKsPOyaHILbTnBBHB6kcdyPWvEy+lKtUSZ5VWOrPC9W1A3OvzXgbcUyRk/xZwP1Oa0zfPaaTDaxBWmbLFh1LN39+M/lWC8JkuLgxtkLukBPc5P+fxqxFIG+zxu/wA0incV67Rxj8Tn61+oypxcYx7f5HFy3Ow8L2BtgHnYJEE3oTjkjOD9P8a9C8K6HNql0XgLMdoIiiPzHIHPPQemfWuOTULa6s4rcIIo8LmTHKjvz+n5V3XhvVPFFxYxW/h3Qp7mNcYlYCNW7Z3sQD09a+Txkqkrz2b76ILHrHh/wB59qovtQa1zj93bIrOPq5P8s1v6V8KdCklmN5e3F2jNlW3gMv5KM/jmuI8OaD8RbqbdLFawBhgwtdoW6c5wSK6jTfDfja8zG32HTVBAL3dzsH/jqtn8K+Nre1i/4kfk0aJLseneE9B0bwswW1dlVupVsMfr3P4YHtR47j0bXrBJL61ivRC2QJV3KOQCeeR1rj7f4a+Kb7LHxnodsOh2xzy/hnaM0X/gLXrG3kgbxZpd7NICirPBPCOR143d+54rg5Zb+0V/XU16WsdF4T1DRNFuE+yafZwMq4V44wCB256/rXUeJPGh1K1hjhdYvmHzdzxXimk/DHx7HgY0+4ReA0N5nI9cFa6i48B+N47NSdI+1si/KsMyscenXk05038CmnfzJT02PXvCOtTCNiZfMx0PPNLqlj/aV+k7SsfQZxz14rwq6+JXiLwxdRadJ4c1OO6I2hWtHAPbA4wa6Dw/4g8b6qxeHw7dg5+UuwA/+tSlh5RglNfih83Q900ORbWTMkjPheM8mr93riQsXkfYnJJY46dc15dZ6H8RtQ+U2dvalv78/T24FSXXwj8caxIRqWtWdnB3kBd9o9hgA/nXRTlUjHkitCvkdxD8RLOOZkEqK/17fSrMfxBs5phGsiZI4JI
/z61xdv8ABbRLN1F/4jvLqfusZWNc/rVl/hv4RhkKprOpLJtyAtwGP5bTUSrVVpzpBaXY3vE50nxNYsJ1hllHCPIA2D7en4Vj2/gXQriFsKYGIKr5czD+vFc9q3ghbeCSSw8RTpIOUjvYwV47ErjH5fhWRpvh/wAXXkgitr22ugT8rAvhe5zha5JVOaWtmOy6o6S3+H9la3Uoj1e4B3fdWQHHGQucdMGq1x4P1i2ug9pfLcY42OMZB9x+FQW/w18dxs8v2q0JY7irsy84x6c02WHxroLOHso7k9H8mYEgeuCQfypyi07yjYVtDK8RtrOlx+beWzbY+PMh5x9a49dWjmuZnbgFhIwX+Hjk/wBSK6LUviNLuktNRgmt5P8ApshXP0zXEajqFvFfi6iRVDfM8eMDgY5HuM1VOPkYyRDrd8I3dsloZdsUir/ED9049R6j2rg/EFi1hrVm7fvXjlU5JOOcgN9Dx+VdB4hH2eOUwofJHO1skp349qy/ECnUNHsLpXzKZViK7zkEkcZ7g9q9nDdDKXmew+EdZks/DTN5auRmRQWwWGcYyOQMnr+Qre0218xZ9SkZi6ZwMnBPJzg+ledeEr6fUFghE0fkqAqvwN2OCR6nGcema9JW786OPToAojXl2HGI/T8egzXkVYOMmn1O6n70UdirT3WjJKURdyh/LXJOCPrzg5r5k/aW1EL4M1f94IWktnRfmIyWIX19xX0zqmqG00VTGURSoKZwMg+xHGP89a+LP2tfESx2sFij5ku7jLc4yiDcf1K162XUfbYyjCPdP7tT18PDmqRTPm+O13KiIcxquTx+ldh4Z0L7Q4j2fOx6kc/WsLwro8l9cJJK+I2cbRj8uK9P0mzNnKYlKg542kHj1J9TX6fiq3KnFM/SMLRjL3ma2naSsmy2HMQ5Yj+L3NdhZ6R5y42qB0PJH4AdenasbSlMLKxO5s/KPU9s+3tXWWELGHzJDtRhyV4/D6dPrXzlSUnuepU00RMdPtLaH9yhMh52gc/y4qOzsZbyT5oQEGS3y/pmur03w20lupkXy16jOc/ljgV0On+G7bCeYBIy8jGcHHtnp/hXNqtWefLExirHO6donnfKyqO37tcAewP+e1a9npxAMNlaknoXcEAev1rrrPR/myY8r0z2Aqzc/ZrW2dIdrBuS+OuPerhFy9Dx62L+Zxs2j/Z1zO3mS9iOAPp2rmteytu6Jw78KcZz+fX/AOvmuyuW8xmzz6k9R9T1rmriFL/UOQdi/exgfz/qa0k1HRCoycnzSOXsdJa3tUigV5L66by42UnKcHJyO2M/p+PfaT4cj0LTRBGcufmkf+83rVXwnosd1r11qz7X8pBDF7Z+voP/AEI11VyvBr7vJcOqdH2r3l+R8ZneLlVrexT0W/r/AMA5+aHFVJFrWuI6zZkOTX06Pm0XbeMn2rSgh6VDaw/KM8Vpwx4qCrj4YsYq7DH0qOJOmKuwp0qGNEkUfSrkUXSmRp0q3ClQUZvibTW1Hw3qFuq7jJCwwB146V5Fplyp0WyEis8wOCq8YI9hXvSxB1CkZBPevEPEFv8A2P4k1mwnjWBUcTwR54MbcqQcY65FfJZ5RcoxqJbaH1WSVlGUqT66nV2NvBeWgUgK7N36g1oW+hXvyxxPhe2TgGsDwzfhrcFl+b2Ib69q9G0OZZFDN/D05718RFWke/XlKnc4bxdp95aqgkuYy7/KsYGGH5jpXPwW5aNt42FRznPWvVfF9ml5Gsnlhm77QOBXKrYww7SIygPUgdTXQ46mlDEfu9Tz3VbeOaNo2Tr1UfzFcD4g8NhQzxJh85HPDjrn2Nexa5pR+eaKPevfaOfxFcfNaGRSshLJn8h61duXY9nC17ni+qSJJmG5QhugYj5lrkdWtRFuiuAJ4D9xwc/r2r2TxH4Yi1C2bzI2hnX7rAYI/wAa8n1yC70qZ4LtAQw+WRfut/ga9HD1OZ2R6stVocJNp7NdMqzbUALBiMn6Vg3zOsxjnxxz9R6102oq0fzRkj09a5+8dZm2spDc5HpX0dGTep5NfVcuxy06mNjjIYNwRXt/ws+Id3AluXnRp1IDKxwxI4JB68j+teQtbibzVOBtOOBWrpzNpF6LVZnjDL83lsfvckHr2J/WtsZh4YujySWp8FjqTj7yPqDUviNLHCk80Qdcj95EMk46cZGSPTrXmnjjxTYa9DNtk865bc4PII6ZxkA84rgtP8eX1tMiXbEhcfMFGcH16Z/MGrmvX0N5DJdpErDZyyYxu7ZwR6dx2618xRytYWom16NHzcpOW5iaTIk1xfB3wZIWAGCfcDH1/lUfhm1/tBmuZpFKABVBzkAAHP8A9eqOkzG11GCeXCxsCWB/i9RXsfwT+Gf9oRRXOoRk2IfMcLcGX0zyMr3969jG14YOlOcn2Mzb+F/wtvdeliv54WW1T5lV+kpyCCR3XjvX0dpPw78S3tvGI5kjg24RmUfL2yAOPTrW/wCCdJs1VUt4ipA7ngf0+g/pXp+nyLFCCnAAydpzX5LjMbPGVLy2NI076s8ubw/q/hlYPMuw8mdolBwc9x+tdHoa+ILhlVpI2jA4Eih8j6Gur8RRRNYWjkK/+lR7mxngn9e1beii2hVWypLHjP8Ad/z/ACryPYKVWzehqo9Ece2lzJk3NjZqx9ISisPcrkD64qlqngOPXrPzrcTafcJ0UnzE/PJ4+h/CvYv7Pg1SNgUV/buBWVJor6KcwuzQE4Kkcrn+n+NdUsBKD51qirdzwCWHxL4RmZ5o5DAp4mgy4I/DkflXZ+CPiYZ8Qu/mZORnnFdprnh2W5Vrix2xy4yUx8jcenvXDx/DvSfF1w4xJpGsxtky277CWz19D+I5rN0nGST0b7GWqdkaHxS8Xpa6TZ3O0bxKv7xuBg54/Ktbwx43gurGNleMlUBypHPFcf42+EPiHWvDNxpo1GG6AG7e6lJQw6YH3c/lnPvXCfDP4ZeJdUUQf2wlrLGxXGwN0OMnmtJUJfHzWkJSadrH0Va+Ph5nls+0dfmP6VU8QfE+zjsTGZ1jLDkbvu1wcfwT8WszeZq8LAdHWPaV/pWSPAtvHqjWtxqEuvXNv80rMALeFh7A/O314Gazl7eCtVloXzeRQvNUuL+W4vZrloNNBOJpixaTHaNP4vr0q/osutattEG3RbHHRV33Mg9WJ+79K120c3mrBZMMISFx6v3/AAXoB0zmvTND8IxWdqjlQZD6iopYWVaVoxv59v8AgjV7HM6XoqJCJLiFpSpwHlO92P8AwLj8hV1brUbZsWqLbRn+I5J/z7V1NvbGafZEqgrwGx19aivreJbyNWO4t94D0H+eld7y72MedysU1poc9JbapqhXzL24IPcYUfkBS/8ACFStktK0rduc/wBK7y2t08ldiqq4wNw/QU53+xqQ5Ab04/Kuz+zqco882Rp1PML7wsnllLq0M0B/hkG9fyOf6V5t4z+GdpLp8t7pKNFPDljZqSyMByQo7H2r6YjSO43KyYBHOa43xp4bjaEy2pWOYDOGJAP4jpXFWwcqEfaU3p2IlHQ+VLmYy2
ThoSC4YliMfXPv9a5a8t/N8FXvmxM3llJlUEjPzKcD8Ca9a+IngeZbWfU7WIxKFxcxxkOFJ43jHOCcZ+teamF18NyWe0vuQh5ADgZU43fpx1rbCyjZSj3OWS7k/h/Umt5kit5cfKPJL4+RRnk4xjufXJr0jTbueSGOGKRRubJZ2I3cdT/nvXg2k+I4NNmkdbpG5+z5IO4beBkHHPGcDPWvQNE8ZpYr5jSyRPj0wMe+eSP0oxeGmpcyOmlK2h2XjTXLrTbFS15tt41O9dwx27dvpXxP8X/FQ8Z+N1jDg21imwnOfnJyx9+Ao/CvUvjR8ULlrK7a2k8x1jZyo/hHHJ/MV896XpkjMLnLTljvctk/MeSTX2XD+X+xTxVTfZH1GVwdSpz9Ed34ds0uMM6kwLwq9C/p+FdrpkK2YMk/ysPuqfuj0xXKeEJfNuUgcMr/AMOeld7q1u0NnG4yDkAFTg/WvUr3crM/RKUtNDT0FTe3BbaVTOAD168/5969Q8M2tsyoTEJp1yd2Mqvoev8A+v6VxPhLTR5Me75thBYgZz6df5n8hXqWi26w27k9W5X2+leTV+LQnEztA2odzbYzIGGBx9e9dFpumqOgz+GKTw74Znkt1nuD15Abr+NdRHYxLsVVAJ/u96532PmalVbIz5LEyWphjZgT2XuK5PUFkUlVGAOPy/CvSZLMLCYwRnHPX0/nXMXml5bf/HjHXpXVDY8+NRc2pxFzIduGHyjrj+lY5smtdPurtTiHH3T1Bz/D2Prg/nXReILY25MUagvnbt45z/n9azdcsbi7t7HTbU5lmkBlAx/q+jHqPWsIwdSqoR6nqRqRhT527It+B7JofDsbFNhmdpfwJ4/QCtS4iPPFa/2ZYY1RBhVGAMdqp3CCv1mjD2cIw7H5pVm6s5TfV3MKeIms+aE1tzR5J4qnLD7V2IzsXoIfarkcVLBHVyOHpWYxsUdXYY6bDDzV2GH2qGxjooTV2GLpTYY6vRR8UhoSOHpXG/FLwm15Z2+v2UPnahpqneijJlgPLrjuR94fj6130MXSrSwiRdrAFTwQRXLXpKvTdOXU3pVHSmproeA6fYizmjvrNcW0yltjE8ZPpXc+HtSMi/wnHUcms7xZFbaXrz6MR5S3CGWInuPQH6j9DVbwzN9hjkSQFvm+vFflVSnKlVlTnutD7yNX29FSZ6Tb2/8AaFuU2AgggDHQ+1Yt1pTWoKFFZRx2U1s6PfbVVoishYA8Hpx0PtWrdWcWpJlXUTEZKMcV30oKUfM81ylB26Hn82nqq8DZ+VYGpeHrPcJWjBm6kjofrXY6tZ3NjJghlGfcZrAubhWbDAkjPJ5rKStoz1MPOWjTPL/HWlyLC7QjLKBn1H0rx/xBpwmiIlTBboCOpHqK9z8SRiSZgZWKEZK4OPevJ/EUf+tGxsqw7eh/wzRTfKz7DDS5oWPC/ElmtmsnlA7AeY2/hNcpbxi5tRJIil3JIZhzgcZ/Ou+8eK6W88qgAyDnPrnNcVqUsdlp9u4BAwFUdzgcn8K+qw8nKCseTipKM230Dw/o5dLy8kwyxHau7+Jv/rDn8RWRdr9jvZDINhb7uffPNemwQ2v9lW62kYFtsG07sk55OfUn+dcf4r0s+WZF+faf4eCPwrro1eaTTPmsdTcoXRkW2L5fMEx3FTwDyDnn/Go9YuJvsKxvITuYAtuzu9+KxodUeznKnd5YycdcH6VrQQpfyIHlCxDMjSHjC9/oe1dEqfs5cz2PiZrldjq/Bfhpb64huJ0zBDtft84BHX0HrX0/DcQyeHRPDCrLahYzGvA2k4B+nHPH86+Y/DeqGRyWwiPyirjp02n6j9a9Y8L+NJdFtlUwte2U4MM0JYYYYAI5/iGP5dxXwec06tWSfboSlc988J+JUW2t4WYJg+WOn/AQenoR+GPSvQrfxYlhCBNOFVztyT364+v86+VLbxZPBKEtZrW6giO3ZNMsU4Bxw6uVOfpkHqKoeJfjBqGnwz2JhR03fK7S73i9BkfeAP3W59Dmvk/7NrVJ+4Vzcu59GeJ/ixZaZNd6ZBPJMHQSQMDu2P1MefVWHA7g8dDWh4S+N1hqtvHJFMu4cFM8j1BH+e1fFlt8RUS9Z57iRLrA8ueFgzf7sinrjqGH0NT3Hit/7RbUrWRbed8eZFFkRyHBy49D0z6/nXfLJJJWej7mXtJR1P0n8N+OoJ412y/K3Q9O/SuxOsxXFq7O6sCD3z2r87fBPxsntQqXDsig/cJPX/8AX2r2zw/8cGuLOKMMrhwVaQZJC5HP64ryJwxWDbjNXR1RqKSPqfSJPtNouG3ccfSuU8baT/Y9xDrNuNjxsqzbf4lJwM+4NYXgf4iQ3luVeTZt4Xaev/1vetrxr4qtptFuI2lXbJGRwc/55xUSq0qmH5ZboqS0udjbX0WtaSrJtFwEGGPAf2/KvKdB1hLHxdfwou0tcGTG7kBsZ/DrW7oWufY9MXEuQnOd2egryzUPE8dj8YrtCS0N1Crx/N1YHcRg+gcf5FYSrvERT+1H8TG9rHv/AIs18WXhC6mifZJHAzE556da898Pwx2OgxyAYBIlkYnrjLHP5CqPxK8YRWvw71d0bLeUqBs4yS6jHvxmuX0Pxl/bXw7JjkzOsJAjDZYnaRj+X51dao6jU2r2K+1odV4b1QTTSO7b3b5zjrnOTXpa6/ELNXVsFkAGT3PH55r5V8A/Em3mYkOQy5BRidwIPII9a9LufGRXTfNh+ePG7dkEj/Ee1FGpLC3jJamkJcyPT18SxrI2ZAgxgZPoelc/rXi60j1UsZCwXZk9ecZr5/8AEHxijs2kW4cLJngEHDejA+teb618d1mutrXAVpAPkA5yOMf59amVPGYxWjG63OatX5F7p9hXXxcWzmBNwkKdCpxz6Y/TvVe3+J1pNKzzXXOf4mAH/wBevh/UPinqErMSkxEnKYQnPbqRiq2m/EnVmmMcFrcSz55Krk/UAc11vL8xqQ96RwOtVbulofohY+OLe8KRxSDP3VweW+g/xrbjvY54dsj9s7hyP16mvhrwj8ZLvTZVinyk+MOHwrjHbB5x7d69q8N/Fprq33PO4T0CrnH+02fl+gP49q5HLE4d2rK52U63N8R6HrkFnPqEsRXiUFJB2JI745r56+I0MPh+SWxPmGSNi7SSJ8pOc5GOOw969Qu/F9pqET3FtPHcMDz5LE4PpkDGa474ovY+I9Bhu4G+fiN3GMle2ffp/wDWzXNh6ijW97S7CdmtDwfwnbsusXcywNLdSMWjj2s7fMCOi5689OTk8jrUGvXiWsLJuktdxwkbHBlYYzgDpg5ySf1q/rEKWFvHc2mnrdBWZPs6Hk/KMuScnjgE8AZxj5q831KS8vtYa7uZJZ5UiUMwHyxLgYUe2MCvuqNP6w+dvTsRHQ6W60K31DwrcWzShri7VkM3OXyOB34Bx/WvOfDtg2mSy2F0nlvbko6Pxz6/Qj/PNeneGbGM6sluJYmkK5wucpzwAe/HOBxzT/iR4TXSdW028tXKCc+TKc4OQNyn64BH4CvUwdZwvTk9Hqff5LH3L
s5CytPst/BLGMKrfdzz7H6V6zeaW13pUJRC7SPGoOQCx9OeOn4D8a46TRHtZrd5Q8hdhu5PfnnH9K9ntdIa8srJYl2Mi5J6KMjHqOTn9feniKivFo+0iuRFvQtJt7W3hhUDzQvzEdASOT06/qa9e8G+FfLhjuLmMMc/Ih9PU+9c34Z8NC3EMsqq5UhsMOM9utehW98YlxgL2GTXn9bni4ys2+WLNK43NhMADFSW0KxqrFsuRwPYVTjdiNxYY/urV23kdoz0GR6UKGt2eFPREV3fFI2wvOcfMOc9c1ki8EbDcN7MQFGcEsasXbNI+eAuemMAUrQ/Z7dp2HO0+WN2Tz3P1rbZXI5UcjewfbtRMhHHQFTnjpWb4Zhl1DxRqt3Ku6G3CxQSHkHOScH6YB+ta2oR/Y7W4cyYZlbaSwG3JA3HP1rX0PSf7N0eCEhg5Xe+45O48mvTyPDupiHWe0TnzOuqeH9kupDcJis+VC2etbNxD8pqhJDX6LE+PMuSGq0kOO1a8kNVJIa0TGWYI6vRJUEK1ehj6cVmxjo4+nrV2KL5fWmQx1dhhxiouIWKI5HFX4YTUccPpir8Ee2iWxaHRQ+1WFiqSKPOKsLHishnk/xu0adbTTdes4lNxYTL5j8cREgHPr1P0yaisdPjvI45YsMZlEnHI59a9U1bR7bWtNuLG7hE1tOhjkjPcGvHJtD1L4Y65N5jfatEuWC27YAwDklTxgEfyAI718VnGDlGp9Zivde/r/kfS5fiFOHsW9VsdNoljcWN2p27IwdpHOfpXYKSNhAwenArJsNUgkfJJIfaR78ccjvzWjcXEXksVPzhsqBnn/OK8ajaL0Z3VLzKPibzPs5JGVbBI7V55qrRgny/l/UV6DeailxCY5SqqRggtwfx6fyrjte0dJ7dmgba4547VrU97VHThfd0Z5h4svJLW3MiICM4OQOBXmviC4XK3CndC+4HIwenGRXpeuSSTefbXcW3nYflwa89utHWbTbmFwylTuRvX3/T+dYpo+xw3uo8g8YRia3dBjIBPHt3/KuA8VWoaWziLZCwq2fc5Y/z/SvTtUjDXjQOMNJlD27EV534mjYyRsFKnYoHqCBjH5ivosJK1kcGKhzNlXQ/FC6bDLp0xKiJt8W7HfOV/A9PrTbu+lvJOA0u7j1xmuO1KNlu3lkbpyoAr1TwRpsWoeGZLyEbSw4LHkEHB/r17V6GIcMND2vc+XqVW049jzm80lFumWVWaTrhTgZ6A81pW9uGgkjc8sqgsvOMgn+q1d8QWqQ/vmfa8m7y8cnjv/L6Vl6GfNBBJV1flAfmX0+tV7SVSlzHx1d++buh2r2bLG0ZAXkbyNrA+h/L+uO/UQ6s0cS2+5lj75UM+PQqSN4B7jkeo6HCtbxLWIR5Lp1VcnIJ64z/ACqjcXAvJBAFUE/MC54UD+I8cY9vp3rxqlP20m5Izj2RLrCy3B2TXeIt2EDZZuegRep+nHbpV7R/BMC7P7T1SHSYMZzeSjf7kKMcnjqe/NVLRSswS1uGjLHY99INrn1xjO0D0Bz0Ga6ezvvCvh2ET3ckDz8YmnAfBznc2c5OB0zjk06knTShC/yR2UqUXqyWz+Hfw91GQRTeLYInbgTSxyhM+vAOB+FbVp+yzq2uWjXPhDXbTVowC6raT78j6EAn8qr+H/i58N7Jlkvr17l1AA2whwPU4APJ9+AK91+EHj74KeKtQs7b/hJ59C1WR8RztfTWhD9MhgUT04Oc5PXpXFz45T5YxnbzSa/JHTKnSsfJniTS/Evw/vms/EenyRBFK+eiEMh7bgcZHtx9a6Dwn8QDbQoolE0ZAJk5xnr+fUYPpX6AfFb4T6d4l0H7HrlxBroSMLDrEMYE0YAIUzbVClSMgyAKCccevwR8Rvg3f+APEP2WMC3gn3C1mxmKQjny2I4JxyPX0pupQxDdCvG0vw/r00OKrhmo88Nj1Pwn8VUtLZTcXcYhj+bdIwDA5/zxzXaaN8XZ/HN5E8GYtEt2zNcTMEMzDkKgPO3uW9vwr5Dg8QLp999m1KxjFzHxtlQEfUHniuqs/iFc24UgCS3QZaFi23HYYXbxnseDkV4uJyKOsqcdX16fLucfO9mfaUnj+GHQwDcBZromKAc5PYvjsOv5ivI/iN4xaT4jW93Ztn7LIoBYZxlR9eyj868h0z4lXt1qpurjfLcY3Ipibyx1AxtGMD0/E112iarptxMtxqE9xK+PNdo7OQhBnJydvLMcZPRVAxXkRyqeCk5SV9PzE7yeh7J448cLrXhm60mZzHdFDGysMBX6gZyeMjj615f8OvHUujySWUk7Ikq5X/ZI4Jx6g9vaug8R6baeOtDGp+H7mOW4hHPktu3AHpIhwynJ64/OvFtTS/066YXFtsmDZ/dZyjeuD6+31pYPDU61OVKWj7PdMHzJ3Z6TrmoWUOqXN5G/2C9YhpFjOYpD2bbnjOeorKuPilcW0IEvLrws0MmTj3GAa841TxXuT7POzJOo6iMnH0Zc4+h4rlIbPVfEWpNaQtIseAzyyoAEU98YB+lfQ0MpjON670XVju3ojsda+IT67dS28J8+QDc248Rj+8xxx/OsbSNB1XXhJL55tNPVtj6hK/kw7v7sYwWdv9lct7Ct/Q/B9jaxjzQyaZbSIbj5wJLmVvuxg/3mwfoMn69ba6rZWNx/amtPBb+SMQQqAsVqn9yMdh79W712e0pYWNqEfTv/AF/Wp6WGwyn8RU8L/AL+3GUiCS6JxmfUpHwfpEMtj/eYfSvRrX9kvUJrYfZZNJhLfwLpLj8280n9K5y0/bU8F+CcR2tpJqUi941wv5muz8Kf8FPPDMMiw3nh37LFnAkkdsY45O0N/KslTzis+bkko+l/z/RHW40aeiav6nD+NfhF8QPh8nnT6XJqmmKv39NuZ5NnuI5ckfQZHt0rhvD/AIpm1K+Mv2+S7MZI+yTjYVxn7yjrj6fXFffPg/8AbF+GXxD0sNM6LG6tv2bZlUAfxFclfxWvnT4yfC/wb8QZtR8XfDicm4ibFxbr8rFwAc7MAg/XII/Ok3BJxrrXa9mtfNP80cdWgpK6RV8N+MnUQxXKyBeyRrgJ+agH8Mdua63UfFAuNNuIbdVc3CqDlQVJz1wc8nj64rxvwh4kTTwlhrloxlZB5eWKrIvqjdD9Dx16V6L/AGcW09JNMZJTN8oiZtrKxBxz2/X6dq+NxeFVOrqra/I81por+L9JnXSStraPeXsoEaRxDcAxbliRnOCSAB3PXmuRs/hnf2t5GL6VfOnOZVx90nB/HA7jjjA716TDPdR2cRcyZt38xYzudGcKVztxlgMk4Ix1zXTeEdFMjLf3UeZ1/dq033hkZLEcckkD0A7d6unipUKVov8A4JUPedjyvx/8Nrvw29t4k0on7PbqGu48Z3oTjJ/HH5iqHiLVofEX9iJGm5lYzuVPRQpUE/XJr6V+MFpaad4BvjLGPsxsm3LgfeCA7fzJ6e9fK/w20ySaR7ySLCNiPa3YD19PWvfwNZ1KHPPeOn3n6blMVyK56JqPh3zvD6zKojMQVmY/xEY6
ZPoTXe+CwlxbwjuFCle4GB+n+cVBrjw6T4OfdjcIiORnnHHBwOpGOD0qHwDdTDTIpZFTbjBAHU9u/wDTrS5nJXPfl71Js9L0+RY5AB86j+Ff557D9a2DeJeLt+yrjGORXMaTJLMSmWBPSM8jp16e3610trGyhWCk54BA4rrp7HzVZJPUuvI1uqtJzngY/lVy2vkhjABZiw4bH6VAluTGzTMPlH3Bn9f8Kri2E1yWVgsYOcf0q+upw6SNSO3FwN0qDbnIXqPxNUtSm8y5+/8Auofnds+n+R+dWW1DyoHUgbj8oz3PYAVzmqRX+rXI0fT8SyykG6mONqLxnJ9hjpk88VlUbm1Thq2Zr3bzm9jP0G31DxNrN3KJDbabHJhgGIJXAGzHTkA59iehNdxND1q1puiwaJp8dpBkqvLSMMGRj1Y+5pZo6/QsvwiwdFQfxPc+TxeIeIqc3ToYlxB1qlJBz0rbmj61RnixXqrschkyQ+1VJIfatOZOTVWRatXGECdK0IVx71Xt4+lX4kPSpkCJ4Uq9CnAqvEtXoVqepKLEUdW4Y+lRQr0q9DHVSLJYY8VZCU2ND6VZVKz0HexEIqhv9Jg1SzmtrhBJFKpVlYA9Rj+taCR1YWPpxWUoqSs9iotp3R83WWpah4X8QXuhaohjMBYRScYdOqkcDGRnp247YrqrLVpWw1sylim4LJkHn/PWu0+JHwn0z4iWZMu211SNNkN6qgsBnO1vVevuM8V4Lreg+MfhzJ5Wo2rPpqnbHdQHzIz1xljkr+OK/OsdltbBzcqSvD8vU+0wuJpYuKTdp9u/odPruvXzSsskao6nBCjGf85rB/4SaW2co0gbPB3cmr/27+1LItuNwoAIdFBIyD1x9DmvPfEGqQ28zQXkih1OMkgjP1z+teOqk5bHu0acX7tjc8U6pb6niUMpdR823qPX3rg77UoFY7h+9+4qn7rg+nrVHUNSvoWIs2G3Gfm64/r2P41zWoSXeqIYmG1xhx1Bzz0rqpwnvI9inyxVrmT4s0ndcedGQA2HV+vzf5/rXNax4fXxNYvcW+1btPmlg4yDjlh9cc468Hsa6SbVprFzHqloSh/iBxn+h/ECs2+k02ZQ9ndy2kvYcrj8RkV69OUo28hVI8ydmeK61p7QvJHLCUkQkbWrsfhrOg8J39sx8tI5dzY/iyFPP/fJ/Kqvii3eSbdKfM3H73A/UVh+HtQfTY7+Jo2jt5VBDdQWXoPxya92sniMPyrfQ+NzBezfMjT1S1bUtatoRd/ZywIjJx1ycD645/Oqd1pLadITd20junHnWnI/FT0+lGm3X2y6QGLzRsAdZMbcDod2eDmtW5+32+6SO0Yx9QPOZlx7EMPyzWTcqbUD4up8TbMqA/b9qJOArdN1u2c/QjH5VauDb6XYyKXxHn5ppsKXI74Gemfu547mqDXWo3UpMCRxbzjIG0fizcn8662z+Hn/AAk2mvY7lm1a6QpDJJ3kKnaijoM54+mOM1NSUadud2Rph4e0kkjxTWvGEst08GmkMC23zHGR+ANRat4Nm/4R+fU7q6lurnbuUsMj3xUMOjm2urq2aNo7m3OXRhggg4Ix7c8V3Nuv27QzbydGjK7T2yOT+tfSSlHD8vsj3cLhFWjLn3seFwTOtwhkkfGeSCcivtP4T/sX638UdCg1LwX4pkhWS0+0q19iS2Y8AxyAE4yT0KsPWvjPVNPfTb6W3kBVo2I5Hbsa9C+Ff7Qvjb4RxSQ+H9UeG2frBJkqPpg9PavXqKU0pQ1R4cGoXhPQ9PPj74kfsx+N7rw1r7/YplUM9tGQ9tLE/wDEoHG1gOqgHqD6V7vp/wATdE+NngWWxmk+zankOkbtwCCSpR+20468kenSviPxD4p174reNJNZ1y9kvtSvJFDyMPoAqjsAOgFe23HhGf4eWNpq1ug8lgvnRHOAT0ZcdGH17/hXyua4TDycE/dqPa3c9PC06tWE5wV4x3O28QfDlvE+hTTSRBdVsnaGU453Lnn3DAA59CK8q8O2ztqk2n3kPm87cSYDKQccMfp7V7/4Ki1a8hi1NWa4s5oQJABmQx9iB/GBzjHI5HOCBy9x4STU/Hl6LaON4JkScOGCj5sAnPXqM9PWvjaGMnh5VcPN3itV5d0eXioRupwNXwX8E5tYiS4t9RkgiY4HnKHAPcY9fX5q918G/B/xDYQwtDqdrK0ZyZJLJmGPTIfNcn4P8G+TJGIfEUWkTBVBe83pj/gaE5H1H4V7R4V8E+KnjJsvHWm6ge21kkUf+OA18bjcVWrStzprzX/AMoxbV2jlJ/h34vuriW2srTQZhyHkDzQbCeeVRScc8c1wPi79n3xjbzS3N5bWIRVz8sz7D9C6jNfR95ovxH0eEH+3LB9q9TF5Yb0HQetcp4i8N/EfV9Pmkm1KzmhVeYoZt344yBn6muSlWnRejSfoaON90z478SeHbnTbeZrlbeMw5J8olsAe/b8K3bHwbH4T8K2zOu3ULwGaRSRlZCMkE+iL36DDGtnXPCbp4iis9S8z7XvLuJHG3avJyoHqB3P61lfFDVv7O0t5LiceQY90uR0iDZVW/wB44YqP9kc4NfW06sq0YUk73d9PwNcNS55a6HAat4otNFh+1vLttrXd9nVurEjDSEerfywO1eDfEHx3feJJDNL5gtmbCp/Bntk9zjsOlbev6hN4s1ESyRulmvMcI/iHrU3ibQ7fU/Dk8ELl50AeIjozDt68r29hX6DgMPTws4zqq8n+B7Dws6tKTjolt5mZ8ABoeofFDQbHxC1vZ6beXCxS3k1tHceSDn5gkgZSe3I4r9MvGn7N/wAMtPm1fTZ10XV9IjsFmsr+RbQOZip3IQiqVwApB759q/IFyytzlSpz6EV0Nt4+8Q/Zntf7Xujbuu0q0jHj69a+orUpTV4Hzy6Ju1jc8W6PD4V+IGpWOkXDC1hmzCyv8yAgEDI7jpXo/gf41av4D1G2vzva7ACS3kYBMsfB2yr0bBGc5Ge+c15VodlJLJJcXWTJJzlzznrmvcfg34Xh17VL6WaBJLOK2VHlkGVRndMHk4+6rnntmvFzGpRjSbrq6SPZoYOU6fZs6/Sda/4Ti3a5t7eSVJHaTy4RvCOewHXOeueSMV3fhbTfE9rGVvLWa0tV4VbiYKAOOik//r4+tJ8N/g5b+HrqbVtDle4idGKxAFlAUsTIAAdoACt3HzbTXr1lpF1NYq97epcyQpvTyEHmFT22nkjkdF4zX5JmWKpKThRs4+d7nlVIuLszGs5rtdLu2WSMQYzc3QG8BfRnOFUZ/hAP41u6PqMyvHcW0v8AosgjV4njA83aSVck8k5Ix9B71Zt49Lt9rXcVxfSZzH/aEqhEPQELuGP97qMcc1X1bVEcTtBbNeyxjcrRoIreLjnJBJY+5z9BXz3NzXUFqRFdblL44eO4rqxTR5ZSzXkeweWNw25w5AyM8gf5zWH4F0+20uzt97R7ScqmAzyP14Hcj8uMmvJ/EWo6n4j8fSNqEiwLZIIUU/cjXrzj3PT1NdVp/iaWKLyNASW9u5P3MmpMhGwf3UB
OP6fWvraOHdKhCF9Xq+x+kZarUEd5401OTWb+20G1AZy4luVU8KvZM9M57n616Hoekx6faxwqVMezcTn+lcF4B8Ly6X/pc0ublznKksR1ySemT/KvTLaySSMs245757H8P85rOVvhWyPVr1o8qhHY3NHNnFKGkK9M9sduDXQXGs2y2/yPGDt5Cgcn8Oa8/vLgKqqoOBkbcc4+n/16s6YZ18pyqjDBgsnAH1HfHvW8KrirWPEq0VL3rnWRXRvi3D7B1G3A/nU13dpax/IAMYAHueAKhkmMNnHJLIsju+xETBZvw9Sf/wBdRSYRhJOUWZTtiijO/wCY/nk89vWtJSb0W5xaLV7FW81OT7HLPGMTJ8kYU9GJ/U/TtXXeBfDs2j6L5l3u+2Xbec+77yr/AAqfwOfxrI8P/DmWTUBqOqTyCL7yaeuAD6Fz19MqOPX0rv2z+NfV5Vls6D9vVWvRdjwcwxcaq9lT26mdcIRVCYGteZd2aoTRnmvqYnhGXIOuapzKa0pozmqcy1oMy7haov3rSuFrOnXGauLA0oLcVcjt6SFelW404qXqAkcZ9P0q7CnSmxx1ajjqASJYVq/CKrW8VXolpNjJ4Vqwo7UyJasxrzSFYWNcYqdVoRKmVal7FjQtMuLSO7t5IZkV4pFKOjDIZSMEEdwRVgLT1XNZtX0Yz5L+JHh+X4V+KrkWxmTSr1mmhzGGVF7jIzwuO+CAfSsHXoNI8SLD9nniW4Iy7M4/Pjvye1fWfib4f6V4wt5YtVjNwG/1Z+60XptI/Hk5zn0ryfVP2RdGkV5LDWbi3nGSiyxAoPQfIVbpxnOa+JxWT1nWdSitPl9x9lhM2pRpxjWb5keAah8ObqeEfZ3+0K3AKHcMenSudvPC+qWd5GksL7VXa3HT39uleveIPhr4m+GJaSVi0Dci8UtLb8HoScsuf9oY/Ksy58eWLQxx6jGiS4y0ykMm0enHrXmy9rRly1UexCsqq5qTujyK/wBHu5oSl5GskGCQFHIOOOD7+lef6j4XiuLiRY5hZZPK4J/If/rr6V1C90m/jL25jaJuS+Rg85/wrz/xLeaXbrKjJGWx8rRkFscen+NVRr68qR2c8mtTxXUPD9vYWxiYfbmI3AyIVTHpgHrx1+lcg2nNcOwlVlB3MTjg5YnAH411WvrNcK7wnIJyY+Pu56HvnGOfeuSuLx7EMHXlflKggkelfR0XJxsnqfOY978yH29ukNwI0A3E7Q2AccjP6V6t4b8P/araOWSBQhAYFlXcB169en4eleUeG5vtOsLMzK8cPzFWXgnPAPr9O/SvfPh+v9qLlGdMt8q7vfnJHU+309K8jNpzowunsfHVLSk7Hm3xS0gWq2s8KBFZvmYjpwfc8HFT+Gd+r6fHawOIr1cPC+cc9R9eg/KvTviZ4LOqaPKfK8wqMr6//rrwzR1lsb7aWYPuDI+Tw2M8H65/Ee1ceDrrFYblv70WFJuMro6vxb8KIPinJNrOnvFpni+FQl9aSyJHDcEABWLMw2E9Fc/KQQrEMAW8sh0W+0C6uLDUrG4029tnCS2d1E0ckZIyMqcEZHI9R+FfQOg+ILTXBayyXUmk+ILNj5V/BIY5BkEEFh1Bzznj1HOD1l1cQ65apZeKPDlj4gsolIhkteseR/DH5itECcf6mVE4+4a7qeZOhH2VVaLby9O67dUfU4fFezfNa58eeJPAsHiRRMI9swGBLEM/n61yL/Ca/hfAlUx54bbX3fY/AP4b+IYVe3uPGXhq6kO5o3g+0QjnopeEEfQufqa2I/2YPhzoY83V/EutahEjk7ZpoLYMOMAmNZDj1ABP05r0IcQU6Mbc2noOusFWfPKL5j4++FXw80vRdSS+1a/SR4TuSAdfyHJORX0hqPwt8QfE7S4V/syXQfD04Qm8u4S91cruB2WtqMPIxXkMdqYByw4Nes+Cx4Q8BvqKeD/DVrALhcDVGhZpY+BuYTSsGOSAeQqDA+Qnq9dWvNJguGtpFsI7hWWe5mYxiRMk4dzhnX/YUbOmFHSvmMdn0KldVFdvpfp8u/bf0MvrzpUXh6EOVPfu/meWaFo5+EUdzbwXCy2McjMkM0geWJhwMleNx4ztOc84UVt+D/Bdt4+mk1O6tHtlmk3/ALvcpRQSQwIIwTn8ePrV/TfhreeNNRkuSJP7NTDLPKhTzhjOUQ8qncZ5PtXcQ6n9n1G28PaYoRsBZ3QfcHQ/jjtXy+Kx0m3Nv33q7HhRUq9RRRe0XwfoMNwkVtHJexRHDrvZwp78EkZzwPU16Vp/w9triIC209odwAOGVj/6CMfmKk8N+E5tIt1W0iWJTjLMQxJx1+v0rtNPsbqFR5ku/PHy8YHauOjh51NZ3Pbjh40467nN2PhfUPD7BlvLl4wflgExCf8AfJ3CpdZs59ctGjBhtbpVwrugBJ/3k2nt0Ir0axtUmU71IHYCqXiDQbe7tiVRo3I6nvXrzyqcYe0hr5GDVOXutHxJ460S40DxJK+qw7pnj8lJmbcCM52g8ehHQcGuA8SfCyf4lX1uyXNndeWzFtBvXNv9oyMBlmVhhsYxn5Qf1+w/GHgi38ZWlxYXFus11En7uRhnd7H6Y7V8w+JvAWp6bcS2jNI7W8pMTK22aLnja2QDx0zjOajCYmVNqUHaS0v/AF+lvI5bewfKzxrxP8B20u8FnZO1tqLdNJ1xo7W6H/XKUkRyjJ4OV57c4rzDU/A+s+GZDHqVldWMrH/UXkDRN+G7qO4xxX1xYeJrpdJOl+JLKDxRoyth45EVZ4uo+4cFWAJGRtPJwa6fw7oulahpr2Phnxdi1Kn/AIp/xJCLiGL/AHRJhlHX++K+mpZtXpL95Z+f/Bs/xXzPSpY6UdJK6Pzv17wDHfO1wYXidurqMbj6nisa18IwaXN5gLPIq7vnII9MYr9D7/4DS3EzLN4E0O4QKD5tldvZqCRwAuV9egyODXOD4EtpuoO8HgXQoyWJ3ahfvOg5P8JJP5Dt7V79HiSMocrT+9f5lt4SUudR1Pkj4d/C3xJ8Rr5l061MdguDNqE4K28C9PmboT/sjJOK+qPCvhHS9F0m28M6HFNdacrYu9QEf729mIG4J1G5sbR1VFPOScH0rRfg5qWqWHma3qdla6RbgsIbeFLewhHchdqZ98BewLHpWg2oaPoMA03wiftM4IibWJotu1c8rCg4A6//AK+ng5jmtTFP3lyxWtv1b0b8unqUq0YJuHvP8C5Y6engmyktINpvZlCXIjO5YsggRA98A5J9T3xz7l4Z8M6dd+G7ZZI49rR5BIG5cD73PfGPavGvCugy3N8gy08cS75mcltxLDA9yTjp0zX0BZ2sVjo9sq/IyqCNp5HA4+lfBKftqrqS1PnKsuabb1PMPF3hSzt1cQQbZYnG4jgE4OMDtn9OMVwGpalAbaWNi3mAAgsCPwOen/169a8TXkbwTK77C/Qnpzgf4V8/+M9QXTdRuGkEixOrHGBgMDypFZ4de0ny2Mb22OTu/AtnrGv3Et+8gmIwfLXgtjGTn2B7V6R4Z+HA0GOM21pcTWj4LkYwccggrk
4zj5eleVr46ittYhx+8VwWZSp5GeBmvcPAfxLjvgqJGY2wAoY4HTjIr6lOsoqM3ZH0eGxE3TSR22g+Bft9uj7FtsgDIXDfjnpWnfabBocjWxRp5goAjjO78eKjs/FX2YrK8tuST95Nq8fUYNZmsfGHToboCzs/tNySQzINx+h545rp/dqFk9TrhKtJ3exo2fh+4vD8tu0anJJPy4yf0/8ArUy4s7TS5Yxe3lraguABgN3GCM/1GKXT4PHXjazZ7O2/sizkU/vZhzj1AOOfpmtPTP2cdNkkW71q/uNQvDyx3fL+o/oK7qOX4nEWdOGnd6BPFU6ek5/JalPXdf0TQ9HZNMJ1K/kBBmb5myev1PPQV1fgfwK2iWi3eoO1zqcw3EyciEH+Bc/qfXNdBovw/wBB8Psklnp8fnL92aUmRx9C3T8K3HSvrMuyx4Z+0rWcultl/wAE8PFYxVY8lO9ut+plPFioHWtKaKqkkdfRHjlF0qpLH1rQkU1WkWgDLmi4NZ08Z57VtzR8Vm3UfWnEDEuFqhPGcniteaP5vSqkkOVPFapAXolq7EtV4lq5F2psCzFHVyFaghq3AtZDLEUdWI170kS1ZVeKB2JEqzCtQIDVqEGpuBMq9KlVeKRVNSqMUDGhaei80HinR570dAHUtFHSsyiK6s4r+1lt50EsMqlHjboykYINeN+PP2VvDPiSzkfR/N0XU+WjlEjyREk9GUknHXoRjPevbEFS+XWNShTrK1RXOilXqUXenKx8dap+xPrjXBFh4gtWiAyrXG9cHJznaPx6d/auauv2G/G97vE+u6S2PukySkE/98fWvurYKPLFcX9m0E7pHes0xVrc34HwhH+wH4nkRVuPE2mscY4WY49MYH86+YPjF8Pbf4f+K73Q/wC3tN1m4tl3TTafMWVX5yhyPvDHIGf6V9rft1ftHr8P/DJ8H+G9TMPiG/BF7Jb53wW+CCocfdZjgcc4z0r8xrq5dd9xK7CM89etezhclg4+0baPFxedVpS9ktTp9Dna2huFRlWVuAy9RkY/r+pr2b4TatLYxpBHkxsvzdMjoc5weuOntzXz/wCGNRjuLpyysvG/gf1/z3r0zwtrRsbqQZQt0QdducnOPy/Wvlc8wvuygteplTberPrm1sRrGjCCdwDIvDEEZ646jr329OtfO/jTwcvhnxBdW/2RzCx3o20lQCc7TjtnkHOQc4717L8L/EB1TS47W6ujG0Z2BWOTjqMnpjOePevV7bwrYahb+VexLcxysoHyZB9M5x+dflVPFSy6pKNr33Ou3Nax8aWtvY3MMOydbLUY3BjumYh8c/K4yFkXoMnnHBFb2k+G/Ethh4Ea6hY7l8l+Py7fhX0L4u/ZV0fxEsj6a/2CVuQAWwD24PI/zxXnDfsreOPD14JNL1BZIxyNkgU5+gFetHMqVSGsreUl+pspSi9ifQ7bxTPbJHLb3lrwM7iVA98l1X9avnTbLTJBcatcieXoI4iJ5WPoHO5B/wABDY9RWvZ/Cf4kTxRQXd3wgALTTcDj/az+ldV4e+Dun+HWa91u6/te86m3ViYs9t56n/dH5V4VbFrX3l8jo5upjaCtxqcbXkFvDoumqObkr5kpx6SOCxb/AHcKKbo2gjx3rryBGXRLU5/e8yXUg53sTztHYHvjitTxHqUutXkdjAqtDnYsaDavAztGOAAOpHQY9RXa+FdJGm2sMMaeYw/iA27+PToo9ugH1rgpzlKXN9xx1ajn7sWa66R5dm4iX52By2eTn8a+XviN401L4O315r8lpNJbzz+VuRMhCeoPp0XBPrX2tp+nxyWuJAN/U+/qap6v8O9G1/Trqyv7C3vrW4G2SG4QOrZ55Br1cPh4xqxk480Oqv0PSwdRYWoqjVz5C+Hv7Wtn4kUx3U99o9xnZG1wzSRH15XOOcdcV9F+Cvi2mpzDTr2WIXSgPvjfcsq9Mg1ymtfsoeHbHd/Y1r9iVzkQRZwD6AnJxXO6T+zZqXg7XrPVLHU7+/t4c77Jm3qqkfw4AIx6c/zr2avsebmo3il0ep9a8Tga1O0n73pY+mNQ8YWnh/QJNRuLlYLZInmknkOAiLyST7DNfMmpft0Wt9qFxDZ2LGwjfAmmuF8yRc8Ns28DvjNegeNvAerePvDkGibZl0+d/MvWbhpEQgiJVzlctyW5wBWh4K/ZZ8KeHYYp00OxN2CHMssAlkDDuXfJrX26lFppt+X+Zy4ergMPFyrQ52/M1fh94ik8WzWGoeQ8f2iJWGVKkbl9PyrS+IPwzs9chZ9rLPzsmj+8OQce49jx1xXdeHfCNvoynbzJ1LN1rXmtUmyr8d849q4aWCl7Nqe7dz5vEVI1Z80VZHxz4r8CjT7qGHXLdk3ny7bVIWZOeylgRtbHY5B964HxZ8Nb7S7dpfmvbMn5bq1IDr/vJ9wn/dKfSvtHxR4XttStbmzu4lubO4G1o2P8vcHBBHIwDXzhrM138Pdam0+9j+16bIcRSy9JUOeD/tYzxxnr3rz/AGlbDSt0/rY4+ZxPIdF8Ta7osUkel+K7iAq2Psst40QDf9c5Ecf+PGunsviB44m8gTeIbxSPvNaPa4P/AALyNw+oNejn4J+FPiNapd2lytnNjJUYYKfTnOKzZf2Qb2KRmsL+xlVu8qEA/kAf1rvhiYVPejv6Ir2y2aOK1fxHe63bhde1S2ijQkrJNcPc3J9cFycfh+Qq34es21ArDpiNAhG03dyMFVx2Hr3/AMiu50P9mHU9NuAHuLG3yOTCDk++cZ/WvTvDXwW0vQwsl3cNcFf4M7UP1Hf8TXLiPaVVaKv66IHXfLyxVij8PtGWO1hhgjYxxctMVOWHOST3Y56duK7HxBMDD5Q+XtjOMe1a3+jW1usNqoREGFAHSuJ8TawLZjG0eSV+9nII9elcM6aw8Gk99znZzPinVVs7OREAbaByeBnnivnf4iahPdrvK/LMdsjZI29SCfXoK9N8c+KNsEsYXLZ4O4HP4A8HOOteC+NtdK2bfvW3OrN8jYJGMY45Oc/rXXleHlVrpW6mEnYwLedJr3FrdW0d2x8vcWO5e3OD0+nrXvnw7/Zx8aeOLFLlPFdlp1q3ys0Qd2/IAc9+tfn7rWpStrEztNuj3EKwbr+PrX1l+xL+0DqPh/Wl8PXdzJNFIN8RmywIGMqeegGCPxxX7ZT4bpOKle77HP8A2rUw6u17q69j6w8I/slT6BI0t54xutRkkGGZomTAxyAN5/z716z4T+Enhrwl+9gsEubwtva6uVDuT6+3866TR9Xg1qwjuYGBB4Zc52t3FXs1FPLsNRnzKGvmek8bVrRXv3TGuoK4IyPTFVmX5qtNmotvtXpHLcgZTUTVZZagZetAyuy+tVpE68VbYGopFpkmdKtU5FxWlKvWqk0dOwGfIODWdcjOeK05lxmqEy1cRGTNHVORa1JlqlJGPxq0IsQir0a1Xhj6VchWpbKRPGKuQ1DGtWolrPcZbhq0i9DUEC1cRelMBY1q3ElQoBuqymOBRICVRT8U1VqRRUDuCrzUm2lApcVLGhu2nbS1AXNSCmtNR
iKu2pKbUgHtQISvP/jv8XLb4J/DfUPE88H2ueNlgtLbOPNmbO0fQAFj7Ka9A/lXxx/wUJ1KW9h8JaCk/wC4/fXk0AX+LhI2J+nmcfWujDUvbVVF7HLi63sKMpo+BPGGtX/jHWr3U75v9JvJWkIUkhdzEkDJJxkn3J5Oa8+8QXCtIIvm2rxtWvS9ctRB5z8KFH5cf/rrxnULg3EsgUkAnJ+melfTYiXLHlR4GCj7WbmzV8P3zQ3wMrJEmeI2JJ6HAHJr0Syu4o3SU/72F7A/SvHofm2hAxYH+EZrvND1KJoxA42MPurnkD0/z618jmFD2sbn0UU0z3/4f+IFsLqAxhdhCkYXPpzntgmvozwb8RLhUhL3LSH7u1vmHPbH6V8UeHPFMui3sZlRJYFOcMARXr3h/wCIFmxhn80Ryrj5FGe/X1Hb2r8jzTLpqXMk/U2jeL0Z9raX4qkvIY8NbsGA4B24PvyfXtW6utkbUYxqDztU7iR9OK+XdD8fRiN5Dc8k8YJ6egx26V01v8Rtq4UpIHGQYwxAH5ZPfrzzXx8oVINpnSpXPbbrWxM2wSeWxyd6gE49s8DqBXnXxF8Y2ujWBiWfMjA88sQM8n6k8D3IxXFa18XodKspJTOSR/z0AHT16/lXkFp4uvPGmvNqc5/0VXzCrH7xGcufpnA/E1dHB1MQnKatGP4+QpVOiPoHwb5NrGb27XFzINoB58teu3356122n619qu444Mlz8zOpHyjrjpz7/wD1uPn238ZszJbq3yAZIx1XHT8cD6123hPxJJb3iiWULI+R8w79wOP84rCtTqpKWxNLWVj3i18QNuXadycAjkn6H1rqtL1NLr59wPPJz7V4xa+IY7e83+YGiK457jPTp1B5FdfofiCBsN5nLEMeR064/wA+9dOFrVIs9X2d0epbElUOvJ9fx/8A1fnT4bECNtudx9Mc1zNv4nhhZFMqKrDgg+3X9K0rfxVaLyZ0CdQxccD86+gjVg9zP2cuxt2unmNtoUrwRwOc+lW1UrjYvQk/r0rLj8Wafu+a4iwOQCwzx0707/hJLR1wkseepCsOP1rs9tSitHqRaRoTXD7RgY96Ysm5y287h13dD61nPrluRxIpGMkk9Ko3eqW04Zkn2svzKwPv+grgrYnl95O4KOpfvZlK72OQzYHp9K87+I/gvT/Femsl3ErpnGD0yM4Geo68HtWvc65G9uUZ8s331U5/zjishvFUfktazoztjnJBVl568e1eXUxMa+kt+5jOPQ+fTZ678LtV82FmuNMklzHI5zhfTI6enOPavWvDPxgjmgWOYlD1+bp9Mg1z/ju/b7HcQRR+aUO8KSC2CCNwB4bIJBHcE14BY+LLez1KSG/jmt4sbobiMkb19SDxuHT3wawp061SLlHddupzc0dj7ctfExuQAjxsCN28MCMfh/8Arpy6pAiu8lyJeRwp4Hr+NfNuj+NLvT7eKa3nW7gdASY+GK/h1rSb4seSuXlLNjiNs4z70lVqNWauVoeyaz4xS3jZIW2nBADEKPf/ADxXlnijxi22QvJuP9zdnA9e4/I1xGtfExpYS0suxeuAxIHt/wDrrzjWfHkmqyNbQYCgZabcSq9Ofc/5xVxw1bEPVaESkkWfFXiCW6vxGrDAcruU8+5PpXinxE8bSbbgRQPLCflXaT0x1I5re8ReKGsLeby3M0Az5kjdfcnHbjrzXgXijxVLq00iNJuRj1BZSPbAIH6V+rcP5VZqpJaI5ZuyEjaPVpn4dJ252Mc5+hwPy5rq/hX4mHhXxhpl/wCY0Yt7ldzLk4Qna/14J+uK83jYKy7Cdvsa2LcptMinGTyB2zX6nTXKeTWpqcXHufsd8J/FiadeWsJmD2d4i/NkBQeobnp1/Wvb+OCOlfGHwb8Sv4g+FegXys8Un2SMMwPOVG08n3B55r6u+H+qR6x4P02eMnKx+U4OchlOCOSfr+NefmFNKSqR6nLk9eVpYefTY6E5pOtPxSMteSfTkTL3qBlq0agkU1SEVmWonFWG+7VdyaYipIOtVZauyCqkgpksz5hVCVea1JlqnJHVIDLmSqcqVqzRVRmStEBJbjjNX4Yxx61ShGMVfh7VjLQEWY1q1CnSoY6tw9KCixCtWo1qGPtVmNaXUCWNelWEWoo6sKOKdwHKM1Iq01RipFqLDHqM0u2lXpS1LKEAxTttLt5p9VZAMC04UppuaQC18R/t4STReLNJnYt9nisWC5Py5+82B69Mn6elfbYzzXyj+3xpkSeEtD1BlBZrlrUsWA+8nf2wP5V6WAko1lc8rM4ueGdulj88tYvftek3kxG1nDAL1wMED+teLtGTI3BwxJJr3HxBppskuIiu2F0Yqw559P514tGh3NuO054H9K9PHS5UmZ5LRVWUl0IYyVJA+UkfLg1JaMyyD94Y/my23/DvTJI/mDD/APVTsHk4J78da8Xn7n2EsKpdDqdJ1/7yXas6f3kXcR9e1dFYyswM9lNuj67c15srXdudykxr/tLmtzRdWljY4kxIuerAE/nXnYjDwqXcTinhpRPVdP8AHGo2LBG3bT1QHGf0rrtN+IFy0YCIE3cksCfw6V4xbeKZEnRLhtyt02rV2TXpJMnfIitwdh//AF18xiMphUesUYcrR2viLXrrWr5LRJGHz4kK/wAA6kAjucGulh1UaXpqyAiFQFRIwcEkcAYzx/8AW968vs5hHMfLUqVXI+v9a6TU5E1DSrURsjODvKMQAQePwI5/Kuapg4Llpr4UTb3tTu9J8SC3j33NwWaQ7sA9/f8AWtG2+LFlb3Xlo7XlwpzHHCpkc+2FyeprzvSdIsFjAu/9NlXOI5JGWFc/7Kkfr+Vb8dxNBtS2jjtI+hSBRGo5z/Djj/CvMrYTD3fMm/wPSpxgkekW3ijx54ikjOm+GpbVCDsm1S4jtAc98Md3p2q7b2/xSSTa2ueHNOB+Vv8ASLib9Fi615nZ6jf2WHhvXGDydwIHcZH4d66nSPFmvLzNqEsithjuLEn6+34158lTor3KcbfN/qd8KtPsz1PQPh/4v1eRV1P4oQWjBc+XY6acAf70kgP/AI6K7Cx+A73MJ+0/FXWw2T8scUCj8eCe1eUabfare3Eb+bMCDuLQ8E/jkgflXY2mnahcMkq6zO3RpFZDhDgf3RXlVsfy/Co/cv8AI0lVjbRs72w/ZtspF3P8VPEnzDgq8Awv/fuoZP2eb62Lmy+M+qxMvzK13ZwyAdccjaf/ANVZtjHerx/bEzeipayP+HSrE2n31wyY1S5jn3KVF3CEjYZ9Ac9u9c39ozlpaP3L/IwVVX1ZZk+FXxQ02ILp3xD0XWwCdq3lvNbOfUEq0n547VzGp2Xxh0nzZZtDt9TUkKzabqEcmQPRX2t/WtLUptS03m41DYQOXUFlx+BqFPiJb6XCm+/uruVVzu8wAZx6dSM9qr28Jv3oRfov8rIpypPdnJXnxk8Q+GpA2v6BqmjiM8y3do6R+/z4K4/Gpbf47WWoqs5nyrcs+7g+uMVv/wDC2Ly4Z0hMpLAnaDhPxHpz9K818Xw+GtSLy3+j
WEt455+wxiGRj2BaPbn8atU8LUduRr0dzzakYP4WdPqXxOhvZLaXzt2JPLPzdQ3b8OteafEq2l0i+XU7Zw1pNJ+8AOVUkfeA9D0PvXORw2MGsW06O1jbwfO8InaQOwOVUFuccZJzW54j8SQalpbx7SU6fe+U59v8/hXZDDrD1I8mqe55042bsczcaxJDHG8BaHYSc27AKffjv71S/wCEq1G9Yo2oTuwyQpUNtHtkiuTTUrjT2dUkW4iUf6wvg+31PbinReJbh2H+hCMf32xz+nv+tfSRwXaKZOux08P2qdiXn80j/no2P61Q1bxA+mq0bzoHUcglh1/ugDmsDVvH1xp9vhUiUgYCg/e/T/61cTeeK5NYlxMvl9eM5IP5fpXr4XL5yfNVS5TroYeVSViXxN4kbV7raZtyAHA2jAHoOlclPZnLYw247v8APJrporWG+zmAMV7oxGPcZqjq9i2nphCssbDJG0ZH6V9fh68aSVOCsepVymaj7S+hzi5jYqfzrRsAPMKE/fH6jms+4DM5JOT710Pgvw/eeJtc07T7KF5ru4mWCNQOrMcAZr6CnLmSPk8TBU73P0M/ZPtS3wZ0ppDIykylPMJwF8xh0x0r68+FSwr4VAiVl/fyBgc43cZxkfT8c14d4D8MxeB/A+m6RCB5dnbLEZOMEgcn8Wya+hvA9r9m8K6aPL8tniEjKRzluea5se7QSPn8pvPEzmjeooorxD7AjNRSVO3AqFhTFYqyA5NV5F9P5VckHWq8i1Yio9VpBVuRarsvWgllKRc1WkXFXpR1qlMarcLFOYVnTqOa0JvSqU3c0QEMhz61ehPrVNF296tw8GqVuotTQh7VdjqlBV6OpLLMdWo+mKrQirScipYyeOrSLUEK1ajFADtoFPQUvWhRgmpuND6kVflFR1KrfKKmXkUIaN1DdaSkhC0CkopjFr5J/wCCjGpLbfDXw5aZPmTag8gx1wif4sK+td1fDH/BQ/xRHc6xoWhmRRHZQNcPlhkPIeBjr91c/jXo4CPNXiebmE1DDyv10PkV1GqaKsbrk9sdiBgmvGte0GbTLyYjJKncR0yCT8wr1rQrvzLdwBtG/cm4dulUPE+irqaLgbDz5cmAAp4yjHsCfyJr1sdSlOF47o4slxMMPiLVPhlp6dmePqxPUc1ctWfaNnB+mT9a0L7Rdl06Mnlyg49ifQ0yO3ksTtliKlvbIPvXysqiaP1mGHknfp3KskckikMdw7HFUJLY7iecj0rok8vZyctnnilEKbRIpAOMHI4xURq2CrhOZXRzsdxLESHmIbGBvXIxWhaa1PZqWlTK45MbcH04q9dafBMvykDjvWJNYvA52sSPTORW8ZRnuePWwtuh1Fr4gSdg8E3mMBzHjaSO4z2rfsdQGS0UzsjHOJuo9s9PyrzJoWjdWjcxSdmU4NTR61f2ci7j5wx0PH8qwnhI1PhPLqU3DdHqK6zMkhZI9vsM/d6kVtWHixY1AILHGCGG38OhrzHTfGCspV38h++/7v51ZPiyBJmSaWBj3khBGfxAry6uXOWjjcx5uXY9ktdUW+RArqGJ4VuMfjV+xmvI5lL7TGDk7e/H8/avHdN8QwzOhgvCyryVyenuMDH1r0PRNdLKmG3DqCvJx7etfOYvASop2RpGpc9S0a/1FY8R20sxIwGJwR9c12vhmTVYZftZSVnbquCFX0H1rzrw74+8kqnmHcDgMB+nSvU/DfjyNI1ZLtGbuXfaenIzjivh8XTqQv7htzOR1KeKvEJyM3GRxt2nn9aq6pqmqTRiSUMsmcehPtz+FakPjqCKMF5csRjO8Nn6EVBffEPT47WUfOCBjHY/WvFjTne3IOKkcbN4iumYQPkLnPzDP6dKzr7VNK02F5ZoImPcyDjPbjp+Fc74q8ZxzTSk7UZchiuNpIPU9iSOvv8AWvG/Fni97yRoo5CX7Rq3TuTyeP6V9Zg8rniGtLEVPd3PR/EfxRWNpPKnKQqcblbaDz+v4Vw8/ima5zLv8uNxwH+QY/TANeXah4oshKUa6jMueWYMV47DrWVe+JLV2J8+NlX+GMH+WK+2w+Rwpxsl+BxPXVnp0+sG6kP78EDjOCc+w44FQXmtPLEsMkjiNeqqwTdj1IHSvL28ZPGB9nVio7scGqN14iv74BUcqM/wjB/PrXqRyl3vsVGKeh319rVpHhpp0t0AwpIPIHYDr+NYF/41edhHYxkYGBNIvT6L/U81zsOnyXcheZmdm5y3zFq0obQRkJtx9Oprtjh6NHfVnrYfAynq1oRi2lvpDJNIS3dmOSfapobNFYBPmbpV63tHyAq8dlHfA6/QVo2+mBDnGG6lm/nUyrn1eHy/RJIj0ezuWnWOFPNdzjbxgn0/QVv+KPDMtpoExliInXDHA4UYyP6ivT/gt4Cj3NrV0okij+SJSM5Y9SOOT0H41t/FSxtW8N6gqRpCzRn5gBz8uQB7Y/KvDlj/APaY0490fUxy9fVZOXZnyLbafJeSeVEC7EhRtGcmv0C/Zh/Z2j+HehjXNdgb/hIbiJgImJBtEbIKcH75BGfTp3Neb/sVfBN9c1KfxbqVo0dlAzQ6fK+Pnm5DyLnkbOgOOpODla+6otFFjEq7CT15zz/9av0/DxtBSfU/mzOa7lVdCn03OX12RtN8MyKX2NINvXkE8dBX0J4ZXb4c0sDtaxjsP4QO1fMnji+F9rmlaREAWmuFHUHJB6c9+fxzX1RYWY0+xtrUdIYlj/IYrhzJ2UUc2SRd6j9Cakpfworxrn1gxqjdakbvTG6VQFdhmoX71O/eq8hqkxFeSq8nBqeQ1Wc/NVIm5DJVCcYzmr0vSqNweTVoChMDVOTPpV2U1UlNGiAVeamjqGOrUS5anESLtvWhCu7iqUC8Cr8NKQyeIYq1HUC1YSsnqMtRVZjxmqsVTr2p7FFhafUStT1bOaVtRDqkX7oqOpB90Umhi0UULzU2AQim1JimnimMb2z2r82f25Y2uvjjdXbrvSOGKBBnkFUX9PmNfpMzZwBX58ftreDtT/4WTe3qxu+mzxJcRzbTtBwFcA+uQD+HevXyxpV9ex4mbc3sFbufMGmwec2IxtEm7Cjtg8fnzTvMEDNbTxs6t8rL3ZRkZGe4Gfyq7pNrHZ3ixS79r8lyM89xW7q2kxzWrFcF15yAeD619JONz5mNTlOJ/wCEetNeklspphHcoD9luSR+87rG2T6ZwfXg8HIz18I33lyW88JcDOVEZ+fBx6dQeo6ipby3l+0fupdsyn54txxIP854/Guu8N+IPtVmkFwu+9j+8SRucKAA2f7wHB9Rivhs3wc6V69Fadf8z9k4WziE0sHiXr0f6P8AQ8s1LwXeWmXWJxGDkArg/SsTy5beRgS4JBBx/hX1ZYx22pW8ccgjuVbPGOD26YrJ8T/CSx1eFjFH9mOc8EMo9wOv5V8pDM1F8tVH6bUyrmXNRZ8yFpoxnKsn+z2/DtUUzK2C34V33ij4ZX+gXLl4mlhz8txCdyke/wDdPtXHS2eWYSRneP4h1/KvapV4VFeLPnq+CnF2tZmcyeYPX3zVWa3
LdBn6Vvafp5mZgWCgd81Le6JLGm5DvB9Dz+HrW3tlGVjjlgHUheSOIuYzCSeetQqRLg1t31m/mMhGH+uKx5I3tpihXFepTqqSPkMXhJUJX6EsczW0iSoWV0OQynBH4iuo0nxrJYj95kq38QUZ6dx0J9+K5ddrcjkd6bxkJyGHy896dSlCsrTVzz1pses6T4yivywEqu+35VVgrevRuv0roLPxlfRgo+7Yvdic4/GvCvs5k5TJI6ir9rdX1qB5V1PGvYRscflXiVcpozvb8TRVGt0e8x/Eya3+VLiWE4xhOh9j6inXnxLkazMhkZFXgyM20fQnNeIxatq43f6VMM/3R/8AWpq2d3fNvmeSY56uS3864lktBO8mi/bdkdfrnxFFyzLbSyTnPLAYH5n/AArjtV1y4uFcKPLV+oUnJ+pP/wCqp2to7NNzAMSSN2fT29KzfJa+k4Hy+n+Nezh6FCivcWhhOUpbmZJGZGJBzj+L39qjCBO241rXNp5fyqPlXrnqSe1ZkmFl2A7m/ib+lelGpzbDjDuLsMshUED8f0rf0/T9o3MNoxxxzVbR7PYpuJE3sw+Qdh75rorLR7vUJAkKGRj+n+fWvPxFe2iZ9Tl+A5rTktWQrAnQceuT/nitGx01rgbYYi/q3YfX/Peu38OfCq5khWW7QqvUbuAT9M9BXbaN8ObfdmWYRwgfdj+Ut68n+dfNVsdTjonc++wuVzdnJWPMrXSjDCDtLHqT+Hp6Ve8K6Fca/wCIrWyt4xKZnCMuc7VzyfwGfyrvfFXh6yht1h0/dI7MIlU5OWPAA98/55r1n4R/DW08G6abiePOqSJmWTg4H90deB0PqRXn1Mao03Pq9j2FgrTUVsi9qkcfhnR4rG3jhWXG1FRQpYY6n8PbvXmXibT7vxNcWfh62kWXUNTlWIKnz7OQWY+iqu5iegC13viKdrzUSY0ku71pRDb21uMu7Y5xn+Z4AGe1dj4L8FWHw4upLu5VtS8Q3wCsqnaIU4JRW7ID1Y/eI6dq6sky2pjKqqNe6t2fMcU8QUMowzpp/vJKyX6vy/M9f+HfhGx8IeGdP06y2i3s4FgjYjkqo7nvk5P1Y1p+IvE0Gj2cjGXzZMYWPdg8/hgVl2+ovLp8UEZMa4LE5x1J4Brznx/qzWcM0kRDSxoSq46HHBPsB/Ov1+nT/mP5XrVnLbdnS/BuOfxl8Wre5lbzIrRzI6r0G3Jx9M7RzX1pXzh+x54dK6BqWvXK4uLiUwoST0GCx9+oGfavo+vmswnz1ml00Pscooulhk3u9QpM0NTa849oQ96Y1PNNpiK8neq0lWZPvVXkFUtRMqydKrSVZk71VkNaIzK0hPNU5zzVqSqsoqxlKZapyVflqnKopSjcfLcIlq5DjiqcdXIe1UBeharsJqjCauQ1m9xltRViPqKgXpU8dAFuKp17VWjPBqZW6VLKJs4qSM1FnIqSPtSAlp+ajpae4EnXNApq0uakY6mtRupu6kFhCtcf8SPhlpnxI8PzafeRos+Mw3G0Eo3bPqPauxzThVRk4NSi9UTKCmnGS0Z+X3xf+EOt/DPWJIL+yZrZTmKZVLDbngZxyPQ/rmuLtNWtmVUGd3c5/wA4/pX6xeJPCukeL9New1mwh1C1YEbJVzj3B6j8K/PL9sr4J+E/gzNBfaN4iaK/vDvTQ3jDzBf7+8EbU6jLAk9s819PhsfGt7k1ZnyWKy10Peg/d/E+ffFWkiOZ7m3OFZt3ynBU9T+tc1JdvFcI5LRTKwIlHYjofr7+9UrT4i+YzQXse0Z2+aBn/vod6t7o7iHzIGVozzjOQPau+UY1FYmlKdFpvRnV+H/GU9ncJtHk3khBKs2I5j3KnOA3qO4r0vQ/H1ndM0WoK1s6tyj4B/M8V4P9mjucJwHweD3wM81a03X7zRw0dwxuYV/5YzfeHH8LYIP0I/Gvg804fU71KC+R+wZDxm6CjRxb+f8AmfV+n6Hp/iC3VklXy3X+JucdP84rlPFn7Oen6vCz2q/ZpScgqMKWx1I6Z+mPxrzXwt40SPbNpN1JbTKdz22SMfVD29xkV7B4e+L0c22HUNsDHo+cA/hnFfnlSjisHP3bn65TxVDH01KLTT/rc8C8RfBDXfDtw5NvJNCpys0CsQR6kdRXHTWd9ZvtmTco7qAceueOvsa+4JvFWnzQof8AWxZAZcA7fU/ToTWFr3hrwx4yjcXFtDNdbcebF8snoCDwT+NdFLNZx/jRMJYGL+DQ+K7zTUv1yGCle/p/9aue1jRjGoYEbkHABzx/h/8AXr7G1T9n3QpNNaex+1RzKMkkhtw9/bGewI96+evH3gm68N3D7xtjLZSRe3tXv4PMadaVoM+czHLb03zI8c8n5twG2po0+6+R8h9elal5bKtxwuI5Oq9lPtUIsZbWQOo3qfXuPSvqo1FJH5XWoujNxZPHEkkYZtkbKvBHCt9fSrENuFALlc5x1GP51PZ6Y1xCJrUnZ3X+fFbNlYx3OI5PKRl/hclT+vH61wVcQorQwS1KMdvCoyJowcdsGpfLHITMz46qOn6V1Wk+HLIsPMubOKMfecufl+ozWidPsIdwtduoFuAzIUiT3x/F9K8qWNXNZJs20R5yuh3FywlnPl25GVAGCw/w96nsbVfMkKrtRflBxwO5P9K7bVNPk2najS3MvG4jknsAOgA/IVlX9n/Z9rHCiASn7vGQB6k/ma3jinUVu5k1dnGa7t5jiXaFGcd1Hv7muZtYRNeMGHLcCuovlF45t7dmZc/NKwxk+uO1VY7OOHVLfaoUADd+HavWpz5Y2OyjSdSS9TrvCnhK48RahBbwBlj/AIpFUsFAH8+QK9x0LwtY6AqWqwF7lPlwcEKfw6n/AD9Oi+BPg+0jW0fKtbsquWKZJ/d7s/Tdxx6V6ff+FrW41a4kjxt3duMjH/66+AzDMHKbh0R+15Xg4UkrrVnn9vo9xqUaRiPKrghccZ9T61evPB8tvYtNcS+TbIMvJnaAAOea9KtbOLS7UsYwI8Z2sOlc3ql2dYuUEkReCP8A1cQ6FgeGPvnp+deCsRKbstEfTR3skYHgXwYst9LqkybFjH+jRzHGzIxvIxwSOg6gHnk8dZ4i8RQaHp5VAWuJv3SrkO2eMsQOw/LJAqHVNSi02w8uKRYblVz83O1fUDqSfuj1YjqK4XxN9sjuJNLsFYasSovJmyWso+8eQcCZgRnPKbm7nj6HK8tnmlbmk7RR8hxBndLJcO3vN7f5s2vCuq/2W0o04/atZndo3lZ/ktQTkoDj5mzjeRwDkdsV6HpmmPZwebNM8s8nLyScM34elcJ4D8NW+k24dSiOvOG45/x5/X1rrbzxALaGSSQrLKp2KocEZHYnPGO/5V+3YTD08NTUIKyR/I2ZY6tmGIlXqyu2bk/iSPRbNt8m9gciNmAJJ7Vo/CnwFP8AE7xIkl+jmwhYS3Mh/ug8KPdjx7fhXmfwrsbP4w/FBvDltq0Uc8cb3FxORuGVwCEAIyRkAc9j7V93eCfBen+BNDi0zTlYoDukmkOXkY9WP+A6VzY7FqhFwj8T/A6cty
+WImqk/hX9WNa1sbext44LaGOCCNQscUahVRR0AA6CpafTWr5Bn3iSS0GnpTafTKBjG6Ck3U5qjaqJIpKrvUzVDJ0rRRaJK8g68VVlFW5KrS1bIKUnFVJOTVubvVSSmNFSaqchq3MapyGmh3HR4q1HWfDJuq7FSFdF6HPFXYW6Vnxdauw1Nhl9G4FTo1VY24FWY2zUgWYzUsbHioY6mj60FE4FToKjXFTLSYC07FJT6QCLRSZpM0mUhWNNzQaT8akY6nA1HurhvjN8UbT4T+CLvWZQkt5/q7W3Y482Q9PwHU04xlOSjHdmdScacXOb0R5v+1F+1RF8FbN9K0iOC58QSxbvNmYFLXcDglf4nxyAeOnXpX5dfEb4haz471i61PUb2e9vbjLS3M0mXcnj8gMDHQDpXSfFLxFqXxI8S3+raxqTyXzv5jDk5znA9gOw7VxUtlvtA+3btGAPQjqP0r62hhVh42S16s+VniHXlzzenRdjjxFuYoeXHpViyurvT5MxSMnr7/Wr8uno5DqOucYqH+z5xk78j1NdCTRtzRaNew8WR7lFynlMOPMXJH41uT3FrrVuHDIxC4Jj5B9zjofwrhLi3eL5niJDdx3/ABqj9qlsZA8LshPYHkU/aNbon2ClrB2Z1T2s9nMcbmVfuMOqehyORWzZ+Or6xyl3E17DGwHmcLIM+pxg8Z6+lctZ6+0ka+eCpHHmIevswrpprNbqxtZYFISeIFtoJ3kMcj3Of1NcGKweHxUffX+Z62AzPGZdNeyk1+T+R2vh/wCJVtHIEgvNkbAoUlO3qDx83+PGOK3LHxVc28i3ENwrx5z5kZP6/wD1s15r8TPBcHg/wfYJuQ3cky7iAflOGL5PQn7ox225/irg9L1afQ7uKQTP5ZOTySrDkZr4itklOcXOk9PM/UMFxdU51TrQ17p/oz72+Gfi6PxY0qpcfvFAL78A/X07nnoetcT8ZtHtoZmsp4f3EwDRK3ORjt9DXifg/wAa6louoQalYXQjkzwrN8j9yhPv7/hX0Jf6nF8YvBP2q2heHVLBgHt8/MjbScZ7qecH2r4ethpYOsp/Z/I/R6OIhikfKXirwg9iH8sZi6q45/8A1GsLSVFxmCdenY9vp/hX0daeD21jTrmG4T50JDLg7gcdcHnHNeJ61oM2l61cW2wxzRvjYwIOO3+fSvqsHjlWTpt6o+E4gyv2SVemtGUYtHudJkE8B8yPqyHow9++a6uxj03V1j80fY5eh3ggA+gqfw3cJeKbe4UKyjAjY8HPoeme+Dj6jFdrZ+AhdIk4hZkP3dyhse3AJ9etceMxcYv95o11R8C4MxtI8D/OZTfxGE8KS3IHXuf8OvetC+j0bScRW066lebQSkZDdT6jp+ldRZ/DyKRAxsjtA5ZIzz9cnH8q1oPBtjpkZkaFo4Ty0kgEa9s9z/XpXzksdCUrubfkNRPP9P8ADc907TSRq8zrwuQERc9z/Oue8XaQI45bZZFaVhiWYHOBjkZx9K9Hk+06xeNa6NbtLFjBljBGMerH/PFQap4XtPB9mZtSlWbUJDmK1Uhh9eD/AD9z7V2UcU1NOT17F6J2R4hfaImlQqGVlkwHIYc+qj68ZrE0fS5LrUs44jdWY9h6Z/Ouu8TTSyb5JVUnJYLnqT0/oOPQ1v8Aw58Kr9lkmmXcxZc56nnJ4/H9K+m+sOnSc5bs+tynA+3kn2Pf/wBnu3hh8MxyjcTCGg3NxlVbcMA9iDXottC6rmRwZWO8queM89h/nFcT8Hr5LPwrreHEkdu+8EDsYxwB9VI4/wAK2NW8SnS7O4vIkUKmYkZjkkg4P65/KvgcWpVa0tOp+rYSPLG3yLWuakHk8lZECJwwJHJx05649PesG6AsopJZZU3bTsj3DJPPU9h71yd54sjjkeRg0sy92kwF75LH37DmuV1rx1Jqy/2XbHz7+4dVWOHcckcjr/CMdT/jXoYXLqtaahBGeLzOhgaMqk5JWNi38UXOseLFh0WSRJoifOvFhBMPGA6Lzh+cKx+725wV9H0fwvBptkkNuBDEuS2SCzMTlmZurMTySaxfCPwdXRdBDXM7fbZj5zyr95e+7HYe5rk/HXxWk8I2Y02C6a7mhJ/0iQEfTPJyf8DX7Tl2DjgaSh1P5Rz7Np51inUTduiO18WeLLHwnYs1xcRwhQSTkZH1718y+NvjNqHiJp7fS3e1swRF5w++V9h2z145rnfFXiLVfG2qfvWknZvupz19TWz4d+G/2yJEVPPnxkgAkDj+Ve3BSnsfPctLDq9TVm58B/HOo/DPxkmqW05jeTEcjJyrKDn72fUfqa/Vv4K/F2y+J3h+KQSoL5FG9QRlh64r8u/+Fe3P2WVYY1a4RflEalM9wp/IgcZr2z9nLxRqHhvVILm1dhLalftFvuCl1HBOO57e/PpXPjMFGtC6+JGmHzH2NRVE/de6/U/SA0hqvpt9HqVhb3cJBjmjV1IOeCM1Yr4xxs9T7yMlJXQ2m7acaaaRRGzVGxp8lRN0rVLQgjbrUMlTNUEnSrbuiSCQiq8jDFSyMaryN1FAmVZec1RmbrV2VutUbiqQinM1VJGwtWJm61SlfGaYDbVq0oW4rLj7VdhOaTQjRjOauwms+EnirsLfKKgsvRtVqNqpRye9Wo26c0mBdjapY/vVWjarEfaoWhVy2nzVKpquhNTIc0CJMmnbjTDTl6UigzSbjRg5oxQNBk0optO7VIDGr87P2svjJbfEHxxNaWE7Np2llrWDGQrsD87/AInp7AV9y/F/xV/whfw313VEYLcJbtFBk4/eP8q/lnP4V+VmqSFb6aeXHnZJj547kse/pXvZTQUpOq+miPls5xDio0V11Zw9y3kap867BOpQ5/vdRmszzbma6lgePD5O5u3TrXS6vZG7idv4h9xsY+brVF4mkt1uWB8/HzbeM+o/rX0conhwq6HPw2ImkVGLM6fIMHj1zW7DoIdQF+b+lUhmK5LREmPqMnj/APXxXQ6HDdXmVhwWzk9cjPWpNJ1JJXuYOpaP5MfzLnbx8oBNYF7oUd0oCoA2PfivY38PuY8SEPuHzHGPb6VkSeCru1824RIzHjlXOCo9O/8ASpcUy6WLtuzxSbSbjTbhiQWjIxg17N8FkS7XSftSRy23myYR4w6rIrA7XHXaw79ia4rXsxSSq8eHAIYE+tbXwWkl8m9iglZJBPuWRSVYHaOmAe2fzrxcwk6NFyifTYCKxVRKRP8AHrVBrHiG00pNsBhDTyRoVZVY8KAR/s/NjtvxjIry1tNdYPLlXnnb7/5P867bXoZtT8T6wb15ri/jlw0srbnZcDafbjGPw4pLWzg1SD7KVInQ8nHKH+8PUeorWjTi6MbdUYVMQ6Vdp9GZXh+Z4oVjJwWGAGOA2OcH+npXpfw38eXvgzXLeTacOfLJbC+ameY2zxkdvQ9ODXmjltBuJLO9iZRncGGPlOT0Poeo5NaaazbXS7HdWU8bl4Jx0JHY18Pj8I5TlCcdD9oynMKdSjCcJa/1ofbEVrY+Imj1WzeOPzlB3KOc91Ix1BzweRXjf7QHg
59Jm0vXEVd7OIZCoyOQdv48Ee9cB4C+L194BuTbXMqXenygMEnlICj+FgQpwR+OQMV6TqnxA0f4waBDoD3FraXcp82GKS6KFnGcDdIiDO7A6kEHGRwa+Qp4LEYOup2vD9D6eti6GMoSouST8xvgv4dWfiyBWCJE7ofmVuN2MgjI6fUdDXe6b8C9VsYg9lfPJG3I2uVT8OME/h1rjvhTdXfhvV5tL1KKSynt5DGbeY7GxxyM9VOeMcGvqTwn4htXt4IlLAEYZQeh5/TFeBmNWtSquEXoflWKp+zm0eLTWGt2TfZftOy4jwHjkiUN9Qe5pND+GMGrTNqGqTPNFkny2YKGOSDnr0OR/h0r3bx54ZgvNLlvItqPboSZI8d8YB9Rn2/CuP06O3t5YrOwLXl4sPmeYG4j75OM7OT29eBXiqtUhsrPyOP4loc9fWtnpdjJbaZBHaAqQW2YK8dT0wfr9cdq8C8eSWsNxKyzSXMxP+umzuPsB24/+vjOK9o+Juvf2FD5Rn8+baAy+hPbkZH868A8QQy+XLd3wxI4JQf3B16f3v8AH6V7eWU5yl7STPXy7L54upf7K3Z55NFLqGrK0h2QQ/P+PqT65B/IV6V4P0ue40dbf7nmNvJwSw9QOvBG3P0+lcla2Ruvs1sg/f3T7i2c7R2/8dH/AI8K9p0/S0sdHjKqqtsy/OBke/oOvPpnmvqMVWtFRR+pZfh4UbtaJEvw/s7xrTxHZx7EY/ZoBjChjufJ9hjrT/iDNa6HDFZJLK7lS7Ko4ZumSc8D2x+Vef6x8fdL8I/bYNPs21CdpAzTLiOI7V25DZz0Bxxzn3Feca58XdW8eTTW5P2Kxk5laFT8yjnAzg449veqw2VYrE1lU5bR/wCAjnxWeYLL6MoyneXZf5l/xV4qaO1eSB2S1TA81ARuyMgA9s9fXGSa7b9mnRYri61LWdRQm33YiJJBbHJJ/Hb7HP5ecaX4HfxReQ2sU7izh5UTEs0cfXr0LtjcTwOR7V7NqUcvgzwSbWyZxbW0SyyxAHK7sbS/1LdD61+o4HC0sLBW+8/n7PszxGZVHGT36dEjT+NHx4t/Dely2NnMs7y/KQpzuP8AX8eOOK+Yre6v/F2oC8vJljRmY7m5x34Hc1RvrC98Ua5LNcSmQlyqbjnPbH+eK9Q8B/DNT5clxINq9lOB9MkfyFestWfLSlDDQ394m8F+D1vLgraI23kSSsMEg8H/AD1/CvoDwd8N3+yxw2q7R08znr3J9foKt+AfBIaGOK2tvJhU8PjAz647mvoXwN4RhtVRiOQBktxk46/59a1dRrY8GUnWnypnms/w9jtNF8vYpuJDncVAI/yB/KuUh8Iz6f4gigtAq+YhmZsAsuOD+ZYflXv/AIrsY7dgTtC56D19K828MXFreat4jvZ/kW32WcS4+7tUux6dy4/75FCqS5bmM48krH0H8CfEEWseA7e33D7VZyPDImfm65DEE55z+leisK+P/A/ii58N69LqenMrh8gxlQQRkHg8EHvxX0r4J+IVp4yg2hfs16oy8LN19cd+/cfnXzOMoOE3NbM++yrHQrUo0pP3lodSTTc0rUw15p74yTvUL9DUjtUJNaIQxmqCRuKlkbNV5KEupJDIetVJD1qzJVSU1YitKxqjMx5q1MfeqE/FUgK0zVRmbk81amfdVKVuvFUhD46uwdqz4WHrV2Fh60S1JNGE1biNUoj0q1GTWZTLkZq5EeKowk96vQ9KTGWo6txVUjqylSUWlqVCBmoFPSpM1NrsaJqVWxxUYOaWgCWmnvTqTFSMRaUUuK8m/aC+Ntv8JfDEy2rpL4guExbxY3eSD/y0Yeg7A9TWlOlKtJQhuzGtVhRg6k3ojy39s74pWcFhB4TtblTcZ8+5KMDtwDhT798e9fCmoYeWSdsmWY4VR1x2Fb/iDxFNrl9c3t9cve3EzmSWeQ/MzEnP86yrWE3N4tw6kA8Af3RX2uGoLD01TTuz4HFV3iajqtGh4a8IyakhZ8Dd93I4qPUPDL2+oS2SR53Y2ccnJ/ycV3/hRZJoxHFESCRgkfOR+fFdBD4cEPjG1ubtS0VxEYl2LwjAggfka6WzyfaNyZ89XXhK+0/UfsU0fypyHxjZz3Oa6LSJIdHhwuFn3FXRu55z/n3r0bxdHa6T4oeHyVL+TgMRnccnP+favLfE0n2VjMBh1UlyPUd/yx+VQdKqOq0mdJp2pxTXUZfhQ2QR7dB/nrXTzaPfX9vK0cXlK3zEyYyf1GK8/wDhzqFnf3ga6uUVwMqh4I9/c17z51qmjly8caMpxNMwVenP3iM0PYzmuSR8ofELRbq01Nkmy69BkYz/AJzT/gzGbXVriGRcDzUfa3AYY9jnHFd58RtPfXLoTW8WYMfLLjKt2znv9B6VzHg23Sz8QMrErhV3Hp0b1HTr2rxc3jfCTZ9nkda9eEf62Oc+ME0+m/Ei4mhXyhLFHJhckcDb6/7P61FpN4bh4rxB5NymAWXofY+o9j711Hxe08al4sW8VVMZt1QhQRjBJHXnof0rB0WyKMigbi3BBHB71OWtywtNvsTmUoxxFRdbndW3guy+J2g3BgGzVIAFaDcATyQBn044bpk4OOK8C8SeHr7wvqxtLxXj/usw5x2PHb/A16nb6zf+Cdct9U0+bYu7HzAsCM8qe/1/PqK9L+JfhvTfjN4QXWNLijTVYFMkibuA3UnPocenftXRiaPtI80d0GW46WFqqMnenL8D5dhtZLiLbkuPft9KdZ3U2hzDKie23Ze3kJ2E4xnggg+45FT6ZFLZ3MlnMrRzxMVMbZBDA8itybTY7+3OEw/Q8Y/OvkqlXklyyWh+s08L7eCnB6nq3gH4kRSw6fp+v3882l4CWGtMA9zphPRJT/y0tzyME8fNtKnIPv3w/wBc1LStStrC4aOR0Cl3jwFkQ5KujYwysBwwyDXxZ4Svl0DVvsd1CslrMShEpbbzwwOCPlPcjkHBHTn6W8M64JtAhihjn87Q5Y4nmjhfAtJj8gkkDFW2twDwcN0PJr5jNMDGtTcoK7QSpOtBxmrNbf5f5H25od9Dqujyu0R2bcH+IDA+nP4D1rzfVrzT/BPht0siy3DFi0jJt3nOM8ngf0Ax61e+G/iRptJhJYBtp6OT29K8z+NevNHKirIuSMO2AOT0+gz79q/PY0HOSijwadCUqnIup5vrFxNr2qS3V0ysqsdvoT3b6AYH8+wrz/xlcQiYW0h/dZy5xnIA4H4nj/8AVXYXmrW1tZkRyLLheSCM+pyeeM5Oa84m0268U60traMAeZJZeoijGAzH88D1Jr6vC0lDfRI/VKFCODw8aMd/zZs+A7FLy7u9ZvpI47O1Vsszhcd3bPQDoM+4rjvir8Y4tV8zTtMUxad91mDHdIM+h6DqMd+/SnfFHxU0Kp4c0OV7fTLE5nbd9+ReTk45I5J98+1eM3lhMsRmd2O88bjk898Z719NgsDGpNV6vyXY8bMsynTpuhR26snsbafXtRaMMwiUhmx3z24/l/hXaR6cbdXdlChh8w6ALjJH4Dj8a2Ph94PP9kkl
D5uN7NjnA6n6dR710Fr4Tn1a8aFLdvIzs3qMYPTPuePpzX6LhqEY07Pc/DswzB1a2+iM7wH46l0VlAtpL8QMY4jGgDyleF3j1GSf4h0yK9HTTdX8WLP/AGrBLaQapZXCKB8+yaICVVYAcblwozzhW9a8ucXXwz8aQQgBIblgrblBI5x3+vYg4OK+gtZ8V3snw40zURBbo9rbyJtXGN0uFztBJPXgsFyM9elfOZhVrUJRox2b0PbwNGhiKc8R1s7/AHHlPhT4fpHcRRqNx4A4/Pp/jX0b8P8A4f2/lKzgMSBjgA46Yzn+VefeB/D+pyQxXGILTzunnk7yD7cd69r8P6Dd2FurzajGFwFZk5b/ANCAHXtmvsoxSjdn5FiasqlRroddpdnHpsCKrwJlA2JJAGyex71raL40gsYXS9kSB1bIYsMY7/rmuP8A7N024t2UajdXPX/VzMig/wDAMeuOted+PGfQbxIftTSWNwh8p5m3MrAcxlscjB3KSc8EHOATVlLRmVPmhL3Geuap4ztPE0kn2SUzwxgqG6DPqT+VcFqENxcaDNZWkn+kaldSzzPu6Rhyo/EhB+FUfhpepLZSQKQGXhRg45wD0HX8CaZ4T1Nr7S7a5YlHnJYD/ZJJX9CKXLZ2Rrdttvc6jRNBuNKtxICzhcBt30xn8sV1ngzxE/hfxZp9y7bbVpAkmMjAPBzjg9T17fSr3h/OoeG3meMDGFGB+v8AL86zNT0lWtWkRcOvzdOuD6eo9R61yVIqonGR10ZyozjOO61PqbcPXimNWJ4T1hda8O6fdqeWhUP6hgMEH8RWvuyK+RcXF2Z+pQkqkVJbMYxqJqkk6VCxqo72LZHIaryNUzmqszGrJI5G96qSudxqSRzVWRjzQriIZjWdO3WrszGqE7elVHzApTHmqkpPrVmU1UlarENt5MqK0IW6Vj2z/KK0IZDxU7EmtE/TmrkbVlwufWrsbHFSM0Y2FXoG4rLhY1fhbpQ0UaMVW196z4X96tpJSKLaGn1XRjUytUokljbnFPqJThqkBpMomXpRuApqVjeMvEdv4R8O6hq10wWG2iL4/vEdB+J4qLNuyCUlFOT2OE+Nnx30/wCFmmtbxsk+szJmOMjIiHZmx+g9q/Pr4ieP9Q8V6nNfX909y8pLM8xyxP0/kK6f4jeKbvxTrF5qVzNvkumaRiScqCeFHpwB+GK8razWa4Vwvze+T+YzX2mFwscLC32nuz8+xONljajcvhWy/rqM0XSjf3QllTHzZUH+ddSmiskwz8wPyjHWodMjESnKkHp83IzXWWFqhCSr0U4G713d67Ty6lRtno3w08GtHZfa2QlWOVyOMYz/AJzVjxsy6fax3PyqVJVD6sPT2ru9GMcfhGEICVWM4b8OleZ+Nrp7rTY5RIo2k5XOeDjH8v1qI3kzldjzj4rTLLeW92isJ0d4pRkevHT6/wAq8g+IGoLHDOgY7XH13DHSu48Va8yXF3auWZLsiWLd/u4cc9+AR7ivLfF94t9Yb4/nZVJDDjHrVdD0cPC8otnqfwb0GLxFBp0ZVJ4jbNKyHI2tu8sKT7/Ma+irD4L6esAazZrKRh8zW4CkH1GB/Ovmz9ni+W3llSaSQRBo1/dEkLwWyfxfkV9o6HqUR09JY5UkjP8Ay0UjHTvzWNRuysEo2qNM8M+JPw11HStLmkGrSzw45+0W0DYPsQin9c186eC7K5m8S6i7MZ2iKRKxG0c5OP0H519j/FnxXFHok0WwTbkx83Yn0+lfNHw7ghklF7vgEdzM0xZpgrEltqjGAfuj17189nVd0sE77y0Pr+GqPtsZptFXOv8ACvhG28XeJLzTLpf3nkK8eRlSu5gw9Mg7enr1rgPH3ge+8D6pL5MfmWLN8rYOUP1r2Gytb3wtf3+t2cEVyEt1QwzPsMgBB+T73zc/iPyre8Q29n488Mrf28PySJkqy4ZD6e/P8jV5LUVTCQOLPU6OYTfR2PlG9hfU7d18tklXqe2cZB+hH866z4KeJLyz1VtJUr9oV8JCz7fOTpIgOMbsZIHfBq7faWLW1ldlIls38uTqMxHgHB44b+dcVrFr/ZeovcQ+ZFJ95XTg5HI/EGvetZ3PLjUjU9zuaPx48BDS9Z/t2yjMaO22VWGCevzMOxAwDXKWdut5Cki5jZ+oHc9/xr6I1K0j+Jfwzj1JlzqFvE0d1DGf9YAMB8c5OP5/Svn3ToWtGnt2JD28nX0wevHqMV8TnFH2c+eOzP2XhPGfWKDoTd5RKGr6HkbyvIGcivY/AGoXV9oui6ZPK8lvrwl0c2NucO0yqDBM3OH27yMEHAHHJFecyXayq8b4dRnBPbFdNY2qyeErqyS2DXduy3sEiqA8e0jdg9RkDI+g9K+ZlVcUlI+3q0E06kOmv3H0D8M9ejtLO2gO/wA9CsTr1ZWB2sPzzXGfH3WlluobZJXaWR8ntwPbsc4pnh7xLAskOqCCKGDUl8+SS2DLEt11miXcT0LA8cfPXn+ta43iXxpPdbWlhjOzGf68/wCRXztPD8laTtsKjgYSrqrHZ6mDqC3cEIEcrR7h/q1JO49PXr/jRN4oufCPhJtPsRHLqFy2bi93D5T0Cx9228jd90EEjPFa9npSa9r8xJddK0xfPv5lQtnjIj+gClmPsB1NcvfRvr2sSXEMTBZjtghwPkjUYVfToB0r3Kag1aovP/K/5nZWc6lTlovTa/52/I46a3kmQWipk9W7sQTnk/hVCx02TVfFNtZj96d2SG6DHr7dPw4716ra+F4tDsp57hUNw4z5h65I6fTtU3wN8GnXNb1DWHgaSBZREoXjoN7n14O0cevsce/l1ZYirZLRHyPENN4DCOberPV/CfgOWHS7ezC+dM6CLcEJJ9fr36ew967XUrDTPAvhtJWCi8K4S3JG5m/Dp1yT2rj/ABf4zufCOtaabFlEcZ+03Sl8bYvuooxySSxOMdvTNY+r61Pry3esXiKryEpCnaNAT3/yTX3MYt+h+AVZc8rs811a5i8SePbb+0n4VjJJIM/KAM4GP8817Qulz+JvDVtpdsred50czHyymWKZBJIzuGBgZ4ArxjSbqC+8RakViaWQxhIjGuSp3h2Y47YTB9M16t8MfElzN4dlYuUm/tFWOw4J+Yg5I7YbH1HGelfIZlJyzGlfaLj+J+mZfT5MlquO7jL8D1Lwl8CL6S3jZryzt2xtZjH5r49NxP8AKuub4Mx6ZD5tx4guyvcwhIzz6YUkc+9dp4HmeTT4N7F5Gw7scjdx/WutXSobhGZ/mdefr2J6V9hKpKOh+VLDxn73U8Z1b4XabHpovIRd3UqjIe+uHYlsZ+6SRjHt1+tcB8YtHkh+H9prNmCZLTBljA2heMOVA6KflPrxj1z9B6oYJmeGSXD/ADZ9B7flmvO/Gj2sfgXU7KQDy1iJLMQSeO/4EVXtGxKEYtHg3wz8UR/Z52NzhWjYnaN3JHUjrkGvQfAskU2jaeQ25VhRVVRwMKPzr5x8K6p/wjusPb7htjkK7Cff/wCsa9q+HesZ0jTYl+UqiqIz1ACj/CulamVany6
rZn1X4XsVk8NxiMY6E+/equpYhdwAAQC/p061q+GZDF4StJZRt3Rhj+VYGoX0VwZN3pt69q5bXKk0kjofhr4uh0m+fTpJQIJTuA3D5SeAfQeh+lewK2VBByK+WbVpN6MGwVyq8HP59q9D+Hnj680++istWn8y0lyqyM27Yex7cV5OKwrl+8gfQ5XmSp2oVduj7HsTdDUDNipdwZQQwII4NQS/nXkRR9mRsxqtM3+c1LJ3qtI3vTJK8je9VpG5qWVsd6pzSdaVgI5mx3qhM1TSyn1qjNJ1q0hEFw1Z874zVuV+KpSNTEVrd+laMLdKybdq0YX6UMRqwMavxHpWXbyc4rQhekBejbbVyGSs+NqtRnpUvcs0oXq5G3SsyNjVyFqALyt0qwjVUVqmjbp6VIFlWp6NUC1ItSxlta+ef2uPFjW+kaZoEEwVrhmubj/ZjUYGR9SfyFfQAbaM9a/PT9ojx0/ir4samyAmCzk+yI27GFTIwB6k5P4ivTy2l7WupP7Op4ecVvZ4ZxW8tDgNZVriRz/eJ+RewzxWPdWiW9o1yYyzNwi+uO+f8K6BYBdRjyzgs2055/H8qr65Yy3JaEDKg4AA9K+uPgVK1jmdH10XJ8ucBWVuSOv0NelQtv03dGAVIHI56YrwbWIdQ8M3kwMBntnberA/dPoT2716b8PfFianpYtpT88fYnkDnH68UGlanaKnHY9/8Ga803h0Qlhsix/TrXGePGSO1miA3JnIKkYUdj+B/QVneG9cFnJNbPIEVgdv17f1FQeJtZS7tZEzhV6D056D2pctmcN9UeL+NGN5HnlJYWJVu4I6/wCfavN9Qv3mju1dQhb5iF6c/wD18V6H43vUs5o5TxDIQpPfP+RXmWuki4MkWCvT1z/9as5n0WFV0kdF8M/iingeQpNatPFJIrnY2CpHBznggjNfS3gr47eELqOOODUW0+WYjdb3X7vnpweVx9DXyBoOlreM2ectwuK39a8EyWKvHghCA2R0z/L86y1aNq1KlKeujPp74rarcarpcpgmieHaWHkqoyPqOteSeArdbrUrMxTNGJPLij2SBQGGec46nJ/KvJY7XULWN44LyaJFGCqMQvX0/Kum8I6zqGk24vBC8zKwEe4kAYOMgjqfqO1fP5zQnWoxUN/8z6nh2cMJVn7TVPy2se9ateXsOsHT5ryS8t2h81N4zLHuZl257jKnk89vr6Z4BC/8I4FVedpAU9uOc+2efxrybwLY6h4svRqVyBE9xsQRqOI0UfKCT1+8T+Ne/W+i/wBlaUkcC7ZGXn5R19a7ctw0sNhIQqL3ra+p8vnmMjisbUlSfu309DwvxFpf/E8vbdh+6nBVj9RwfrzmvNfEFq8tuUmiVbiDKkBQDx2P5V7J4+t5dNkkuYkE0sfzMjA5OCOvr+lcr4w0EyG11NIvLt9QUEjccB8Ag/iGGe/51655FOfJZ3M34Q+KP7M1KXTZXzZ3LBSCMZBBwPT8PrXI+ONNi0nxrexjCpcpnk8fj+f8vStnwzaiw16O2m+UTK0Y3e3zKfYgg0/40eH5rDXNK1iL57e6BRmY8q+0Ej365B9mrwM4pc2Gduh97wrivZ5lFN6S0OC0nTW1DUjCDycA8/5/+vXp/hySHR/E1iLtdnnRtCwk4DDHBGe2ay/AlpbGQx3WxZnb5XwOe20HGMnp+Apfitpsmnwx6gjsyr0YnkY7f596/KK9V1qyo7H9H06Kp4eUnrc0E0ueDWNS8KTvKVhX7RpskhJxHlgFXPKgM2Dg4Pz8DGau3Hwp1Xwz8N9U143mmC6tY0nFiJiZ3SRwiyYKgFdxJ4JzgjrXPfDf4gWvxR8Wadpmrh7e40+3mkjubXmR1SMkgnByeM9+RXS/F74nalD4i0rwhpGnJLFptjZXl5C9vLHdWMvkrLseVmKsAZGIIBVgVIGQMelHCVpe80tLX/rzsfISzRYepGhTe77dP8zjb3ULnS9Dh8J2UrPLIyyarKCTukJVyh7HBC5442r1rpvDPhFLK1jmlw0pXCnnoSOOazfBWlR3upx3DcxSjdkgDLZ6498H9T3r02XTI7O3Zsbs8Dng8A968vFVuX9333PtMDQUYcx418TrpNP091JxI42g/XI/kP8AOa6L4X28/h/wTps8ZZJLiQeYg4J3t3H4qK85+K14+q+IYrVWztYD8+B+PP617/puhxW/hu2ijIKxlR9CORn8hX33D2H5afMz8Y8QMd8FFdX+X/DnlfxA1W9tdc1GSNFuoZcW2x8btoA53YJByMgj0x3qKx8Xxa34d+y7v3sIzIxbHbqR610+p6Ymqrq7qoaWGRg4ZsbfTH6V5L/wj8n74wgqMsu5eN3P1r7fl7H49TnGpG0iDQ7yOx1yWbLfaODEUbA3bsZPsAa9w8F6fa3ngee5xHDe21yfP8xFXzIQygkHHzBAS+M/KUyMmvCZNNk0nyrxo3KQt8x7kMcf4fnXq3hnXPsvg+Yq8Yxue4Vt33BhuMggZ57HOD0xXxOa0ZrG03Hq4/mfp2VV4Vcrqx7KX5H1h4R8SR6HpNjM8iJHsDctgEDr17cH26iunuPi/Y3n7nTib26wQVtwWxx3PQfy618T237Qttb+RHb6RJcQw5JWaTywM9f72OfY/QVFrXxw8VapBKNGtYdEtZDy1tGCx9BkgAdeoFfZSpqR+WRo1o3jeyPpjxh4rurcS3l7q1npdmCd/mT5I74yOM47Zz7V4V44+MKa5PDpmjyTXOn+YTc6hMpjFwoYDYg64J6sfQfj51DoN7rEi3moTXF5evyGnbzCRjOPmPH4Ypty0ty0rSLgW4ZjuOSG+6ox7cHHfFX7Pl3LhTjF73ZZvoY9U8RutqxG4sWk6EAnH58nj3r3D4L6KdTkR1VmaWZhgjhRkHA/D+VeCaPdf2OxlRDLITtAP8THj8h+tfWvwB0ttL0mKe4UjC8nruJ6n8/5Cr2WhFfpE9w8R3P2XRYYIRtVUxkD2wK4ZWkkIjK5fpjvWl4j1L7SyxhtwYnnrj3qOHUtO0FR5hUSgcu5Gf8APtWXwqxxT1ky/p/huSaMGRdp61afR3t1PzmNf9r5gPf2rHb4yaTZthg8i9PkTj8//rVej8eW+sQ7rdTj0YYrJqRvH2dtGekeC/FS2lqljqNwv3tsMxOV56KT/Kuzkl614NHfNc6fMjrgsPuY4PpXrfhi6luPDti85JlEexm9cEgH8gK8LF0VTfNHqfaZVipVoezl0NeRqqSNUjSfjVWZ68+575BM/WqczVNK+PeqUklUmBXnbrVCaSrVw1Z0zdapMTIpJKqSzdadM9Upm68igkdE2KuwydKzIpKvQtxVtDNGNuM1egm6Vlo2BVqGTpzUgbUMmcZq5G1ZNvL71pQvnHNQxl6I1cjNUY2q0rY71IF+Ns1OjVQhkqwsue9IZeV6kVqppJUqtnvRYobqd4LHTbq4Y4EUTSH8Bmvyk1rXjqXiTVbp3IuJbiRdjjB5b73Xqfy4r9JPjp4uj8F/CvX9QZwsptmhiz3dgQK/L2x0ifUrgzyFoZmmIC92VieMd/8A69fR5R
BxUpnyecyjKUYN7Ha+GLh7i92upKSDair2wMf5zXrnhbwzFqivFKFMp43N/KuV8I6LBptqhI3zPxuBz+Ge5/TmvTvDWkvbtHP5qB+oXeOK9ubPjZS5pWSPPfHnw2/s+NozFuikOEkIJ/D+VeQSeG5fB+qfardj9nPysoPb0PuOor7r/sq08Uacbe6VT/D+nUGvEfiJ8Kb7RfMZo1urQt8kjR5GCTw3pUQn0ZTU4LTVM8R1DxAsKpMxZo8hlfHc/wAjmop/EzMvmh/NjbhsHhSR/Fjpn196seLvDn2K1kt2VltJV28feibqCOeRXmjX11odx9lnj8qbZ/A3yyL6g9wa1ua0qcZrQ3vE0P8AaljImMFRkBq8ylZtjxOPmUlSvfFdnHqoEY/e74s/dzzH/wDW/Tmub8RQqtwZoxgZ5Uc/Q1hPXU9bD3j7rJPBwRPIYAeZ5vX15x+tew3Fv/o6O8eUdQPujuP8a8K8NXTbpfmwBIWC59R2r6DtZI9S0G1uAc/u+cDA+tTDVDxUXGVzgfEEdlqKRwWMYE1w4UbRxkkdvx7V7K3w8s7bwTDKv7oRCNZB83qoBwT/ACzXh/w7sZNQ+IX2CV/3sTOwBPGVQgfrX0jciebwy0L2rgLtOCCDwwJHIBI4yNuemMevx2bYhvG0KSeiab+8+6yrCqll1etu5Jr8D0b4T+EoLLRrdtnmZUNuHHP9f8a6HxDeJYxthlRscD+dcx4S15rLQ4Yg5AVQAFP59Kra5rDahdLB5h+7nkdPxz7V9ryO5+TSl0W5z2paf/bH9oTuNyEEHd054rmfFEJj+GNpNGcNC6nBPACk/pXqsmmw6fofzBVaVcnbzn2rlNQ0lbr4d7QRwCQOCTwew57mp6hFtbnkfiiGJ/suq2uNmVk4yCMdQR9AD+Brd+JMA1z4YzTRpvltWSdf+AsM/wDjpNGp6es/h5XT5ivylDyGXuPr1wetVrW/N18K7qOUM22FonOP4goUHr0PH6VhiaftKUo+R7eW13QxFKoukl+ZxXhSENpNvejd5sbNG4HXHUH/AD7Vb+JGopP4LuHdg0gOBzwePTvWL8Pr+SfR5BxxggAA55x6elHix/ti2dgz/LLOiHB6hmwc/nX4m6LeLSfRn9eKqlgnPpY9G0z4f3nwzuIrLw7o1uPEy6Va31tdKY1lmM0WJAxkbOUfcCqggqQSQcinWLeLNa06+8P67azLdzGGe51KfYz3SRLtWI7JXPlgBcAjjaOfT6l+IWkjy7S7stNhvrtoktnSaNnUgZwSikFgDjIBzioJPBtj5KXY0220++EQDx2owoJHzKOORmvsalOMk0fitPETU1N7nyx4R02TTdYa3mUJPD+527hwQCMfnW/46vp9P8Oyi3KLPgndMeMn+dUfFF0NJ+KGqW6kjdMpJ7klFJP4E/pVL4lagP7FAVslgCByPXk18JXpv60ovufumCnz4KNTul+R4fpcc2oeNNKFxJ5kzXMbO54B+cHFfVOlRiHS7YSsXSR5Z2OedoG0Yz7DP4V8r+FZFbx9pAcZ3XKLtYDrnP8ASvqHWrprXRZDGAfJtTGoXOd2WB4+px+FfreURSpNn818aSdTGwj2X6nN6DoMscWs39wMQzq7jBPz9cHHpzTtP8BxXHhuOUJ+8VOTiu3ZWt/h7dZVVYQtzgZ6H/8AVWl8OoEvvDkcbAMjKRnA7ivobn5y76s8a17wJBdeFb8qdojhkZsjHIUsOfwqrH4DWb4Iw33mpva7hEuwDKIzBAXGehZl79xgda9m8WaHDoPhTXZpVwht3ZST0OCMD35rzzwJfXX/AAgt1pgELR3FsQwwCzPtBXAx1BVTnr0r5fOajp1KFZfZf4H2XDylVw+Iop7r8bM53w78GoLqGKSRCYs4LcgMMZrd1zwlaWlpBp1tCsMjPgYwBgfy6CvUfC+y38PpZyx7JFPLKckEjPHrXM3lmW8ZNOf+Pe0i44zud/mJP4EZr6aM+fWJ8jNy5vfZh2Pg9dQuLC3i2hnQkFgew3Yx68ivEfET/wBj+JtetAwAkl2KjezFs5H0P4V9b6Npa2OpObhgrLCW3dMMTz+mK+TfjdPHD4mvLu2CgS5V1zja2P8AHJrVvSxWF96o0+pH4Ft/7a1yFydsUH3Q3T0J/wA+1fUGn+OrXRNJiiWceX90HGDu+lfKvgu/+x2qtnawGSc44rdj1lpJGbcFx3ZsKD06f0/OpLxCc527Hut/8TZZjJ5b8DgOzcdevtWN/bjXMm+4uHldj+7LAAe4A7+vFeRTeJF0jdLlZ2H8UmSq544Hr7n1rpPAPh7UvEVwdV1KaWOFz+6hcEO+ecn0FUmc0qPLHmk9D0a31Ce7mXyI2kfoP6k/T+lew+CdBlttOie5LD+JV/kW9+enasrwH4PjVUkMC+SP4cfer0i4jdYdkSgHsPSs6kk9ERTj9pmfdXKLH+4/1i9V6V6H4F8ZWd/YRWErrDcxfKqucb/Ye9eVX1tcwtJJ91SvPTP0riL/AF65sbqRJuWH/LQYUjB/DpxXDWw6rRselhMbLCT5kfW8j+tVZpK8r+HXxOlvpItL1B1mfhYrhW3Z9AT3/nXpLyetfM1YSoz5Jn6DhsRTxVNVIDZpPeqUrdeamlaqkjVC1NXuQzSZ6ms6dvercjVQnategirM9U5m61PM1UpmxQIWN/mq5DJisiOfnrVqObpzWtrgbCTVZhkxWTFLVyGT3qWrAbEMlaVvL05rDgk9a0beTpUFG1DLVxJKyIpKtxy5qWgNKOSpUeqKSfhU0b+9S0FjQjbNTq21cngDqapxMa8f/ai+KeofDPwfaPp42G+Zonm5yo44H1z+la0qbqyUEY16yw9N1JdDkP2kvGkfjC6Tw5azxjS7b57mctndJ2VQOpFeAzaLarGbaCEJ2MrY3H69h9P5Vht8QJdWdpCxJY8Mxxn17/y/StDStYkmvIzneenXAA/z6cnjmvrKNP2UVCJ+eYys603Un1Os0PQbyLaojzGO/Ofy9K9K8OaHdbQWRUwPvSydPbjmuKtdfkWMNkjHC+5/x/xp9142v5E/cv5YB/1jHoK6Nep4/Mr3PYbcLabfMvIo3/2BkfrWncatbTWxhnuo7hWHII/SvnyPxXcXkhVrh539FYmu48L6fNdIJLrzCvQKxxge/wDhWTsbKrLZI5P4m+BoVEl1YJ50Dj51HOPUYHUV86+MfDqXyvC8ZIjzsYYJRvbPqK+tfHF29vY+XbMNqH5dpBz6ivn/AMXCLcb5F2oSUlix+R//AF+lbwbasyqbcanunzndTS6fM8Tj5lODjofcfWmyXQks2Vju7Z/2a3PGtjF9sBl+W2k+aNx1Ujqv554+tcZCMWshJzuJC+lc8rp2PqKdpwUg0GUie6xjG/pnHFe5+CdYDeHDbnO+LjHp+FeDaGxhuWdQXZs7h/s9/wCdeg+G9UaymHUwSL19Af6VNKWg8VT5mWZJH0fxd/aCId7PncvUcjp7/wCNfTHw6+JiapbwaJcxQt9oQCN9o8uTpkZ/gb1XHXpmvAb63jv4xKvPHYg1p/D7R5dW8
QeSt0bMxRl1kwDhgflyD1GTz7V8tnOXqs1V2a6n1WS5lyUnRe3Z9T3uw1bybiSDdvaOVlIycDGR6+o/TrVjw7c/2hrDem/AbA9a850PWW825aTm481/MYNwzAncBntkGvRfhapuJDLt3Fm7A5PIr7SL9xeh+YVock5W7nX+NNQ+xXUFuoJMkJAVfXcp6fjWNHb3ek+GoLa/hEbzxCSPkFWjIznIJ9fwzWl8TNKkumtGtm8i6ibfFIVzsbAwT9DiuE1PXfEWpSxWmrpZqsCtia3ZszSPhi5B4Xleg44rHXmXYmMYuL7mVbRJ/wAI7PyDlyT3J7dhXP6BILj4ea9H822N5N3HXJyR+prtLizTR9BWErjYMMCPbFch4fs2t/h94gcxtGJJJCuM46D9KqWzN6Omvmjzv4YqP7BuAq5ZULbj7c8/l/OmeNZ/stxaSkFUilRy3I6HIq/8GHW4slj6LINpweuf/rZ/WofHEKXmhyRnAntwQ2RywXoR+HBr8TlLlxzv3P7DhH2mXqK7H6D3+Zks5o282N2RlxyNrDIP8qmuIX8tiwxmvGvgX8ZtP8YfDnRIWuVn1XT7ZLW8hjy8sfljYHZAN2GVVbcBjk89q6nxf8ZNE8J+HrrUbiaSSCFcswRkGecICwGWPICjJ68cGvr3rqfjDpyhNwa1R84eOpmvvjB4jQbNsV55Dng5KqBg/l+lYvxU/wBGs4EyBIyDOcY71geCdWv/ABX4l1rxBqC7ZL66e8cL0BdiSufTnA+lP+M84j1eOL5lMcK4yMYHoK+NrRvjrI/a8HJ08tXN2S/Q838P3j2fjfTrgrny50I3dPvgZ/WvqbVo/wC0vDN4kkRYsdwXbnAOGz+Bz3r5C1STffQyr+7zu5A56YBA+vP4V9feEb6PWdEtsAFbi2XOBnBIx/Wv0zKJfuWj+f8Ai6CWKjPyt+p0VnKb7wfcQONwaFlAPP8ADWx8M9kOhxK5O89CT1rg/BepNNp91bOzCeNmSRWJyGB5BHUcjofWrHgHVnsvMtnLAxSsoyOAASPX8fxr6C10fmc/dbRqftCa7Ba+D5LRH3SXOI9q5JI4yP5D8axvANjaaTpGnpdPHDPNOkfkmQbhGDjcwB+nbtVD4/3mzwnHexDzJIZQ4A56HP8AOvN/AuviS6inlmeW6lO7KvjBz347Z6V8jn9OUoR5Xorn6NwjGD5+be59G60sOktEsYZU2qSx6v1yetY2l2sd9r0Fo43KWM0mO5Zs4/Dp+Fcjc+NJ9X8QQ2U1wsyw/elVQMkAcHbgbvm54/nWro+qvZ+IpLpZULsNqo2eFHc/iT+VfQZapPCQcnrY+Qzen7PGVYrubPxG8Xy6D9t8lkSXnHqewx/n1r5A+LV95lh9oLEtJIvy/wCe1e7fEjUBqGoXkpLbVkZQMcY7fj7fSvDPiZZGbQ2bbhVkXd/30Fx+tei/hZz4OCjJN9Tl9N8YW1naojTMGOM7QT+dblvrxv440tYnkZm2gYxk/wCf5159BYhngVRkNyAPr0r2TwL4fjtcN950+RmHYkdPbv8AgKzjd6HbiY06S5ranS+AfATapdw3F/tmlRt6xP8A6uP3+vvXv/hXRF1C+WGMCOGM7XdMdR2z61wmg2rWNqJWxFERuMnfaOy/4ngV1uh695EICOsEKDC9AFreztZHzk6rm7yPdtN8u1t0hjZcgAY711Gj2fmEee4jXOSOS341434b8UNJJGLVXkbHLZ5PvmvR9InubgAu+AR9MVzuNjanU11R02t2VjcWwwAseOnGa8W+I2jRXNnIIEZZQ2VwR0HTntxnrXoetajHBHsAaSULn2H1zXEatJLqUbKykq3HBPeoi7MqrZ6Hlnw91C/svGFrboZHE0i4TGF64H/6/rX2VKxVjk5r5U8J2B0nxfbSuFLQ3CsBjnqDX1NMw5IOR2IPWvKzSKcos+pyCTcKl/L9SGRutVZHqSR6pzyda8ZI+qGTSDms+d+TUs8vXmqE01UIZK/vVKZ/50+aaqkkm5h9akDNhuquw3Ge9YEM+T1q9DMeK6NRM6GCbPer1vJmsK2mPFadtJ0qdRG3A+cVfgkrJt5OlXoZKixZrxucDmrUMhrLjnq1HNUjNWKSrUbCsqOSrcMnNAGrE1fMv7d98sfhDQYM5l82R1/Tj9P0r6SSUKuScAcmvib9rrx7/wAJP4qbRbHfqUdhECwtxvETHO5GI4BHHX1rswcf3yseXmUksO13Pm1NXlgUHZ2xtyfTuc/pWt4e1PU5rhJEhmc5wFjUt+gqHQdOv47pXazSI4wokdSFGfx5+ua9Z8LaXrE+3z9UjtoB91Y495z+OM/lX060Ph60lF6EmiQareLH/oV3McY3CBiBn8Meveu30/wnqdxsM1ls4AG4LuP5/lxW5ovw/Oo2g+0eIr9HPPmRlFA9uhrb0f4WxWgBHiS8uZB0kOAefcD9KTl3OVU+bUm8O+CWh2NcqCeuADxXVz6fDFblApXjGcVVh8D6wirJYa9MjqOk8QZWP4MDWXql94u8Pq7X2lQ61bL1l09mMgX12kZrM6FRtHY53xdp+21dgQNvUMe2e3uK8V8WWqNHKFKtC2c7cEH617Jq/jTR/EVjJFbXHlTxp81tOCsmcdMHrXh+s3wW7lgyHhdumcH9K6ad7HLa0tDx3xJarcQz2UvDZzG3v2P+e30rzG+32n7hhsZTt/GvZfGEIW4eSPl4+Q36/wAq8W1q7e41actkhfkB9MYFZ1D6PB6rXYl8O7Y7hpHHyD/P9K7W3t5IoIJFX91JzsXHy5HOKwPBmivfyKJOIvvN/tc/yzjmvTW0+GOxDPwQAqBcZ9jjr9KiEdC8RUSkYMC3QxDY7g8h/wBWo3DP0rqdMj1TwzfWjWrxJeTKQ26Pfgf3ua0PDfh8afvvrobNvI3HAUep9PXP+Ip1rHJqmsmZywDKPLzwUQdMjsTy2PeiVNTXLJXuYQxHs3zxdrHTafYLZ2cUJJMijJZh8zZ5JP49a9l+FemPp9j5x+4q8E889a4DwzoL30yNsbaMEjrXq8l0mj6UkUbBd3G0Gt7cuh4NWbkyv4y1CRYjKnynIJ2/lXDMi3Aed2J+RT174Oa6vWJGvtPfbk8d/pXDrqSW8aWshCyTNtXNBnG72JNYuFktZsKSAMnjO0dM/p6VjzYs/hnehiQCsgB5IJx1/MVsvB5ml3D7d3y4G4gA89CT0/8Ar1zPi+adfgtcmAiNmjkDyZDELlgTj6Efgc1nUtGMn5HTRg6lSEF1kjzP4JXRjkiU7QHGV2npycdPp+prtPGGjrHcTQtujhufnGM8Eck9eoP6GvMfhLKdN8WQQtkxTPhN5Jyck4+lfR/ijw6NR0kSqhMqDergZOcf1FfhmZVFRxd+jP7Nyqm5YRRfQ+S9f0W78M6g8xWaCWNtyTREqAc9QwPH4GrVrcap4uu7VL/UrzVDtCp9suHkCfixOB9K93vfCcXi7wrds0YJWFwDwDvAOP1H6iuX+G/guO3tVvpI9yrEGC8Dt1HHuK745l+5kno1ock8oh9YU0tNztvhv4VRYre1
zxGwlk2/dcgjAyPcfpXn3xwVz4muBuyPLQBu3SvfNBsItM03esYDyDncOcdQK8E+MiltWkm5zkbh1xxx/n2rxsHW9pibs9zFUVHDNJHkF4oW5tWcsgEi5ZeDjOD09q+ofgxfKfBmkFXy8CmFx3BBIwcdOAP0r5f1AmSNgOOxr1j9n3xEDv0ya5Ybis6Kx6Pjaw5+gb/gdfqeT1NXBn898Y4d+zjWS2Z6rb3C6b4/vY0fbFdlbgR7MY3Dk5x3YN/k1JdmXTfE1wmMQXA81T69j/KrHirRDHfadrEahWUeQzYJJVjuU8jGMkjJ9RgVZ1m3kvLG2mVd0kP8TD5ip/QHpzX1dPY/Iqsk5X7ljUtMj8V+H7nT5/l8xCqs3AzgjP618sX2k6r4I1q4hlBdFfZyMgH6/XvX2B4T0x5ohMGZ1/i3HPb09azda+D8OqX1xcTksshDAgYHPJrKrShU92R2YHGTwjbizxPwdDc32pafeSW8kAG5N5bBIYggY9MqefevUNNhD+IMA/L8jcH0Y1cfw7a6KtlaR4UrKz528jCt3/z1qrobA+JvIEglL4yqHJGff8a1o01Tjyx2RGLxEsTN1HuyD4reGJ9Ps7dkj+/LvIzg4OSM/rXh/jK2E2l3kaghVhbLHAG7YCP5V9e/EKOC80e8eQcQxnGAOu0/1Ir5H8d3UcGi3zsSpKsMd+hGP/r1ad0ycPK8kjyPw3GZJhOcMITldx6sen5V7H4PhaBoXnZikxDOCcbuuDXkehgeZBEvdhn0ya9GsdRkKOhkyOFXA5xg5H5D+dTDY7sZeTsj0vUPFysu0P8AuIcKiJ3/ALoH/wBfp+NSWGr3Wr3cCEeVbsVRYx7989/5150l4ftcdush3qOSp5JPX/D8K9A0WeOzs43DfvBjcT0yT603I8yVNQWp7x4XvrTR7NPMkXCgfMSMnjPIrqk8fQxBVjkCsSNrKwyfxrxbQfBXijW0+0XWLG0l+ZZb1yh554QAsf0HPWvSNB8DWETItzfy3ckfVIQIUP1wSx+rOB7cVzSlzbGSjy6tmg3ieW6YSJbNcpNnEjSLGp9wWPz++wNRHH4ku2ElvpCYyO0gwPXLIq4+hr0bQrWx02Nprfy4JJBywHzt0+8+efz4q5HfW882DPukzgNIep7cn/Cs9bHTGKvfueSav4Z1QahDdyRqsgGG8uMkHB6fIzH9DXoHhn4iW50+0t759oO2OOYkEjnaA6/eHJAyR3FZvjTxUugyFpMXIdvl2kfMT35H+eK5weJre9urO+RUd4yGII6/5/TFZ1qbq07NHVhMR9Wq3i7dz2OaQjPrVOaaorTUItTs4rqBt0MgyueSPUH3BzUU0mM184otOzPv1JSV0MmkrPmk681JNN1qjPNihxNCOZ+tVfMHmL9aZPP71U8794vPcU1EZg2twSBWnbzHjmuetJK17eTpWrIOgtZOlalvLWBby9K0rebpzWYG/DNV2GbHGaw4Zverkc3vUspG3FN0q5FL71iwyHrmr0Mhx1qblGxFJ0q7DJx1rFjkI5q5FcEcUaDOX+OPix/Cfw8vLyORY3Y+UpZ9p3EcY4OcHB/CvgPWNRRYfL8z9xu3iLPVjyWPqSe5r6p/bK8P6jrfgSwvrIs0VjKWmQcgKcEtj2x1/wAa+Lbe+tG2iSVZJCD8pOf0/rX0GXU4qDmt2fI5rKU6qg9kdVoobhvNCDjBPfPQDn8q7rQbjUreRVkAEYONzt+Geuc15dY6tp9pcB5JYwF5/wBaqD68ke1dNa/EbRbEAy6pp6FRgbr2Nv0BJr1HaPU8CdKUtl+B7zofiC4h2IxAXpgcnPbjPNdfoOqtbyqzyMQevmf0NfO2k/H7QbCaPy9Us5WzhtjMR9c7f5GvQ/Df7QXg3VcR3d/ZCZjhGE6g4OOOSp6/XisJVIbXRisPWgruLt6M+i9M1hJlG4598g1cvtQ3xnyXWNvQ/wA+a8dj8QWqwrd6dqCPE3PluxHb16H8KZJ8XbO1do76XyJVxjf39CMdfwrNp3ujqhWduVifFbwTpfiaF5mt/I1Bhn7Vb/K+c9/73456V85a9DfaZqBs9RztHEV4o4bHZvQ/WvZdU+J0Oo3ZAk3wMRtHAPHQ15/8QfEen3yvGVV3bGYyAdw44x1x1/ya6qUnszGWstEeJ+LtYNqk8koJXeVDN/ER29+gryfm7vstzz+Z5rtfHCSLcGUtL8rFVDnIHXHNYeh6P9qlbJOQPlwMnPUgfhSqaux7uHiqdO503hvfbrFHFGZZiNo2jr/n+temeG9LmuJhezbJzGN4gZd0ZA67uwGOMnjOK5bwvc2unKFSPe33XiXBZxnuxBCjjHeuw0bTbrxRugllEMRfPkx5K8+pPJ/l1rSCvscNeUYu8ti1qbSeKNQ2WGXs4XzGqgcgYIGAOQDnJxiuq8L+EZl/e3K47jjBz6+5Na/h/wANwaDGFypYAfN3Ars7KAXSqUAMhO0elbcqjqeLUxDqPljsUdPjOmqNnyk8nNR39/JP95lAXk5PPPoPwro7nQPJh807mZhnb/SsZNGlvI5SV2le+cn/APVWW7IWm4abqAktTG53DH8XWuTuLJbjXjDs4jTcjcZUlj/h+lbtnG0EzoVICetR6dHH/bkssj5Y+WhjA4UfMQf1NBpHS7LXiDSms/CUkp/hxn6lhzXH+MNLVfglcS8MvkO3QEkgEkEZHYH6e9eqePriG38J30aj5hEfzyMfmePxrjfHFvHD8G3gkOd1q4P977hGcdxgVjP3oSv2Z2YZfvadu6PmPwQz3WpWewbRHJkODzknrX1Ob7/iTwK3y4QHPdun6+1fLPw/mNuUIH3dufpx/jX0Y+pCbTY1YAKRgOvUDjn8jX4Pm0XOrFI/tPKklh/M2PDFrA+iz7wEhd3Em3dkDcR17dPQnrXIaKFhXTY1RI7XyIkdogPLbCKM/TjNdT8Opmk0E7gJV85i4AJ3jecHgex781maNAs+g2bwsjkII225JGOOffj3+tec/d5k+/8AmdtNp1PkdlboLy0RVIGBgbOK8d+NXhk2f+leXmOXhjjjOPlP5163oqvZsQ3Jbg5ye9V/HmlQa9oUlux/ebfl5Ayf/wBeKxoVHSqqSOqrHmTg+p8RaxbyRM+PmXPf0NM8H60dB8QQzD7yMsg467TyPyz+Qrd8VWT6Xqk9tIdroxTkcHuPwrkJpltdQgnVSFUgtk9PX9M1+pZfX5ZxqLY/G88wXtqM6T3/AFP0Cs7KHWPDcLq4k8yJTuGcKSAc8gc/41maTD9ohkhmXa3KkHjBAwfpzW98GNRttc+G+lv/ABhBEyhvTgnn6e/WpNct49L15Si7YrhchR03LgH8wR+VfdxnqfzpXpOK1KPgO4Oka49jcj92549hnivWLjTVeIKm3aRkY7V49qTL/a0LxnEo53dTwT71654b1L7dp8eSdwXrzVT/AJjCm1ezPLviF4dnsmhvIo9whk3kYzlTwfrj+VebfaY/DOtR3kRO24/49rjsMj7rdgwyR7jFfUOsaLHrNmUk2g9MYr5/8Z+EZ9FuLryLRby0mb9
7ZMeOvVfQ556/TFXTnc2slo9jcm1FtW0Hy3Y7pl2yTMRxnv8AXP0FfMfxH8Ia19j1G2itvtTwhmZY+XdcZ3qvVhj0yRXokXiaXRIXNvPNd2ighreZv9It/wDZIP31HYjnnnPWrWm6la37LdmUSRqvyvkBg3bHt9PaiTOmhH2TufJWn6h5MiI646jpyD2NdVpN8z27yFtwXK9+P88V6x8QPhdpvjpNT1TSolttatomuZ4Idu25Ucs5AP3+vI69+ea8e0hY7GIFkyV5O8fj0rOMnsepNwqRulqdT4S0+91aeS48s+WzcXE2Qp7cd2OPT869f8M6hB4fuIrhmHmBh5cjLudmyOI19ffPHrXlsPigWtmHEbSy44LcDp045P0FM0PXJ7q8a4mbG47CzcYU9B9PaqVtjgqU5Vbt6WPoa88e6rrUkcNt/oMeAJPJYSOw/vFhxn6fnXZeHbi5h0yNrmeRyvygE5x+Zxk8dM9K8z8M3kNvZrNMwhiA5kkOCfXGen4VsJ8YNG0mWOO3ZZOSWkmAHQE5XJ5HvUSkoo4o0nKVoo9Vs9aurhR5SODGTiRhgdfUgVPeX2IY3mncSLhiVbYvB5HuK+dfEX7TMsd862NncX0wbKAbYkHtuIzj6DmvPPEnxZ8e+IlaSGZdNtyfu2qnfn/fbJz9MZrzamZYSl8U1c9ejleImvh+8+mPFmpRXtrIHaMqRyAePYgjnP45rgtE8RNoeoTQ3Ui3GnTH5tzAtGRjnscj/PSvmjWNY8QSxSLd6lqFxIvRZp2IX14JqTwzZ6nql1HaRT3V3c3DKkccbsfmPQAAnPp0ranmFKtH3UW8pnCXNKR+hfwo8WW13YyWqzo8TtuhO/JLbeRjtkDP4Gu4nmzmvIPgh8H7fwD4bsp9RhZvEMo824ZpSyxndlUAzjKgLz6g9q9QkkrxqslKbcT6fC05UaShJ3sJNN71QuJvenzTVnzy+9ZJHaRzy9aq+d+8X6im3EnHWs6S4IlHPetQMq1k6Vr28lc9ay9K1beb3pMmxu28vvWhDP05rBhm6Vfhn96loZuwznjmr0Mx9aw7ebpWjbycVmFzct5enNaMMnArBt5a04JulS0Wayv75qeOSs6Ob3q1HJUlXuW7i0t9UsZ7S7iWe2nQxyRt0ZSMEV84/Hr9jnw14q08al4csV0m+t4sG3sUUCZVBwAvTcSeWJPQelfR9ucmo/E2sQ+H/D97qFwwWC3haV2OPlVRkn/PrT9rKkrp2RHs4zeq1Pxu1Lw3PputXFhcw7ZoHaOReQQykg9fpVy18OMEwsW0HBbIOR+tdRrk3/CReKtX1N0Aa6le5246bmzj9a6u18OtBoNveOhXexUqDjsOc14OJzCUbXe52qnGOyOM0/wa97OFLyAIGI2rk8dQoz64Gc4rsNP+Ct/qkZc6bdlZFVieSDzwGz93jnn616n8LfCNvNH50sT7icqqnJ2joD6n29frX014f8F6auluwi2tIDuZ1yVJ449CPevh8w4grUpuNHUr2KZ8QR/C/wAReE0kk0nU7jTEjcL+5nYB2I5AUHawHzAnGOOvNUrzxH4n0NnXWrT7YjrzcRoqOR64Hyn6cGvvqP4a6dMryTRNNuyo85QOBx054Nee/EL9n+11SwnewARwvyIxyuf909f06V15bxbXo1FDEOyPMxWBpzV3G58k6Z4kjvYVltZW54IA5U+hFUbm6urq+kmuHcTFsoq8dAMHjtUfxI8M3fw91hZoleIR/JMGUgP35Hb/AOtVvwXpd7461qxj0iKS8ubl1jiiUYZnPb8Py4r9tweLhiqSqxejPlqlD2ctFcwvGummHRbS48qRBPMw89hwxVRkZ9RvHHoa5fykjs43Vj83I2MQcen8/wAq+2f2vfhLYeCfg94LtbCJFawlmjf1kkZELP75KfkAK+LZJGFmrFdgR8kAcdev5/zrpp1FWjzouVOVJ8jOi8NyDGMH5TjDdsd69o+G99bWN7GJR94YLHntweleGaNcCG4DOcIygrnoTxkV6Bo+tCEw+XkkYyewP9a7oPQ8XFQcme3ahGfMMsY3w5yB0/H0rT8N3whuvLLYGTt4PH69zXDab4ySSPDHavXbgVs2usW8kivCV3/7PerZ4/JynstjcQ3sOxzuyKh+zx2CsoB2nqTjmuJ0fxEbeRSxBHf/ABrqm1qK8t1aVvmrF6MsxdbjgUvLGBkn7orkNEkI8Sag0oLLC6IvABx5at/7Oa6nWLq3ihJDBXJzsHJx71wdrftENaumLRr9qKxs3UkIi5/8dP1oLj1Og8U6kdQitNPXm4upVEgGcABt3H021gftBeIBoPw/WxjkVPMh8pO5beAoH/fO4/hUnhC4j1HV2uCVKoMASICAo7Hrxx0rxT9oTxc3iDxxHYRsssVmBK/szAYU/QY/76P0rz8ZV9jQkz6XIsJ9azGlTe0Xd/I5HRWeKJY0z1wAB16V7dY2txceGQyysWdMBj823gEADvzivGNJGCQDuyevTHrXr0GuRLoMUTHYu3DKRzz68V+PY+7a5e5/WWAfLCx1/wAN7wJ4VDl1ZllI24I+bzOCF7EHv/D261X8F3sn2MQmMMu52LRjjG5ufocfzrnvBuoRjTg8MjrOl0QYlyVIJXBGBwf6HNc7pPiAWlv5ay+U3mkjb9T6+2D+dedKi586S6/5ndRnGM1d9P8AI+j7W6tobXeWjldvulOvPas/UoF8tpJG25OBt5PPpXlum+MJBIArsR+IxVnUviCLeMKx836L/If5615/1aakkj0Izjvc8w+PumRWusw3EPPmDafzOK8b1JP3RcLjILe3HWvTPidrh1uZHHTd3PQc8e1eaalKy24c9FYrtI46V91l6nGjGL3Pgs2cJVps+nv2WvGgtfD62k0pTcSgUd2UY9MZPH517prt3/akNhcRfO32hMAckB1ZfX3FfIX7N97KYdRhiZf3Miy/MemADxn2U/lX014X1d20+WP5ZJYI1kIJ+UbXyB+gr9LoPmoxktz+ZM1p+yxtWn0uV47j7Tq0iuxBDfLg+x9a9I8N3wtY0jLAn0HavHYNUE10lwhCrJI205GDgkcfhiuwtdemjEqxlVZlyG28g12tXifPP3ZI9ibVIbeLfJMAPTqa4jx5dQ3EbSRoOO/Y5xzXJ2esXEjYJYsT82ScfWn67e7tNl3y/Lgn0yaiNPldy3UclynlniaxhvbncyRi5YnbJjBz7nrXnPjTS3s0aRHCy5w8kZILH39fTPWu6vLpt0glByrdT69fzrgPFupx3DCFnxknczdh3/X+VaT0R34Xmckj0v8AY60efWvjFaLNuu7WOyuHk3HhcpgHGeRuYcVh/tP/ALPN18IteOpJMk2jajcyG1VM/ulzuCNx2BIHJPyk8cV6h+wJat/wnmvvhXjTTT87KNykyxjAOM88/lX0p+0d8M5Pip8MrvTbOFZ9TtpFurSMsFLOvBUE9CVJ7jnHIrwqld066XQ+oo4dVsO5r4rs/MCSP7THGN2EChV3EgADt06VJFeWnhry5ro/a7hjmGNRyx7cf1r2vWP2TfiLpu
myXR0aNIYYvOObmLcv4buvfrivnHR9Fn8Q68wuGZxuI2NyTg4ANVicwpYem53vYzoYGpWlyzukdEniTxB4yuFRHcW2cBIeij/f/wDiR75rufB/w5uXuY0mXaznIbBZye/PXNepfD34a2+nWKpbo/nEBCykAYwM49uRXufh/wCFukqySTyyQtEA3mRYzuDHHOM4wMcDnk8V+L5txNWxMnG9l2PpqGCp0VamrHiWlfBWNEhuvs/mKPnVijcfhWz4j+Gclv4W/cQQieMlyzDYZPQf/Xr6Qs/A9vAzKLm4CDO2OTaVHvgjn86xfEHh+BJTCEi+dcFo87WHuP4foMjuK+SeLqylzSZ3xjY+KNU8Dx6mv2aSLyblugbIYY6gg+let/sseD4fDJ8Sx3FmiajHLFsnZFLiMhwVDdR8y8j6V22r+BbO8WaNAwlEb+WzADDgYGffkjgj16cVj/Bs31jrmo215Mtz5ikLNn5m2kY/DGR+Br7DI8ynLERpt+69LephWpq1z15mxVWaTjrU0h61SnkwK/TTkRBNJ71nTS9amuJqzJputaAxtxN71nSSfvB9afcTVSaT94vPeqRJlW83StK3n6ZNYkLVchk96Bm/BPV+GasG3l5rQhkqXoI3beatK3m965+3mrSt5elZsDoLebpzWjbydKwLebpWlBNQVc2o5KtQyZ71kRy1chl6c1IG7bzInLNgV5P+1Nr0tv8ACfVoxIY0mCxYAJPLD+gP0zXpEc2BmvB/2tr+4t/B9rblgIbmQ4VeWJ2ENn3wwA9Mn145MVLlpSZrTfvI+NLVUV7mRU2hiqqMcDOR/KvaNe09P+EUtLaBo5pVwR5QPyrg5z68c15hfWiafo9szFQbiZc+oXJAJ7dOlepXOnppejwyo3mK+0yrg+oHDev+Ir8+zCak6bT6/kdqfM2dz4Azb6TayKrCWMCMqBknJGSf89q9r0XUHiW3Q75IJArggnBI6ZP1rwn4RXs91fbDKptXQyFWH3SM4BPvivozSLeGw0meeX51VckMSBjj06n+or4XF3hiHc06WJpvEwtZPJkjVlbcd0jkcAc4HfmmW/iKC63JE+5W4CZGSe3T9fpmvKPFHiqSb7Z9qKRQbWRWXGcc/eXHP4da8e8NfFe40fxpNpzX01tbyMJI/LGNueo69xkV0UcLPERk4boWyPTfjF8JR4u1RzO+Ipm2howNxHUgZ6nAPXjOK9/+C3wJ8E/C21+2eFbS5AvIIyJ7y6adivXIzwpJ5O0CvNvDuvNrthMxCsijK+Zy/GDgfmea9e+Cuuy6x4TmSZSn2O5aBFPOEIDL/M/SvsuGcwr+1eBm/dt+RzToQj79tTyX9u6e2t/hvoMbj/SpNRPlkdkETb/1KfkK/O668udpIkGzrge3XH4Gv0k/bR8J3Ou+B9O1O2R5f7NlcOijPyvt5PtlAPqRX5vazYyQzrMiFGQ5yMZHtX7Zg9KKsfJYt3xDT8rFHS9Q3K9rPwYjlK7fQ74eSsaEqOT6da871KGSaZZo0MbfeHHBHpx/nFa2laldxeWzxEY4zkf45rujKxwVqXOro9Zsr5Y4dpfJAwOMVGuu3FhKWjVjz95ecVxtv4muLdNtxDIsZOQzqR+RP+NW4NfjuJA5Lc87uf51vzJnkujKPQ9Q0XxgL1oxI+GPHPSutPiaS1sWb5WwOBvrxSHU7fcGSRfXLADJ+oxmtKTxBcbhDnMYXI28ml1Of2d3sd6viKa4kVpgvlr2zkc81g6x4mf+zHijPytcTyFunWRsYH4dfYVV02+WawLAgSZKmNgRXMyQNdyXyO2/bcyKMnsTuAHr1ps1pRXNqereF7qWPwvctZqFn8oksQCFHrj1xnGfrzXy5Mlxca/qnnOXuhcuZWJzubcT/UV9I/DG/wB0N/ZMhLhNqK3+B+v6V89fEC1l8P8Ajq8j8povMIlXIx7Z/SvBzWMpUtD7jhSpGnjJqS3NTS5pLdwGj3jGMH1rduvGX2eN4ntwu75gyjp74zXAxateKBjqevWrkOpPdKYJATu6nOPyOf0NfByw/NK8kfu1HFWjywZ6B8NNSE012CN4VvMAJBG3acj+v4VzVrqhhkuInKyFZXXEgHXJ56detM+H88tjr8iSOCroUKudoOcKRyODhiPqacukRahfXEUzNG5mYKykcfMfaspQjGcr7Ox0Uqk58qW+v5jbi5K/caSP6OT/AC6VUutWnaIKLiYA8H94TkdPWrdx4TvTzFcLKmcDdxnHc1kXWg3zH7qkj+LNa01B9S6spRXwmbqV4WVQ0jSYPLMc8Vnag6tZgH+I5/PirGoafcwqyyBQfTNY80beSxeTOD90V6tOK0Pl8TUkm9D2X9m+NFm1hwyozqEXIz/DnBP1B6+o9a9ytLlrOyuiGKoyYPYkZBwPw9u1eH/s1xN52onJRjgDnvjg/rXsfj4jS/C7vnyZ5cxhSp+YsMDHHqf0r7jBW+rxR+C55eWPm/QwdNvmt49PtySAsCkbuh+UBsfj/Ourt9ZzCWZznAG7Nefa7dJZ+Qquu2MhVYepzwPyFWdK1UbwskjSKB0YfKD+NejbQ+dlHm949T0u4Mi7nbLdt3pWX4s1wR2/lhhsXOMHgnFc9ceLkhh5cAY5IrjtZ8USascIxFqhP7xvX0+vtU6ImnScmJq+sfZ0kLPgn5mY885/lXCGH+2Z5Z3fcSpEcbddoycgfTNLr1xdapJ5cZby1HzEDrjP4D6VJonhie91CDO4NjJYeg4x+fH61zSk5M96lTVOF27M+2f2DfBMui+Fdf1+5UrJdzrZQnsyx5ZyPX5mUZ9q+pHfbXmvwD019F+FOg2jbFURs6+WMBgzswJ9/m6969AkkNfK4mblVkz7HBx5aEfNX+8wPiRrZ0PwPrV8Cvmw2kjRq0gQM4UlV3NwMnua/NbwF4TuJrmW/QeewBYrGm5hn0568fXnpX3p+0XqS23wl15ZEaTzYvJXZ2ZiApPpg4r528D+BYdN06LUIrqS0WZFhkhbBSQZyCRke3Oc18Hn2M9hanfdHq043Vzo/A8P/CI6HuuMCWZjmCSTLKg5A445PPpke1b9n4/3X9sqzb4vlUBTk9D1+nQ//XrxT4la5c+Gbq4SG9hltFYqYY0YbsfxDjB7dCD6j15HwR8Qo/EHiC3eKSSNY8lo1wBu5+XPP5/pXw0svnXi672Oq6Wh98aXrP25AA7EMNxbPAz/AJ/lWN4qb7PeQXCx7wp2KOdpU9jj8a5LwZrF5HZxNE/mJtHysVU4PY9P1Ga7q6ul1PSVAiYOVLZkBGx+o614UrrQRx91Gs0ysH2q+NzdOqqRkjPOOPU+1cV4PWZPFDQNMri0u5I4txIk8nn5SD1HzEgg9uldpqVwtrdQWzsWLIr/ADf7xPc+w7VxOoNFpPjKK9kLRQzFUeZR3BPzH1Pr+PrXs5XVjDFU+fq1+ZjW0i7I9Mlas27kwKsPciRAw6Hmsq8m64r94ieamU7qf3rMmn+brUt1IazJJOTW1hMSaY+tVGm/eDnv6UTSVTkk+YfWhCKEU3TmrsMvSsOGfgVdhn6c1
Qzdt5avxTVgwz1dhuKzcWBvwTVpW8vTmuft5+lVtY8eaN4Xwl/eqk/aBPmc/gOn41M7RV2NRcnZHeW8laMMleSt8WreSENYxLIx6CRhn8cH+tMg8feIrhd/2vTbYsfliGGbaenPPavCq5vhaWl7+husPUPa4ZOlXYZK8Uh8faxZgNc6hA4zztjX+grQh+J8zL5n2rbn5grKF7dAOtcTz7D9ISf3f5lLDTPaI5RXzX+2FfSQw6PhmcMCuwnoDwccfT8cV0dx8YLyOMqlw8kh7JbbiP0xXiHxs8d6v4iurVr8F7S3cMkZwu09zgHqcDP0FYzzalil7KMWm+9v8wVCcNWcF44Xz76xs4vlgXBLORyRngD8vzr2S7hWf4eW+44aOLAbvuwMYPcZxXiPja4FrJGzI2XOIwBnjOSc/lXp2ueII5PhnabYYzOIvlkYYIOAQCPrj9K+bxkHOnRt3NoWsdt8FbOS4tYZUlVlY/NGvRyfujd+HYHvXv8AdfZbTRlWVsS7AJHOQv8AujP8P5dK+NvgZ4m1hcGJGvli3NExkOEboQBj39O9e369421+zt1kv9OWKJto8tjuD55GDzz2x+FfMZjhakK8oXWp0QszjfjRrj6bazfZ5WiB5bsD9B/T3r5usJ7ufxBbzSykDzBlsDucn/PvXv8ApOn2fxf+Kug6JNI0lqZfOuo1GFMaAs6Y9woXPvX0V8Vf2e/C/jTQ7y5tdPt9N1m3gJguYk2qdgGFdQPmAVdo9O2K+4ynCOlhG7akSavqYHw/tXvPDEQWQEtDn92ML0xnAHtXp/wTU6Tq2tWEl2pWQRtFDIwDOwzuKrnpg+/6V8+fB3xFex266d9rQiXbD5hALHHGVHA75/GvbvAEMPhn4iQLI7XEl7btAJZTudCfm46YB24rwMri6GZQbfX8zWUHyvQ9a8T6fBq+i3tldRrLBPCyOrDI5HpX5OeM57Vtc1SFAfJSeSNFON20E4r9Y9W1JbOMMQWOcDjjPvX5K+LvDd3pepX8VxGyzeezNwcr8xH88iv3XL/tI+OzJRvBs5WOzWSPbu5zweMqf8/yqJYLuG5Amnyq8JuAI/wzVhfMjkClSQOvGfxqT7UWI649K9ex5SfzQ61ur+3k3LKoYc4IKkj6jFXYbyWSQi40+Cbd1wi5/E4pbWOC6dCqnOOWHeui06xjbGTj2IHNWos5pzS3RT0+30+dlDx3Nkzcfu3O0fgSa2Y/C86sZLPVIZ1jb7smd30JFdNaeH7XU9N8m3x9oZuW/ujPPSm3HwvmKhmndWHTbJ/KqscftE+tjFjh1O1feYQVAyfLbI//AF1zn24QaxdmQ4V3WdVz6gKf1H611k3gXXYTmLUZ9kfRX+Yfhmuck0DUL3Uks7xInLKx3qhBOeO34fmfrT16GkORatnoXw/vof7ZtrmzuF81SBNExx8rDnp7gf5Nch+0tp9pp/iLT5o5FMsyvvGBgcKcZ7nknp6U6y0ldH1S2S2tTG0TDzrhTkLnopBJxnjoPSuj/aa8KG68O+H5ZozFJ9skjDL1KENzj0yq15eOSlTcT3Mnl7DFxqLY8ES4jkZcALtGBxk9etPVnuAPmUEDqw5HHP1FZ114aa3+UOUKnaysQc9O4+o/MUxdNvLdBKGYxkkLzxnGa+TlhX0Z+t082j9qJ3Hg3zf7Y3I6GTyZPvHbx8q9jyfmqxqDRi9nkRlVjO7AZ+7+8bHAwB0zwB14rhtO1C80+5dnTzEkQx4X+Ekg7sfUCi4utSub2WSBXWFmZkXkkKSSBnHJ/wAK4p4KpKTZ6VHOKFP3j0i0vHki3OVznhuDkY+tRaleRR7mLZOcLuPX864NJtaeEjkJ0C7N35dv1qvcW+pXylZjcTdtqkAfiBn+dKGXzvqzatn1C3uptl3VtWhEjIHUgd8j/GuavLkNldmNxznGB0z17d6uN4fnj3GRhGB/CTgj61kXtkq5K7mIOPavYp4VRW58tiM0dV7H0F+zda+XpV3qE7KFkkMakj721R0P1Pb0rvvizq8cd1pqSfvI/N3uS4HCqX7/AI/pXNfC/SprT4feFngjiAjLX08mQGZWZuBnvhhn0C12fjPT7HUPD9veCFpZbWVJSQ2SI2YK2f8AgO4/hX1uHjyU4xPx/MKjq4qdR9zyjzNR13ynt7KeYEB/MCnaOh6ngdOue9dPo/g3Wr6FJGjMaE4HloXOevUkKP8Avr0rZXWLqFoxYWEIQ5XfdsGYDOTgDJz0NdBpuj+JvESZk1KSC3VSSkSBFHTnOMn8OfeuuVzzXLS2xzknhOw0RZZdSuoImxhfOfzJSSf7p+RcDngMf51w2s6lAt0Ps9u11k4Etxk9eOhIAH+eK9C1fwxYaTcGRmae4TDCWQ5IPbnt0rjNejjkLbU2L/Cg7fX9PyrCSvqdFGWpyo1VLq6RJiXJbDKAAMda7m11CC3WR0Qb3zgccDdkD+R/CuJj0eW6uN0YJG7jHHOa98+DP7OOr+Mr62vdXgubDQ1wxmb5Gn4PCZ5I464rlqVY04tyZ6ccO68lyn1/8J45rb4daCk+4S/ZVJ3AjqMgcj0OPwrq5Jay9H0220HS7ews1ZLeFQqKzFjj6mp5JuDXy1SSlJtH2dOPJBR7Hjn7UGtw2Xg22s3nKTXt5HFHHGfnfB3NgdeAP1FZHh26g1Lwc0M9vJuXbmSRed2QFbB5649uK5/4/Rvrnxb8OWdxYzXtjb2jSRRxoXBkLHJIH0Uf5NdTeWVj4Z8N3Eun3AiRh5stjdbox2zsyoOflzgYHFfl3EMlWxUYdUerRi+S587fGbwtrfipjpWhafJeXrFt8douAvYlz0HOPbNePQ+F9e+G+pRw6jYS6fLMpYJJyrsuAQCON3T86+4/gvNHrja5rAKPJNJEnmKoBwAxx7feHHtUH7S+gW+qfCTWJPszTXVqqT2/lD5/MDqBg9s5wT6E19Nl+E/4T0nu7s56k0p2PNPhj481OGxtUmuoZ4gFIWFwSA3IGcHr/nFfQGlavDrFjLNDIqIilS3C56YB7HBFfEvwvvpriBY7kJbvGNgznJI4IB7Y4PTHWvo7wPqBhtXjYrLH2LZOw45we30561+d5jhlSqtR0OiOu5u6lrcS+LLIxoVWOE8qmdoBx/k/X2rnPG2sQQ2d6qoJblQvlxtjYeeMjuD0xjtXnXiXxZcw/EG9uBJJPHaxrHHgHAc5Yj0Bzn8qn8C3X/CWeILx9SuJCioCI4x/rSpHyA9gN2SevPvTwOVzq4mkvQxrSUYs9x+0BoVPTgGqF1MPWoftZZcmqlxcZHWv3eOh5KIriYHNZk8nWn3Fx15rPnuPetCiO4mxmqLT7pFHqQKS4nrOmuNrZB700iSjbz5A5q9DN05rCtpulX4ZveqY7m3FP71chuPesSOf3q3DN70mB0FvcHsaxrn4f+H9Qmeaa0YzyEs0gmfdk9+tWIJversc3vzWMoxlpJXKUnHZnGXnwvu7dpX0fUFAbpDcZB+m4f4d65eax8ReG2b+0rKaODduM6/PGMe65x+OK9nt5eRzWhHMPWvGxGT4atdxXKze
OImt9Txmw8bSs2RJmNedxPf0p8PjJGmMZWGHc2A7yDLH3FetXGgaVqCslxp9tKGyTmJcnPfOOtYF98GPDGoM5WK4tSw5EM5x+TZrwKmQ1I/BJP8AA2jiI9UcddeNbC1UsWa5A/55DCn0xg15j441b+3Y5kt2AGMlR1+mO1e+2/wL0BY1je5vHjXoqlF/XbVx/gR4NuFxLZ3LE/eP2phn34wM1FDJa0J80raeZf1iJ8oeKrkXWn6TNK+XeFVwB8wIyp6e4/SteTWj/wAIvFav8235WJ7jpnnoa6X47fD1PA+r26WkZbSZmE1lmQu8WPvoxPXDHg5PDLk9a84mU3+lzyQysAr4ZMgZIz1464z9cZrSpQ5bU59GONnqjsfhD5tvDcmzvFtS5KFjyeD6e4x+VfT2g6HJ4j8Nm31C78x41Dr5XJUnufM/p0/GvlH4KN5PiGRZ5jnqyk4HXrn8RX234XgT+xXhKsJGQ8ddwI64r4fiCTo1tPW9jemcZ8BvBMfh/wCMGqSKizRRWTETPFgozMoGOTtb7w46jPrX0dfWq6lpt5aMocXEEkJVicHcpGOCD37GvE/hHeXem/E/xPod0WkUQpPDIWJ+XOQDk/7R577TXtyyEd+a/QcnbqYKDk7tnNW92eh8ofCDULPUtWmi40m6huArRL9xRn5gC+4nHPBJ+vevXPFF5ZxeNNFfS5FN4txGGuJpxsiUEHc3I468VwXxc+G9/wCBfFd34y0a2mvdDvJGnv7S35e0kb70gUAkxk/MSPu88Y6Z+s63a38NneWKwGOQhiy4YHHUjHXp1rwquGeFxF2uuh78JQxEVK/Q+vLmRJCwBDoenPUVwHjT4M+DPHXmSatodtLctk/aIwY5c+u5SDngd+1dB4We5k8Pae12++5aBWYj3GR+mK0pomZTiv0KE5WUlofKzgneL1PzS/ae+Gp+DHimBdOZ7nTb354/tJDMuO2QBnvXkFv4nSaZka38s5H8OfxyOf0r6a/bs8TQahrWjaYo33Fq8hfI+7wBg+h9vfNfJ775n+dgqZI2p35PUn3r0I4urTVmzKOX0ayvy29DrNN8SWVjCCzR4yTguAevQgit7T/GWn/MrRgnruhkDfyNeX3LhdyogJ5LYyc4qD+y1uI8LbuDn77D9a6I5lKPxo5quR05P3ZM9u0vxpDZzMyOyI3LDGDg+vNby+O3dVImKgZPzdP0r5zjt761UtFcyRjPRXJH5HrViDxZq1g5SRklH97GP5f4V3UswpVNGeNWyOrT+F3PpK38bXdxbhFnWQE7clvc+tZuhzSr4gilJ3PJhfXHP19v1rxGH4jX8SgiyUqPu5dgM+vArSh+KWrRSvKbWIA91YkLn866frlBdTiWU4mztH8T2XR9Nutf8RXN5cRSWVpFM3mTsxwVBOFQd2x/StT4wazB4q8N2drjyp/N/dDqSgVhx+Y6f1rya2+JuqXShTLDFGeNwBdvXge9El/PfXCTSzNKzHDMzA8kjA9hXg4nEqo7RPq8FgpU7OXQi0e1gurc20sS+cqMGbOPmB4P1+XH0PsK2pPBZWNYzJthUb8lckcZPf0IrPmkTS79ZApMZlwEzg7eTj+Y/CteXWmdYBnMcgVWT+8CAPz4rgleWp7cfd0MiTwrCrtuh2tk7iCB+PNSWfhF418xXbEny9eex/l2rrdV22s8crxsBMoHt0z6dfmH5VbufEFqscMFgizSIwYBRn8B6tjj+dZcz6G2j6FOH4Y3Utk7s6xIDy7DLE/if881zt34djW4MKyrhOrhcjOPXv8A/WrqNa8Tu0LWf+rdlJYKfm29znsMcfiea5S88RSizMNuAQg+XngcjP8AnpSi5A+UwNY0pFZow5VF67jz06n071xOtNGq7IR90n5sY3V295HL5LS3B/ddT2U4/p/OuVmsReSmT+FeFz2FddOfc5KkL7HsOg+IL2L4S6SttD+8Mawl06hRlfxPH61v6bJqEPgC9+1XaI7KuBIzFuWGBgZ7kjtXm3w68a23h1PsF+GFoHMkUuCQpxyMDnGe4rsPF3xI0W404WsE0LBtspCuDggZwfxBP+FfSYetCcU7nweNwtSFRxUdL3udt4T+y2awSXMrXMhQAHkBc9hn+lelt4qht9MMqMsRGcyMcYAHSvkdfi5LbyFbWNpQPlU52rnp1qG68WeIdcmED3/2dJGx5dsCzc9tx6fpWeIzDD095XDD5NiKzu42XmekeJ/G1ta3Esj3e5ctjc+PTGAeenPFcBqXjia+ZhaQFznhpFxjjGQueT3FV7HwWPtAedLiQscu0hy56fX17YrdPhctbB1g8rynAU4IbGAf/wBVeDVzqMtIOyPo6GTUqVnN3Z6R+yLYyeJfilpa3RWdYSXKSLkHAJ4H1FfopJhV9hxXxP8Asg+G/s/j5NRRcsiv5hyUbDLjOCMMueODkEj3r7QnmxnnFcftfa+/e53yhGDtFWGPJVeRyRimPNVaW49DzUAeHaV4lTxp8TNYupZlS10+U2dtHKcKFU/M31ZvXsRUnxe8YW91pP2JWF3M42Dy8uUJ4HPOOf5VzXjT4G+JbXxFql94Pv7a2i1Ny8v2mXYY8knAG1s9TgjB9egrpfhj8M7/AMEvNdanrH2+9uCDKsAdYzgcA7iS3JJ6Dk18VUyOtisY69R6XPdWKowoKC3O68E+F7LwToMenWiMpyZJnb70kpxuY/kPyp3jOyXWPDGpWWPmmt5FX2O04P4HFWGusd64H40eNL3wp4LubjT4Wlnm3Qeav/LIFT831zgDtkivtZxhRpNfZSPBbcpXPl3wXpM2taooS0t5I9uxbZowV3gncwAU8Y/GvoXSfDbWukSXDlrXZGU+zxlkjPB+Ygnpz/gK4/4M6NaabZ20zLJ9okQBdsmWPvnqc/lxXpPjS+/srRZ5VkIURFzzySAMD16mvx/HVpVKzhHbY74+Z88Sa5Y6fruozXDYt2ufLZQhLEjPbv0HX1713Pw/txca5d6lbwvDpf2cR2xcYLMzgscdjhQMD2rzTT9JW8tftcqu9xPclIlQncCTjdn1r2vw3pSaBo8NsB+9wGlYnJLY9fbp+FfZ5PgVLEKt/KYYiSXum95+B1qCa44qrJce9VZbivvrHnBcTdeay7i4681JPN6ms2eXrWhRHPN15rPmmPTNSzSVSlbrQhFCGTGKuwzGsqOTgVZjmq2iTWjl96t28nNZEM2auRy9KgEzchlq5HN71hx3FXI7jcKCzagn561fiuPeuegmO72rRim4603ZjNyK496uxXHvXPx3HTmr0NxWFibG2lx71PHLWMk3TmrcM1LlGil428G2fjjSntbn5JdpWKYdUzg/iMhePavkLxV4QvfBWozWtyvk3EYKy7QSkqkEKwz9R+XrX2tHJnvXlH7Q3hYal4XGqQITPbuEl2JndGfXHPBx+deXjcPzR9pHdb+aOujPlfK9jwv4LpA3iW6l3B5lSMhS2OCxBwe3Y19d6F4qt9N0cbdZt4olQgRs3mYbHACjqenfFfD2jahceG7q8ntI2jlmTYruuNnPTGec/wD1q7H
4b+LoYdeLahfMHmbMzOcblA6AZHJ6fQ18DmuWvF1HVv7qtpuehCOp9K+GfFn2PxhLqDo32uOVV8psqWiAwfkz8uQW9Rx+NfR0cySKGRtytyCPSvlqHwrDqEMF/oWoQySRfvY4GVYy2OcEquSfbJ579a9M+E/xMOuRtpGpWtxp2qWuIxHcJjeuOCD9BXpZHWjTbodOnqGJpc0VKPQ9c80jGDiufvvAXhfVpjLeaBp88jNuZjbqpY+pxjP4/wBa0fOz3pfN96+15U90eWpNbGtDcLGAqgKqjAA4A9qvw3KtjmuXkvFgjZ3YIijLMTwAO9eVePPil4gsNMXWdGkaDQ1lFu7+Rl3cn5cMemecAdhz1rkxWMp4RJz3ey6s0p05VHZHz/8At0/2NL8SozpF/He36QAahaw/N9mlzgKx6biAMr1GOetfNi6HeS4dY22jkNnCj3r6rm03Wr2xW7S30+xt5N1xI918zncTySF3biQe+e5q34d+DreKszWmqLZtyZJorKPaW7nf1B9yCRjvXzdXPp9YqK9b/oexTw8aSs2fI0mkJZgtcSeaf7sY4+nHWm2slxqszw2Fk0jIfmbAUD6k9K+rvEXwT0uzkZNe1SeSGP8AemKRh87A4+dsnjnPGOtc94I8A3Pia4CWmmQ6dp8dwVWaSMKojBwSqHqTx8xHpzRDNYTi5vW33HRy6XWh86Xmh6lChMiRE46LIpOPpmseaxmjyskW3PYg1+gerfs5+H/7PDPCZpWXPm7gDkDkZAFeD/Eb4AzaRC0mn5lQndhmBBA/EkGtcLnVNy5ZaXMJxjLQ+co7edV2jbIjdl6/lUwAtvlIKZ+UKRnHbByOld/a+Czc2YkO1ZclWU4DKRxg+9ZGoaHJZbYbqLemc4x/nH/1q92GMp1JWT1MvY8qObt4XhcOh2p1x2B9/wDGur0PVI/MCywF3X/lkTznHr6f0NYv2X975SOAQTjPG4Y61oaPpN5/aEMduryuT8seMkHPQfiTXempIwtyu62N24JuJX7yRv8ALuGVPH6jn9KyWuhb3CPJJtKNjBPOAeg56mvQ4Phdr90sEf8AZs6ySMoXy8qW3ZHryK7Sw/ZH8VvYx3RtVVmU/Ixyw79M571UdFqRKS6HkN94qn1XT0PKlWJVm+bODk4Hvk/pVWxvXt42y+xupYkZA9Pb/PPr6rf/ALOHimO8jtY7KR2furD0zzWV4m/Z38X+H4I57zTpEil4DIQRn0OO/wDOmkhOR57NePeL5KuwjY8/7Xv6mrLJDp8ILr5j54UkMc+/v/Lmt+L4c65p2+P+yrgz7RndGwwCAwJ44GCD+IqQ/C3xPeWd1PFpk0UdsGZ3wcrjII54HI+vf6P1J5kef3l4dSuzAw4HLYPH/wBf61OlipchR8iAZG3p2xj/AD71Vjs5LRXL/u+SDwT0yf8AP1pt54lFvZRwBVM54+Tkgms5vpE1hbdmdrDJAojiwcdc1z7RtIyt5ayyk7SvJ2/XFav2WWaTzbhgsWMhV54/z+daXh3RDq2pxRRw4DNgR4C9c9T/AFrGVaNKLb6FSjzMPD3h97pVO3c2cDjv6V6j4W8KpFhWt/t9yRu2pIUVPTL9Px6citrwb8L9UvYS8AjysZTygw3DgcA/j061qz6XdeGZmt2srmCRCS/2hTyQOufQdvwxXx+Lx31huNOWp1wp6FzTfhXr95IJ7X+z9jN/qbh2xjjo5Uj+ldTHp1paR+Vr+kGySNgomDLJDIf99flB9jzXO+FdS1bWtQ2fbfItMmMsW5JBGeM4AH9a9v1D4OaxrnhWQaZe22rkx7mtLhfLL+mGDYyO2fevlsVKanGFRq/ldMPYyveTMn4P31vo/iMXOh2NxqOlK7REwkfuiw5AZsAjgdPQV9AW3ia21C4NurNDdAZNvONr/hzhvwJr5W8L/Eb/AIRK+m0u+iurSa0k8p7EIY2jP0YHHXr716Zqcs3i7R3e0WVjJHmKbBEsTDkOrEJyD3XmvVwmZYrAyVOavTfff7wlglJXPYZLg+tVpJvevPdC+Id0PLstUjDzRYie58xFctnGWjJBzz/CDnBPpXYNdBhwfyr9Aw9eliY89N3PKqUp0naSLE0mc1nXD9afJNVKaXg16EUczIpp9uea8S+PnjCdv7M8K6dELm91R9825wipCp53Htk/yPqK9B8beKo/DumtOR5krHZGhbaC2O57ADmvF/MbVdWk1O4Q3F22B5pUIqqOijHRR7n0zXgZvmVPCU3SWs2v6udeHw8q3vdDT07Vl8Hw2Vte6rexq4GIN2IUYDkhMH9ai+I/xGaHS447iJpLKdhGZowAZABnvwOnpzg1xviaGe+1NLjzySowyKvBUHOQc8fXH51qXGkSfEZrOAK1no0AH2idmy7kfwJ7n1x3+gr4TC4KWKqwaXM+vkdc3GndvRGt8PtFsL5I/EEau0Uhb7LHJ/Bzhj785AP412803Xmq0aw2dvFBbRLDbwoI4404CqBgAfhUMk1fq2Gw8cNSVOPQ8ScnUlzMkkmFVZpveoZp+tVHuPeuwgkmmqlNJRJN71WlkoAimaqc0lSTSetUJpetUMzopqsRyZrMVsVPHNVtMg1YpsVcjn96x45ganWbHepswRsRz1bhuPesKO496uQzdOao0Oghn96vQz8dawoJelW458d6hiZtRzYq5Bce9YSXHHWrMN105qWhnRRzfL1qxDce9YUd5x1q1Hde9SkB0EVxnvU88cV9byW88azQyqUeOQZVlPUEHqKx7afOK04XJFNoZ418UPgzIVF54ftTNbKMS2Kkll5yWTPX6dfTPbw7WvBeoWRknhtcRxYV2MZyjc8NxwTg9fSvt9cntUkNqk29JIldH4dWXIYeh9a8atl6lLnpO3l0OuniHFWlqeA/BvxAzWAjm8uKXI3ImxOB0AAwcfTkmvTPEepXlnNZatDmK4s5FO+ZjgJnkEZyBXN+Nv2erqO+/tXwhLHGxJL6fcSFET3jYDgf7J9Tg9qy/CXgPx7eaxHFrNtb6fp4kVnme4SWQqP4VVeuemWr5OplmKWI54I9iNejKF5SPqzTdQS+s4LiNg8cyLIrDuCAR/OrhlxXM6K0dpaR20C7I4VCKgPQD68/rWr5xr7eN7K+54MrX0OC+NGvCPTbPSI7gx3F7JlkzjMY7nBz1/PFcXpviaPTfC1x4c1SEXOiS5BWBcPG3ZgfYgH61R+PXiaLw/4tsLi4LREwfupscAjIIB5559B1HrmvLb/4q6UdP/e6raLKwJZGbYe56Y9+xr5fGYeeJrvmTstvkfXYGlTeGSlbXUq+OvEF94g8caTYWga3glmS3WMMWbZnpk9eMn8K+wY7q08O+GIpIYPLSKLOCcADuT6V8a/B+TTfFnxCXU7y8i+yWLb41+8zyHIGB0GOTn1x619HfGXxdY6b8O7zyp0UGJlViBu3bTgcev0r5vNaTlVhh4K1l+ZpWpxk0lsjxa+8UX/xm8aR+HNJwBM217nB2QRgjc7e2APxIHevoK3+DsvhzRY7yLxTc3U1ipZIbyKIowAJ2fKu4KSScEtgn8
K+E/hD8TIfD/xCbzpFhgvAYmmLH5PmznjsTwc19n/8JgYtFd4J5HhdAPlY4I69M16lbCrCJUuXRo2o4eOIhdPqZVj8XbK+s1fzWS4ZSjwsDuQbsFSOPT3681a/4SSw1q1mDner5DLnk9SORx6fka+a/HWuQ/21M67YyzFvkK5z74qno/jSZd1u26ZWB+fIDfy5FeVUyyWs4Pc5a2GdN+67nq/iLTdDsZJnhm8gv94RnBk9jzk15hrs0V5duUtpFTbgGQHn68ZP5VPa6g91PJvON3Od446DnpXWeEfAl/491BNP0uxWUsQHnxiKFe7yMScgegOTXVhadSnJQ1kzz5WiryZ5lZeFbnULoLbLIjFwqRrndu5IAHXpX1v8H/2fbLw/pOl317Apv1IMyNkkEk5GfQZ/Ku+8G/B7w14Ns7RIrMXd7AMteTD55HznJA4xnoO3vXcg/MDjAHSvvaUXGCUtzyKtZSehLFHHbqAkaqF4GFFOaYt3NQNJTd9bpHJzFgSZbrU3ysuGAZfQjNU1b5qe0pA4NOxSYS2NrJuLQRsW65Uc0200uzjjnjMKNHIGBBHZs5GfxNN809KmtpBvGalplXPhH4ufCGTw94u1WC3tJFtYp38t2JbfGxypPHcHGfWvB73TBp98zMpXa21Sw6H39+1frFrmh6d4is5Le/tVuEkjMZJyCFJBxkc9QD+Ar56+MH7Jel+J9MkvNAZotVXkRzdJfbcB17AnPXmsZHTCqk7S2PiKO3EgO8SNuBOVGTjIA+n+Fd54JNlp1iDOSH80JjOAoJG4njPTH5GuX8R+Gtb8Eay1lqFlcWk8RwY5VIOPr0biorXxBAWZJXEJ6/vB715OKo1Zx5Oh3Rkt0z618BazpotVNk3mICN3y49hkc4J7GvR9a0fT/FmiSWs6M15937RIQduM8E8fL/Xpjmvi7wt4+uNDvvtFtdR/KORGBhl9CBjIr6Q+GnxZsfE1zE8c62t0+1ZIJpBtOO68c85468+gr89xuArYeTqx+86Yy1PIdN8THwh4ouNKvYniuLd9pt3UgjBLZ+hJz75FfR3w7+KMdk1nai5EsbRqCXO45Po38PUf/rry79oz4c3vij7J4i8O2ck+pxx7ZoxhfOiGQuO29eg5yR9K8OsNf8AEXgueNdY0+e1DHILAgcEg9Mg8ivUhRp5jSU6bXN1XW56cKaqq66H2t8cvh/H4+8PR61payN4htIcqY1G67iB5TjqRyRjvkd6+c/BHxbutAugJLpmgPyq/ORzyM4r379nf4sweOtNjszN5ZUh5E3g4PPQHnB9vWvmj9onTbbwT8WdasbXb9juHW/iWRcBPMG4gfRi3T1x2rHB03KUsJVWq28jW3KnFnvOi2lz8TvEEN/oZgV7W13T+YhLsWdQAcdhtPI6Zz6V2Ph3WLy01CfSdTJS6j5Cv1wehB7g/pXyp8M/jZc/D/VrfULCeM3GwxtHMpaKVD1Q4Ofx7V6xp3xkT4g/EbTby5istPjjHloscrSZPGAGwOc9AR688Zr2MLGrg6sWttvVHLWp+1hKLV1bTvc93aY45qrNN15qGS8G3NZ11ee9foET49s81+Ml1L51g0EjxzRnKbP7xIxz26fWuAt79rm8TT7aFr/U0XdJBB95enUj7o6dfz6V7Vq1jbasu25j3AdG7g+tYul+H7DwzZvDZxsA2NzuFBbAwOFAHQdTzXyuMyeeLxntZP3X+HQ9Whi4U6Di/iW3n6nG2vgmaZTJqkqwBh/x6WrEn6NIev4fnXQQxxWNukFuiwwxjCovQCrF1N8xrPml619LhcHRwseWlG35nk1Ks6jvJkzT+9QyT+9VXm21XkufeuuxmSzTe9VWlqGS496jZ/erEPkm96rSTcdaimmxmqsk3vSGh0stUpZOtPkk96qSNmqGf//Z" preserveAspectRatio="none" id="img0"></image><clipPath id="clip1"><rect x="0" y="0" width="2790825" height="2790825"/></clipPath></defs><g><g transform="scale(0.000104987 0.000104987)"><g clip-path="url(#clip1)"><use width="100%" height="100%" xlink:href="#img0" transform="scale(5450.83 5450.83)"></use></g></g></g></svg> | 5 |
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/sdxl_ort_inference/sdxl_turbo_perf_chart_static.svg | <svg width="2221" height="1115" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" overflow="hidden"><defs><clipPath id="clip0"><rect x="1828" y="523" width="2221" height="1115"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-1828 -523)"><rect x="1829" y="524" width="2218" height="1112" fill="#FFFFFF"/><g><path d="M2522.58 794.5 2522.58 1341.5M3013.6 794.5 3013.6 1341.5M3504.62 794.5 3504.62 1341.5M3996.5 794.5 3996.5 1341.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g><path d="M2149.07 1215.04 2202.07 1215.04 2202.07 1342 2149.07 1342ZM2640.09 1149.04 2693.09 1149.04 2693.09 1342 2640.09 1342ZM3132.1 1077.04 3185.1 1077.04 3185.1 1342 3132.1 1342ZM3623.12 993.033 3676.12 993.033 3676.12 1342 3623.12 1342Z" fill="#156082"/></g><g><path d="M2250.07 1192.04 2303.08 1192.04 2303.08 1342 2250.07 1342ZM2741.09 1112.04 2794.09 1112.04 2794.09 1342 2741.09 1342ZM3233.11 1026.03 3286.11 1026.03 3286.11 1342 3233.11 1342ZM3724.12 905.03 3777.12 905.03 3777.12 1342 3724.12 1342Z" fill="#E97132"/></g><g><path d="M2351.08 1181.04 2404.08 1181.04 2404.08 1342 2351.08 1342ZM2842.09 1096.04 2895.09 1096.04 2895.09 1342 2842.09 1342ZM3334.11 998.033 3387.11 998.033 3387.11 1342 3334.11 1342ZM3825.13 885.029 3878.13 885.029 3878.13 1342 3825.13 1342Z" fill="#196B24"/></g><g><path d="M0 0 0.000360892 547" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd" transform="matrix(1 0 0 -1 2030.5 1341.5)"/></g><g><path d="M2017.5 1341.5 2030.5 1341.5M2017.5 1263.54 2030.5 1263.54M2017.5 1185.54 2030.5 1185.54M2017.5 1107.54 2030.5 1107.54M2017.5 1028.53 2030.5 1028.53M2017.5 950.531 2030.5 950.531M2017.5 872.529 2030.5 872.529M2017.5 794.5 2030.5 794.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g><path d="M2030.5 1341.5 3996.5 1341.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2187 1188)">+0%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2678.44 1122)">+0%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3169.87 1050)">+0%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3661.3 966)">+0%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2288.06 1164)">+19%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 
-1 1 6.12323e-17 2779.49 1084)">+20%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3270.92 998)">+20%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3762.35 878)">+25%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2389.11 1153)">+27%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2880.54 1069)">+28%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3371.97 970)">+30%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3863.4 858)">+31%</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1971.73 1353)">0</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1971.73 1275)">5</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1950.83 1197)">10</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1950.83 1119)">15</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1950.83 1041)">20</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1950.83 962)">25</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1950.83 884)">30</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1950.83 806)">35</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(1 0 0 1 2221.99 1399)">(1, 4)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(1 0 0 1 2713.42 1399)">(4, 4)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos 
Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(1 0 0 1 3204.85 1399)">(1, 1)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(1 0 0 1 3696.28 1399)">(4, 1)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(6.12323e-17 -1 1 6.12323e-17 1925.73 1182)">IMAGES / SEC</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 2882.18 1462)">(BATCH, STEPS)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2437.83 627)">SDXL TURBO (STATIC SHAPE)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2327.6 716)">(A100</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2529.63 716)">-</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2557.96 716)">SXM4</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2749.04 716)">-</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2777 716)">80GB, 512X512, FP16)</text></g><g><rect x="2411" y="1558" width="22" height="23" fill="#156082"/></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 2442.88 1581)">Torch (Compile)</text></g><g><rect x="2770" y="1558" width="23" height="23" fill="#E97132"/></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 2802.34 1581)">ORT_CUDA (Static)</text></g><g><rect x="3178" y="1558" width="22" height="23" fill="#196B24"/></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 3209.93 1581)">ORT_TRT (Static)</text></g><rect x="1829.5" y="524.5" width="2218" height="1112" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g></svg> | 6 |
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/sdxl_ort_inference/sdxl_turbo_perf_chart_dynamic.svg | <svg width="2353" height="1120" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" overflow="hidden"><defs><clipPath id="clip0"><rect x="1864" y="521" width="2353" height="1120"/></clipPath></defs><g clip-path="url(#clip0)" transform="translate(-1864 -521)"><rect x="1865" y="522" width="2350" height="1117" fill="#FFFFFF"/><g><path d="M2591.59 792.5 2591.59 1344.5M3115.6 792.5 3115.6 1344.5M3640.62 792.5 3640.62 1344.5M4164.5 792.5 4164.5 1344.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g><path d="M2193.07 1293.04 2249.07 1293.04 2249.07 1345 2193.07 1345ZM2717.09 1201.04 2774.09 1201.04 2774.09 1345 2717.09 1345ZM3242.11 1218.04 3298.11 1218.04 3298.11 1345 3242.11 1345ZM3766.12 1069.04 3823.13 1069.04 3823.13 1345 3766.12 1345Z" fill="#156082"/></g><g><path d="M2301.08 1175.04 2357.08 1175.04 2357.08 1345 2301.08 1345ZM2825.09 1111.04 2882.09 1111.04 2882.09 1345 2825.09 1345ZM3350.11 1004.03 3406.11 1004.03 3406.11 1345 3350.11 1345ZM3874.13 899.03 3931.13 899.03 3931.13 1345 3874.13 1345Z" fill="#E97132"/></g><g><path d="M2408.08 1186.04 2465.08 1186.04 2465.08 1345 2408.08 1345ZM2933.1 1082.04 2990.1 1082.04 2990.1 1345 2933.1 1345ZM3457.11 1019.03 3514.12 1019.03 3514.12 1345 3457.11 1345ZM3982.13 842.028 4039.13 842.028 4039.13 1345 3982.13 1345Z" fill="#196B24"/></g><g><path d="M0 0 0.000360892 552" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd" transform="matrix(1 0 0 -1 2066.5 1344.5)"/></g><g><path d="M2053.5 1344.5 2066.5 1344.5M2053.5 1252.54 2066.5 1252.54M2053.5 1160.54 2066.5 1160.54M2053.5 1068.54 2066.5 1068.54M2053.5 976.532 2066.5 976.532M2053.5 884.529 2066.5 884.529M2053.5 792.5 2066.5 792.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g><g><path d="M2066.5 1344.5 4164.5 1344.5" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none" fill-rule="evenodd"/></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2232.73 1266)">+0%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2757.28 1174)">+0%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3281.82 1190)">+0%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3806.37 1041)">+0%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2340.59 1148)">+229%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 
2865.14 1084)">+63%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3389.69 977)">+168%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3914.23 872)">+61%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2448.45 1159)">+207%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 2973 1054)">+83%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 3497.55 991)">+157%</text></g><g><text fill="#7F7F7F" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(6.12323e-17 -1 1 6.12323e-17 4022.09 815)">+82%</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 2007.71 1356)">0</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 2007.71 1264)">5</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1986.81 1172)">10</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1986.81 1080)">15</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1986.81 988)">20</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1986.81 896)">25</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 1986.81 804)">30</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(1 0 0 1 2274.52 1402)">(1, 4)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(1 0 0 1 2799.07 1402)">(4, 4)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(1 0 0 1 3323.62 1402)">(1, 1)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos 
Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="37" transform="matrix(1 0 0 1 3848.16 1402)">(4, 1)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(6.12323e-17 -1 1 6.12323e-17 1961.71 1183)">IMAGES / SEC</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 2984.39 1465)">(BATCH, STEPS)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2494.48 625)">SDXL TURBO (DYNAMIC SHAPE)</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2429.81 714)">(A100</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2631.84 714)">-</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2660.17 714)">SXM4</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2851.25 714)">-</text></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="700" font-stretch="semi-condensed" font-size="73" transform="matrix(1 0 0 1 2879.2 714)">80GB, 512X512, FP16)</text></g><g><rect x="2483" y="1561" width="23" height="23" fill="#156082"/></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 2515.72 1584)">Torch (Eager)</text></g><g><rect x="2798" y="1561" width="23" height="23" fill="#E97132"/></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 2830.51 1584)">ORT_CUDA (Dynamic)</text></g><g><rect x="3259" y="1561" width="23" height="23" fill="#196B24"/></g><g><text fill="#595959" font-family="Aptos Narrow,Aptos Narrow_MSFontService,sans-serif" font-weight="400" font-stretch="semi-condensed" font-size="41" transform="matrix(1 0 0 1 3291.14 1584)">ORT_TRT (Dynamic)</text></g><rect x="1865.5" y="522.5" width="2350" height="1117" stroke="#D9D9D9" stroke-width="3.4375" stroke-linejoin="round" stroke-miterlimit="10" fill="none"/></g></svg> | 7 |
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/sdxl_ort_inference/sd_turbo_rtx_perf_chart_static.svg | [SVG bar chart: "SD TURBO (STATIC SHAPE) (RTX-4090, 512X512, FP16)"; y-axis: IMAGES / SEC (0-35); x-axis: (BATCH, STEPS) = (1, 4), (4, 4), (1, 1), (4, 1); legend: Torch (Compile), ORT_CUDA (Static), ORT_TRT (Static); bar labels show ORT speedups of +13% to +33% over Torch Compile.] | 8
0 | hf_public_repos/blog/assets | hf_public_repos/blog/assets/sdxl_ort_inference/sd_turbo_a100_perf_chart_dynamic.svg | [SVG bar chart: "SD TURBO (DYNAMIC SHAPE) (A100-SXM4-80GB, 512X512, FP16)"; y-axis: IMAGES / SEC (0-35); x-axis: (BATCH, STEPS) = (1, 4), (4, 4), (1, 1), (4, 1); legend: Torch (Eager), ORT_CUDA (Dynamic), ORT_TRT (Dynamic); bar labels show ORT speedups of +43% to +155% over Torch Eager.] | 9
0 | hf_public_repos/api-inference-community/docker_images/speechbrain | hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api.py | import os
from typing import Dict, List
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS, get_pipeline
# Must contain at least one example of each implemented pipeline
# Tests do not check the actual values of the model output, so small dummy
# models are recommended for faster tests.
TESTABLE_MODELS: Dict[str, List[str]] = {
"audio-classification": [
# Language Identification
"speechbrain/lang-id-commonlanguage_ecapa",
# Command recognition
"speechbrain/google_speech_command_xvector",
# Speaker recognition
"speechbrain/spkrec-xvect-voxceleb",
],
"audio-to-audio": [
# Speech Enhancement
"speechbrain/mtl-mimic-voicebank",
# Source separation
"speechbrain/sepformer-wham",
],
"automatic-speech-recognition": [
# ASR with EncoderASR
"speechbrain/asr-wav2vec2-commonvoice-fr",
# ASR with EncoderDecoderASR
"speechbrain/asr-crdnn-commonvoice-it",
# ASR with WhisperASR
"speechbrain/asr-whisper-large-v2-commonvoice-fr",
],
"text-to-speech": [
"speechbrain/tts-tacotron2-ljspeech",
"speechbrain/tts-fastspeech2-ljspeech",
],
"text2text-generation": [
# SoundChoice G2P
"speechbrain/soundchoice-g2p"
],
}
ALL_TASKS = {
"audio-classification",
"audio-to-audio",
"automatic-speech-recognition",
"audio-source-separation",
"image-classification",
"question-answering",
"text-generation",
"text-to-speech",
}
class PipelineTestCase(TestCase):
@skipIf(
os.path.dirname(os.path.dirname(__file__)).endswith("common"),
"common is a special case",
)
def test_has_at_least_one_task_enabled(self):
self.assertGreater(
len(ALLOWED_TASKS.keys()), 0, "You need to implement at least one task"
)
def test_unsupported_tasks(self):
unsupported_tasks = ALL_TASKS - ALLOWED_TASKS.keys()
for unsupported_task in unsupported_tasks:
with self.subTest(msg=unsupported_task, task=unsupported_task):
with self.assertRaises(EnvironmentError):
get_pipeline(unsupported_task, model_id="XX")
| 0 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain | hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_audio_classification.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"audio-classification" not in ALLOWED_TASKS,
"audio-classification not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["audio-classification"]]
)
class AudioClassificationTestCase(TestCase):
def setUp(self):
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = self.model_id
os.environ["TASK"] = "audio-classification"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def read(self, filename: str) -> bytes:
dirname = os.path.dirname(os.path.abspath(__file__))
filename = os.path.join(dirname, "samples", filename)
with open(filename, "rb") as f:
bpayload = f.read()
return bpayload
def test_simple(self):
bpayload = self.read("sample1.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(type(content), list)
self.assertEqual(type(content[0]), dict)
self.assertEqual(
set(k for el in content for k in el.keys()),
{"label", "score"},
)
def test_malformed_audio(self):
bpayload = self.read("malformed.flac")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(response.content, b'{"error":"Malformed soundfile"}')
def test_dual_channel_audiofile(self):
bpayload = self.read("sample1_dual.ogg")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(type(content), list)
self.assertEqual(type(content[0]), dict)
self.assertEqual(
set(k for el in content for k in el.keys()),
{"label", "score"},
)
def test_webm_audiofile(self):
bpayload = self.read("sample1.webm")
with TestClient(self.app) as client:
response = client.post("/", data=bpayload)
self.assertEqual(
response.status_code,
200,
)
content = json.loads(response.content)
self.assertEqual(type(content), list)
self.assertEqual(type(content[0]), dict)
self.assertEqual(
set(k for el in content for k in el.keys()),
{"label", "score"},
)
| 1 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain | hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_text_to_speech.py | import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"text-to-speech" not in ALLOWED_TASKS,
"text-to-speech not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["text-to-speech"]]
)
class TextToSpeechTestCase(TestCase):
def setUp(self):
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = self.model_id
os.environ["TASK"] = "text-to-speech"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def test_simple(self):
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": "This is some text"})
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "audio/flac")
audio = ffmpeg_read(response.content, 16000)
self.assertEqual(len(audio.shape), 1)
self.assertGreater(audio.shape[0], 1000)
def test_malformed_input(self):
with TestClient(self.app) as client:
response = client.post("/", data=b"\xc3\x28")
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(
response.content,
b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
)
| 2 |
0 | hf_public_repos/api-inference-community/docker_images/speechbrain | hf_public_repos/api-inference-community/docker_images/speechbrain/tests/test_api_text2text_generation.py | import json
import os
from unittest import TestCase, skipIf
from app.main import ALLOWED_TASKS
from parameterized import parameterized_class
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"text2text-generation" not in ALLOWED_TASKS,
"text2text-generation not implemented",
)
@parameterized_class(
[{"model_id": model_id} for model_id in TESTABLE_MODELS["text2text-generation"]]
)
class Text2TextGenerationTestCase(TestCase):
def setUp(self):
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = self.model_id
os.environ["TASK"] = "text2text-generation"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def test_simple(self):
with TestClient(self.app) as client:
response = client.post(
"/",
json={
"inputs": "English is tough. It can be understood "
"through thorough thought though."
},
)
self.assertEqual(
response.status_code,
200,
)
result = json.loads(response.content)
self.assertEqual(type(result), list)
self.assertEqual(
"IH-NG-G-L-IH-SH- -IH-Z- -T-AH-F- -IH-T- -K-AE-N- -B-IY- -"
"AH-N-D-ER-S-T-UH-D- -TH-R-UW- -TH-ER-OW- -TH-AO-T- -DH-OW",
result[0]["generated_text"],
)
| 3 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/stanza/requirements.txt | starlette==0.27.0
api-inference-community==0.0.23
huggingface_hub==0.5.1
stanza==1.3.0
| 4 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/stanza/Dockerfile | FROM tiangolo/uvicorn-gunicorn:python3.8
LABEL maintainer="me <[email protected]>"
# Add any system dependency here
# RUN apt-get update -y && apt-get install libXXX -y
COPY ./requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt
COPY ./prestart.sh /app/
# Most DL models are quite large in terms of memory, using workers is a HUGE
# slowdown because of the fork and GIL with python.
# Using multiple pods seems like a better default strategy.
# Feel free to override if it does not make sense for your library.
ARG max_workers=1
ENV MAX_WORKERS=$max_workers
ENV HUGGINGFACE_HUB_CACHE=/data
# Necessary on GPU docker environments.
# The TIMEOUT env variable is used by nvcr.io/nvidia/pytorch:xx for another purpose,
# which makes the TIMEOUT expected by uvicorn impossible to use correctly.
# We therefore rename it to UVICORN_TIMEOUT.
# UVICORN_TIMEOUT is useful for very large models that take more than 30s
# (the default) to load into memory.
# If UVICORN_TIMEOUT is too low, uvicorn will simply never load, because
# workers are killed before they finish loading.
RUN sed -i 's/TIMEOUT/UVICORN_TIMEOUT/g' /gunicorn_conf.py
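# A hedged usage sketch (values below are illustrative assumptions, not defaults
# shipped with this image), showing how max_workers and UVICORN_TIMEOUT might be
# set at build and run time:
#   docker build --build-arg max_workers=1 -t stanza-api .
#   docker run -e UVICORN_TIMEOUT=300 -e TASK=token-classification -e MODEL_ID=<model-id> stanza-api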
COPY ./app /app/app
| 5 |
0 | hf_public_repos/api-inference-community/docker_images | hf_public_repos/api-inference-community/docker_images/stanza/prestart.sh | python app/main.py
| 6 |
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/app/main.py | import functools
import logging
import os
from typing import Dict, Type
from api_inference_community.routes import pipeline_route, status_ok
from app.pipelines import Pipeline, TokenClassificationPipeline
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.gzip import GZipMiddleware
from starlette.routing import Route
TASK = os.getenv("TASK")
MODEL_ID = os.getenv("MODEL_ID")
logger = logging.getLogger(__name__)
# Add the allowed tasks
# Supported tasks are:
# - text-generation
# - text-classification
# - token-classification
# - translation
# - summarization
# - automatic-speech-recognition
# - ...
# For instance
# from app.pipelines import AutomaticSpeechRecognitionPipeline
# ALLOWED_TASKS = {"automatic-speech-recognition": AutomaticSpeechRecognitionPipeline}
# You can check the requirements and expectations of each pipelines in their respective
# directories. Implement directly within the directories.
ALLOWED_TASKS: Dict[str, Type[Pipeline]] = {
"token-classification": TokenClassificationPipeline
}
@functools.lru_cache()
def get_pipeline() -> Pipeline:
task = os.environ["TASK"]
model_id = os.environ["MODEL_ID"]
if task not in ALLOWED_TASKS:
raise EnvironmentError(f"{task} is not a valid pipeline for model : {model_id}")
return ALLOWED_TASKS[task](model_id)
routes = [
Route("/{whatever:path}", status_ok),
Route("/{whatever:path}", pipeline_route, methods=["POST"]),
]
middleware = [Middleware(GZipMiddleware, minimum_size=1000)]
if os.environ.get("DEBUG", "") == "1":
from starlette.middleware.cors import CORSMiddleware
middleware.append(
Middleware(
CORSMiddleware,
allow_origins=["*"],
allow_headers=["*"],
allow_methods=["*"],
)
)
app = Starlette(routes=routes, middleware=middleware)
@app.on_event("startup")
async def startup_event():
logger = logging.getLogger("uvicorn.access")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
logger.handlers = [handler]
# Link between `api-inference-community` and framework code.
app.get_pipeline = get_pipeline
try:
get_pipeline()
except Exception:
    # Pipeline loading may fail here; the exception will surface later, when the pipeline is requested.
pass
if __name__ == "__main__":
try:
get_pipeline()
except Exception:
        # Pipeline loading may fail here; the exception will surface later, when the pipeline is requested.
pass
| 7 |
0 | hf_public_repos/api-inference-community/docker_images/stanza | hf_public_repos/api-inference-community/docker_images/stanza/app/batch.py | #!/usr/bin/env python
import os
from api_inference_community.batch import batch
from app.main import get_pipeline
DATASET_NAME = os.getenv("DATASET_NAME")
DATASET_CONFIG = os.getenv("DATASET_CONFIG", None)
DATASET_SPLIT = os.getenv("DATASET_SPLIT")
DATASET_COLUMN = os.getenv("DATASET_COLUMN")
USE_GPU = os.getenv("USE_GPU", "0").lower() in {"1", "true"}
TOKEN = os.getenv("TOKEN")
REPO_ID = os.getenv("REPO_ID")
TASK = os.getenv("TASK")
if __name__ == "__main__":
batch(
dataset_name=DATASET_NAME,
dataset_config=DATASET_CONFIG,
dataset_split=DATASET_SPLIT,
dataset_column=DATASET_COLUMN,
token=TOKEN,
repo_id=REPO_ID,
use_gpu=USE_GPU,
pipeline=get_pipeline(),
task=TASK,
)
| 8 |
0 | hf_public_repos/api-inference-community/docker_images/stanza/app | hf_public_repos/api-inference-community/docker_images/stanza/app/pipelines/base.py | from abc import ABC, abstractmethod
from typing import Any
class Pipeline(ABC):
@abstractmethod
def __init__(self, model_id: str):
raise NotImplementedError("Pipelines should implement an __init__ method")
@abstractmethod
def __call__(self, inputs: Any) -> Any:
raise NotImplementedError("Pipelines should implement a __call__ method")
class PipelineException(Exception):
pass
| 9 |
0 | hf_public_repos/autotrain-advanced/src | hf_public_repos/autotrain-advanced/src/autotrain/project.py | """
Copyright 2023 The HuggingFace Team
"""
import os
from dataclasses import dataclass
from typing import Union
from autotrain.backends.base import AVAILABLE_HARDWARE
from autotrain.backends.endpoints import EndpointsRunner
from autotrain.backends.local import LocalRunner
from autotrain.backends.ngc import NGCRunner
from autotrain.backends.nvcf import NVCFRunner
from autotrain.backends.spaces import SpaceRunner
from autotrain.dataset import (
AutoTrainDataset,
AutoTrainImageClassificationDataset,
AutoTrainImageRegressionDataset,
AutoTrainObjectDetectionDataset,
AutoTrainVLMDataset,
)
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
from autotrain.trainers.vlm.params import VLMTrainingParams
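# Each *_munge_data helper below converts a user-provided dataset into the
# AutoTrain-readable format (via the AutoTrain*Dataset classes), then rewrites
# the relevant column names on the params object to the autotrain_* columns
# produced during dataset preparation.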
def tabular_munge_data(params, local):
if isinstance(params.target_columns, str):
col_map_label = [params.target_columns]
else:
col_map_label = params.target_columns
task = params.task
if task == "classification" and len(col_map_label) > 1:
task = "tabular_multi_label_classification"
elif task == "classification" and len(col_map_label) == 1:
task = "tabular_multi_class_classification"
elif task == "regression" and len(col_map_label) > 1:
task = "tabular_multi_column_regression"
elif task == "regression" and len(col_map_label) == 1:
task = "tabular_single_column_regression"
else:
raise Exception("Please select a valid task.")
exts = ["csv", "jsonl"]
ext_to_use = None
for ext in exts:
path = f"{params.data_path}/{params.train_split}.{ext}"
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f"{params.data_path}/{params.train_split}.{ext_to_use}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.{ext_to_use}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
task=task,
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping={"id": params.id_column, "label": col_map_label},
valid_data=[valid_data_path] if valid_data_path is not None else None,
percent_valid=None, # TODO: add to UI
local=local,
ext=ext_to_use,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.id_column = "autotrain_id"
if len(col_map_label) == 1:
params.target_columns = ["autotrain_label"]
else:
params.target_columns = [f"autotrain_label_{i}" for i in range(len(col_map_label))]
return params
def llm_munge_data(params, local):
exts = ["csv", "jsonl"]
ext_to_use = None
for ext in exts:
path = f"{params.data_path}/{params.train_split}.{ext}"
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f"{params.data_path}/{params.train_split}.{ext_to_use}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.{ext_to_use}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
col_map = {"text": params.text_column}
if params.rejected_text_column is not None:
col_map["rejected_text"] = params.rejected_text_column
if params.prompt_text_column is not None:
col_map["prompt"] = params.prompt_text_column
dset = AutoTrainDataset(
train_data=[train_data_path],
task="lm_training",
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping=col_map,
valid_data=[valid_data_path] if valid_data_path is not None else None,
percent_valid=None, # TODO: add to UI
local=local,
ext=ext_to_use,
)
params.data_path = dset.prepare()
params.valid_split = None
params.text_column = "autotrain_text"
params.rejected_text_column = "autotrain_rejected_text"
params.prompt_text_column = "autotrain_prompt"
return params
def seq2seq_munge_data(params, local):
exts = ["csv", "jsonl"]
ext_to_use = None
for ext in exts:
path = f"{params.data_path}/{params.train_split}.{ext}"
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f"{params.data_path}/{params.train_split}.{ext_to_use}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.{ext_to_use}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
task="seq2seq",
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping={"text": params.text_column, "label": params.target_column},
valid_data=[valid_data_path] if valid_data_path is not None else None,
percent_valid=None, # TODO: add to UI
local=local,
ext=ext_to_use,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.text_column = "autotrain_text"
params.target_column = "autotrain_label"
return params
def text_clf_munge_data(params, local):
exts = ["csv", "jsonl"]
ext_to_use = None
for ext in exts:
path = f"{params.data_path}/{params.train_split}.{ext}"
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f"{params.data_path}/{params.train_split}.{ext_to_use}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.{ext_to_use}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
valid_data=[valid_data_path] if valid_data_path is not None else None,
task="text_multi_class_classification",
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping={"text": params.text_column, "label": params.target_column},
percent_valid=None, # TODO: add to UI
local=local,
convert_to_class_label=True,
ext=ext_to_use,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.text_column = "autotrain_text"
params.target_column = "autotrain_label"
return params
def text_reg_munge_data(params, local):
exts = ["csv", "jsonl"]
ext_to_use = None
for ext in exts:
path = f"{params.data_path}/{params.train_split}.{ext}"
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f"{params.data_path}/{params.train_split}.{ext_to_use}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.{ext_to_use}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
valid_data=[valid_data_path] if valid_data_path is not None else None,
task="text_single_column_regression",
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping={"text": params.text_column, "label": params.target_column},
percent_valid=None, # TODO: add to UI
local=local,
convert_to_class_label=False,
ext=ext_to_use,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.text_column = "autotrain_text"
params.target_column = "autotrain_label"
return params
def token_clf_munge_data(params, local):
exts = ["csv", "jsonl"]
ext_to_use = None
for ext in exts:
path = f"{params.data_path}/{params.train_split}.{ext}"
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f"{params.data_path}/{params.train_split}.{ext_to_use}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.{ext_to_use}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
valid_data=[valid_data_path] if valid_data_path is not None else None,
task="text_token_classification",
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping={"text": params.tokens_column, "label": params.tags_column},
percent_valid=None, # TODO: add to UI
local=local,
convert_to_class_label=True,
ext=ext_to_use,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.tokens_column = "autotrain_text"
params.tags_column = "autotrain_label"
return params
def img_clf_munge_data(params, local):
train_data_path = f"{params.data_path}/{params.train_split}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}"
else:
valid_data_path = None
if os.path.isdir(train_data_path):
dset = AutoTrainImageClassificationDataset(
train_data=train_data_path,
valid_data=valid_data_path,
token=params.token,
project_name=params.project_name,
username=params.username,
local=local,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.image_column = "autotrain_image"
params.target_column = "autotrain_label"
return params
def img_obj_detect_munge_data(params, local):
train_data_path = f"{params.data_path}/{params.train_split}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}"
else:
valid_data_path = None
if os.path.isdir(train_data_path):
dset = AutoTrainObjectDetectionDataset(
train_data=train_data_path,
valid_data=valid_data_path,
token=params.token,
project_name=params.project_name,
username=params.username,
local=local,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.image_column = "autotrain_image"
params.objects_column = "autotrain_objects"
return params
def sent_transformers_munge_data(params, local):
exts = ["csv", "jsonl"]
ext_to_use = None
for ext in exts:
path = f"{params.data_path}/{params.train_split}.{ext}"
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f"{params.data_path}/{params.train_split}.{ext_to_use}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.{ext_to_use}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
valid_data=[valid_data_path] if valid_data_path is not None else None,
task="sentence_transformers",
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping={
"sentence1": params.sentence1_column,
"sentence2": params.sentence2_column,
"sentence3": params.sentence3_column,
"target": params.target_column,
},
percent_valid=None, # TODO: add to UI
local=local,
convert_to_class_label=True if params.trainer == "pair_class" else False,
ext=ext_to_use,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.sentence1_column = "autotrain_sentence1"
params.sentence2_column = "autotrain_sentence2"
params.sentence3_column = "autotrain_sentence3"
params.target_column = "autotrain_target"
return params
def img_reg_munge_data(params, local):
train_data_path = f"{params.data_path}/{params.train_split}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}"
else:
valid_data_path = None
if os.path.isdir(train_data_path):
dset = AutoTrainImageRegressionDataset(
train_data=train_data_path,
valid_data=valid_data_path,
token=params.token,
project_name=params.project_name,
username=params.username,
local=local,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.image_column = "autotrain_image"
params.target_column = "autotrain_label"
return params
def vlm_munge_data(params, local):
train_data_path = f"{params.data_path}/{params.train_split}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
col_map = {"text": params.text_column}
if params.prompt_text_column is not None:
col_map["prompt"] = params.prompt_text_column
dset = AutoTrainVLMDataset(
train_data=train_data_path,
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping=col_map,
valid_data=valid_data_path if valid_data_path is not None else None,
percent_valid=None, # TODO: add to UI
local=local,
)
params.data_path = dset.prepare()
params.text_column = "autotrain_text"
params.image_column = "autotrain_image"
params.prompt_text_column = "autotrain_prompt"
return params
def ext_qa_munge_data(params, local):
exts = ["csv", "jsonl"]
ext_to_use = None
for ext in exts:
path = f"{params.data_path}/{params.train_split}.{ext}"
if os.path.exists(path):
ext_to_use = ext
break
train_data_path = f"{params.data_path}/{params.train_split}.{ext_to_use}"
if params.valid_split is not None:
valid_data_path = f"{params.data_path}/{params.valid_split}.{ext_to_use}"
else:
valid_data_path = None
if os.path.exists(train_data_path):
dset = AutoTrainDataset(
train_data=[train_data_path],
valid_data=[valid_data_path] if valid_data_path is not None else None,
task="text_extractive_question_answering",
token=params.token,
project_name=params.project_name,
username=params.username,
column_mapping={
"text": params.text_column,
"question": params.question_column,
"answer": params.answer_column,
},
percent_valid=None, # TODO: add to UI
local=local,
convert_to_class_label=True,
ext=ext_to_use,
)
params.data_path = dset.prepare()
params.valid_split = "validation"
params.text_column = "autotrain_text"
params.question_column = "autotrain_question"
params.answer_column = "autotrain_answer"
return params
@dataclass
class AutoTrainProject:
"""
A class to train an AutoTrain project
Attributes
----------
params : Union[
LLMTrainingParams,
TextClassificationParams,
TabularParams,
Seq2SeqParams,
ImageClassificationParams,
TextRegressionParams,
ObjectDetectionParams,
TokenClassificationParams,
SentenceTransformersParams,
ImageRegressionParams,
ExtractiveQuestionAnsweringParams,
VLMTrainingParams,
]
The parameters for the AutoTrain project.
backend : str
The backend to be used for the AutoTrain project. It should be one of the following:
- local
- spaces-a10g-large
- spaces-a10g-small
- spaces-a100-large
- spaces-t4-medium
- spaces-t4-small
- spaces-cpu-upgrade
- spaces-cpu-basic
- spaces-l4x1
- spaces-l4x4
- spaces-l40sx1
- spaces-l40sx4
- spaces-l40sx8
- spaces-a10g-largex2
- spaces-a10g-largex4
process : bool
Flag to indicate if the params and dataset should be processed. If your data format is not AutoTrain-readable, set it to True. Set it to True when in doubt. Defaults to False.
Methods
-------
__post_init__():
Validates the backend attribute.
create():
Creates a runner based on the backend and initializes the AutoTrain project.
"""
params: Union[
LLMTrainingParams,
TextClassificationParams,
TabularParams,
Seq2SeqParams,
ImageClassificationParams,
TextRegressionParams,
ObjectDetectionParams,
TokenClassificationParams,
SentenceTransformersParams,
ImageRegressionParams,
ExtractiveQuestionAnsweringParams,
VLMTrainingParams,
]
backend: str
process: bool = False
def __post_init__(self):
self.local = self.backend.startswith("local")
if self.backend not in AVAILABLE_HARDWARE:
raise ValueError(f"Invalid backend: {self.backend}")
def _process_params_data(self):
if isinstance(self.params, LLMTrainingParams):
return llm_munge_data(self.params, self.local)
elif isinstance(self.params, ExtractiveQuestionAnsweringParams):
return ext_qa_munge_data(self.params, self.local)
elif isinstance(self.params, ImageClassificationParams):
return img_clf_munge_data(self.params, self.local)
elif isinstance(self.params, ImageRegressionParams):
return img_reg_munge_data(self.params, self.local)
elif isinstance(self.params, ObjectDetectionParams):
return img_obj_detect_munge_data(self.params, self.local)
elif isinstance(self.params, SentenceTransformersParams):
return sent_transformers_munge_data(self.params, self.local)
elif isinstance(self.params, Seq2SeqParams):
return seq2seq_munge_data(self.params, self.local)
elif isinstance(self.params, TabularParams):
return tabular_munge_data(self.params, self.local)
elif isinstance(self.params, TextClassificationParams):
return text_clf_munge_data(self.params, self.local)
elif isinstance(self.params, TextRegressionParams):
return text_reg_munge_data(self.params, self.local)
elif isinstance(self.params, TokenClassificationParams):
return token_clf_munge_data(self.params, self.local)
elif isinstance(self.params, VLMTrainingParams):
return vlm_munge_data(self.params, self.local)
else:
raise Exception("Invalid params class")
def create(self):
if self.process:
self.params = self._process_params_data()
if self.backend.startswith("local"):
runner = LocalRunner(params=self.params, backend=self.backend)
return runner.create()
elif self.backend.startswith("spaces-"):
runner = SpaceRunner(params=self.params, backend=self.backend)
return runner.create()
elif self.backend.startswith("ep-"):
runner = EndpointsRunner(params=self.params, backend=self.backend)
return runner.create()
elif self.backend.startswith("ngc-"):
runner = NGCRunner(params=self.params, backend=self.backend)
return runner.create()
elif self.backend.startswith("nvcf-"):
runner = NVCFRunner(params=self.params, backend=self.backend)
return runner.create()
else:
raise NotImplementedError
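# Usage sketch (illustrative only; the model name, file layout and project name
# below are assumptions, not requirements of this module):
#
#   from autotrain.project import AutoTrainProject
#   from autotrain.trainers.text_classification.params import TextClassificationParams
#
#   params = TextClassificationParams(
#       data_path="data/",            # folder containing train.csv (and optionally a validation split)
#       train_split="train",
#       text_column="text",
#       target_column="label",
#       model="google-bert/bert-base-uncased",  # hypothetical base model
#       project_name="my-text-clf",
#   )
#   AutoTrainProject(params=params, backend="local", process=True).create()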
| 0 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/training_api.py | import asyncio
import os
import signal
import sys
from contextlib import asynccontextmanager
from fastapi import FastAPI
from autotrain import logger
from autotrain.app.db import AutoTrainDB
from autotrain.app.utils import get_running_jobs, kill_process_by_pid
from autotrain.utils import run_training
HF_TOKEN = os.environ.get("HF_TOKEN")
AUTOTRAIN_USERNAME = os.environ.get("AUTOTRAIN_USERNAME")
PROJECT_NAME = os.environ.get("PROJECT_NAME")
TASK_ID = int(os.environ.get("TASK_ID"))
PARAMS = os.environ.get("PARAMS")
DATA_PATH = os.environ.get("DATA_PATH")
MODEL = os.environ.get("MODEL")
DB = AutoTrainDB("autotrain.db")
def graceful_exit(signum, frame):
"""
Handles the SIGTERM signal to perform cleanup and exit the program gracefully.
Args:
signum (int): The signal number.
frame (FrameType): The current stack frame (or None).
Logs a message indicating that SIGTERM was received and then exits the program with status code 0.
"""
logger.info("SIGTERM received. Performing cleanup...")
sys.exit(0)
signal.signal(signal.SIGTERM, graceful_exit)
class BackgroundRunner:
"""
A class to handle background running tasks.
Methods
-------
run_main():
Continuously checks for running jobs and shuts down the server if no jobs are found.
"""
async def run_main(self):
while True:
running_jobs = get_running_jobs(DB)
if not running_jobs:
logger.info("No running jobs found. Shutting down the server.")
kill_process_by_pid(os.getpid())
await asyncio.sleep(30)
runner = BackgroundRunner()
@asynccontextmanager
async def lifespan(app: FastAPI):
"""
Manages the lifespan of the FastAPI application.
This function is responsible for starting the training process and
managing a background task runner. It logs the process ID of the
training job, adds the job to the database, and ensures the background
task is properly cancelled when the application shuts down.
Args:
app (FastAPI): The FastAPI application instance.
Yields:
None: This function is a generator that yields control back to the
FastAPI application lifecycle.
"""
process_pid = run_training(params=PARAMS, task_id=TASK_ID)
logger.info(f"Started training with PID {process_pid}")
DB.add_job(process_pid)
task = asyncio.create_task(runner.run_main())
yield
task.cancel()
try:
await task
except asyncio.CancelledError:
logger.info("Background runner task cancelled.")
api = FastAPI(lifespan=lifespan)
logger.info(f"AUTOTRAIN_USERNAME: {AUTOTRAIN_USERNAME}")
logger.info(f"PROJECT_NAME: {PROJECT_NAME}")
logger.info(f"TASK_ID: {TASK_ID}")
logger.info(f"DATA_PATH: {DATA_PATH}")
logger.info(f"MODEL: {MODEL}")
@api.get("/")
async def root():
return "Your model is being trained..."
@api.get("/health")
async def health():
return "OK"
| 1 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/utils.py | import os
import signal
import sys
import psutil
import requests
from autotrain import config, logger
def graceful_exit(signum, frame):
logger.info("SIGTERM received. Performing cleanup...")
sys.exit(0)
signal.signal(signal.SIGTERM, graceful_exit)
def get_running_jobs(db):
"""
Retrieves and manages running jobs from the database.
This function fetches the list of running jobs from the provided database object.
For each running job, it checks the process status. If the status is "completed",
"error", or "zombie", it attempts to kill the process and remove the job from the
database. After processing, it fetches and returns the updated list of running jobs.
Args:
db: A database object that provides methods to get and delete running jobs.
Returns:
list: An updated list of running jobs from the database.
"""
running_jobs = db.get_running_jobs()
if running_jobs:
for _pid in running_jobs:
proc_status = get_process_status(_pid)
proc_status = proc_status.strip().lower()
if proc_status in ("completed", "error", "zombie"):
logger.info(f"Killing PID: {_pid}")
try:
kill_process_by_pid(_pid)
except Exception as e:
logger.info(f"Error while killing process: {e}")
logger.info(f"Process {_pid} is already completed. Skipping...")
db.delete_job(_pid)
running_jobs = db.get_running_jobs()
return running_jobs
def get_process_status(pid):
"""
Retrieve the status of a process given its PID.
Args:
pid (int): The process ID of the process to check.
Returns:
str: The status of the process. If the process does not exist, returns "Completed".
Raises:
psutil.NoSuchProcess: If no process with the given PID is found.
"""
try:
process = psutil.Process(pid)
proc_status = process.status()
return proc_status
except psutil.NoSuchProcess:
logger.info(f"No process found with PID: {pid}")
return "Completed"
def kill_process_by_pid(pid):
"""
Kill a process by its PID (Process ID).
This function attempts to terminate a process with the given PID using the SIGTERM signal.
It logs the outcome of the operation, whether successful or not.
Args:
pid (int): The Process ID of the process to be terminated.
Raises:
ProcessLookupError: If no process with the given PID is found.
Exception: If an error occurs while attempting to send the SIGTERM signal.
"""
try:
os.kill(pid, signal.SIGTERM)
logger.info(f"Sent SIGTERM to process with PID {pid}")
except ProcessLookupError:
logger.error(f"No process found with PID {pid}")
except Exception as e:
logger.error(f"Failed to send SIGTERM to process with PID {pid}: {e}")
def token_verification(token):
"""
Verifies the provided token with the Hugging Face API and retrieves user information.
Args:
token (str): The token to be verified. It can be either an OAuth token (starting with "hf_oauth")
or a regular token (starting with "hf_").
Returns:
dict: A dictionary containing user information with the following keys:
- id (str): The user ID.
- name (str): The user's preferred username.
- orgs (list): A list of organizations the user belongs to.
Raises:
Exception: If the Hugging Face Hub is unreachable or the token is invalid.
"""
if token.startswith("hf_oauth"):
_api_url = config.HF_API + "/oauth/userinfo"
_err_msg = "/oauth/userinfo"
else:
_api_url = config.HF_API + "/api/whoami-v2"
_err_msg = "/api/whoami-v2"
headers = {}
cookies = {}
if token.startswith("hf_"):
headers["Authorization"] = f"Bearer {token}"
else:
cookies = {"token": token}
try:
response = requests.get(
_api_url,
headers=headers,
cookies=cookies,
timeout=3,
)
    except (requests.Timeout, requests.ConnectionError) as err:
logger.error(f"Failed to request {_err_msg} - {repr(err)}")
raise Exception(f"Hugging Face Hub ({_err_msg}) is unreachable, please try again later.")
if response.status_code != 200:
logger.error(f"Failed to request {_err_msg} - {response.status_code}")
raise Exception(f"Invalid token ({_err_msg}). Please login with a write token.")
resp = response.json()
user_info = {}
if token.startswith("hf_oauth"):
user_info["id"] = resp["sub"]
user_info["name"] = resp["preferred_username"]
user_info["orgs"] = [resp["orgs"][k]["preferred_username"] for k in range(len(resp["orgs"]))]
else:
user_info["id"] = resp["id"]
user_info["name"] = resp["name"]
user_info["orgs"] = [resp["orgs"][k]["name"] for k in range(len(resp["orgs"]))]
return user_info
def get_user_and_orgs(user_token):
"""
Retrieve the username and organizations associated with the provided user token.
Args:
user_token (str): The token used to authenticate the user. Must be a valid write token.
Returns:
list: A list containing the username followed by the organizations the user belongs to.
Raises:
Exception: If the user token is None or an empty string.
"""
    if user_token is None:
        raise Exception("Please login with a write token.")
    if len(user_token) == 0:
        raise Exception("Invalid token. Please login with a write token.")
user_info = token_verification(token=user_token)
username = user_info["name"]
orgs = user_info["orgs"]
who_is_training = [username] + orgs
return who_is_training
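# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of how the helpers above compose; it assumes a valid write
# token is exported as HF_TOKEN and makes a real request to the Hub.
if __name__ == "__main__":
    _token = os.environ.get("HF_TOKEN")
    if _token:
        # Returns [username, org_1, org_2, ...] or raises on an invalid token.
        print(get_user_and_orgs(user_token=_token))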
| 2 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/db.py | import sqlite3
class AutoTrainDB:
"""
A class to manage job records in a SQLite database.
Attributes:
-----------
db_path : str
The path to the SQLite database file.
conn : sqlite3.Connection
The SQLite database connection object.
c : sqlite3.Cursor
The SQLite database cursor object.
Methods:
--------
__init__(db_path):
Initializes the database connection and creates the jobs table if it does not exist.
create_jobs_table():
Creates the jobs table in the database if it does not exist.
add_job(pid):
Adds a new job with the given process ID (pid) to the jobs table.
get_running_jobs():
Retrieves a list of all running job process IDs (pids) from the jobs table.
delete_job(pid):
Deletes the job with the given process ID (pid) from the jobs table.
"""
def __init__(self, db_path):
self.db_path = db_path
self.conn = sqlite3.connect(db_path)
self.c = self.conn.cursor()
self.create_jobs_table()
def create_jobs_table(self):
self.c.execute(
"""CREATE TABLE IF NOT EXISTS jobs
(id INTEGER PRIMARY KEY, pid INTEGER)"""
)
self.conn.commit()
    def add_job(self, pid):
        # Bind pid as a query parameter rather than formatting it into the SQL string.
        self.c.execute("INSERT INTO jobs (pid) VALUES (?)", (pid,))
        self.conn.commit()
def get_running_jobs(self):
self.c.execute("""SELECT pid FROM jobs""")
running_pids = self.c.fetchall()
running_pids = [pid[0] for pid in running_pids]
return running_pids
    def delete_job(self, pid):
        # Same parameterized style as add_job.
        self.c.execute("DELETE FROM jobs WHERE pid=?", (pid,))
        self.conn.commit()
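# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of the job bookkeeping flow, using sqlite's in-memory database
# so nothing is written to disk.
if __name__ == "__main__":
    _db = AutoTrainDB(":memory:")
    _db.add_job(12345)
    print(_db.get_running_jobs())  # [12345]
    _db.delete_job(12345)
    print(_db.get_running_jobs())  # []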
| 3 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/ui_routes.py | import json
import os
import signal
import sys
import time
from typing import List
import torch
from fastapi import APIRouter, Depends, File, Form, HTTPException, Query, Request, UploadFile, status
from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse
from fastapi.templating import Jinja2Templates
from huggingface_hub import repo_exists
from nvitop import Device
from autotrain import __version__, logger
from autotrain.app.db import AutoTrainDB
from autotrain.app.models import fetch_models
from autotrain.app.params import AppParams, get_task_params
from autotrain.app.utils import get_running_jobs, get_user_and_orgs, kill_process_by_pid, token_verification
from autotrain.dataset import (
AutoTrainDataset,
AutoTrainImageClassificationDataset,
AutoTrainImageRegressionDataset,
AutoTrainObjectDetectionDataset,
AutoTrainVLMDataset,
)
from autotrain.help import get_app_help
from autotrain.project import AutoTrainProject
logger.info("Starting AutoTrain...")
HF_TOKEN = os.environ.get("HF_TOKEN", None)
IS_RUNNING_IN_SPACE = "SPACE_ID" in os.environ
ENABLE_NGC = int(os.environ.get("ENABLE_NGC", 0))
ENABLE_NVCF = int(os.environ.get("ENABLE_NVCF", 0))
AUTOTRAIN_LOCAL = int(os.environ.get("AUTOTRAIN_LOCAL", 1))
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
DB = AutoTrainDB("autotrain.db")
MODEL_CHOICE = fetch_models()
ui_router = APIRouter()
templates_path = os.path.join(BASE_DIR, "templates")
templates = Jinja2Templates(directory=templates_path)
UI_PARAMS = {
"mixed_precision": {
"type": "dropdown",
"label": "Mixed precision",
"options": ["fp16", "bf16", "none"],
},
"optimizer": {
"type": "dropdown",
"label": "Optimizer",
"options": ["adamw_torch", "adamw", "adam", "sgd"],
},
"scheduler": {
"type": "dropdown",
"label": "Scheduler",
"options": ["linear", "cosine", "cosine_warmup", "constant"],
},
"eval_strategy": {
"type": "dropdown",
"label": "Evaluation strategy",
"options": ["epoch", "steps"],
},
"logging_steps": {
"type": "number",
"label": "Logging steps",
},
"save_total_limit": {
"type": "number",
"label": "Save total limit",
},
"auto_find_batch_size": {
"type": "dropdown",
"label": "Auto find batch size",
"options": [True, False],
},
"warmup_ratio": {
"type": "number",
"label": "Warmup proportion",
},
"max_grad_norm": {
"type": "number",
"label": "Max grad norm",
},
"weight_decay": {
"type": "number",
"label": "Weight decay",
},
"epochs": {
"type": "number",
"label": "Epochs",
},
"batch_size": {
"type": "number",
"label": "Batch size",
},
"lr": {
"type": "number",
"label": "Learning rate",
},
"seed": {
"type": "number",
"label": "Seed",
},
"gradient_accumulation": {
"type": "number",
"label": "Gradient accumulation",
},
"block_size": {
"type": "number",
"label": "Block size",
},
"model_max_length": {
"type": "number",
"label": "Model max length",
},
"add_eos_token": {
"type": "dropdown",
"label": "Add EOS token",
"options": [True, False],
},
"disable_gradient_checkpointing": {
"type": "dropdown",
"label": "Disable GC",
"options": [True, False],
},
"use_flash_attention_2": {
"type": "dropdown",
"label": "Use flash attention",
"options": [True, False],
},
"log": {
"type": "dropdown",
"label": "Logging",
"options": ["tensorboard", "none"],
},
"quantization": {
"type": "dropdown",
"label": "Quantization",
"options": ["int4", "int8", "none"],
},
"target_modules": {
"type": "string",
"label": "Target modules",
},
"merge_adapter": {
"type": "dropdown",
"label": "Merge adapter",
"options": [True, False],
},
"peft": {
"type": "dropdown",
"label": "PEFT/LoRA",
"options": [True, False],
},
"lora_r": {
"type": "number",
"label": "Lora r",
},
"lora_alpha": {
"type": "number",
"label": "Lora alpha",
},
"lora_dropout": {
"type": "number",
"label": "Lora dropout",
},
"model_ref": {
"type": "string",
"label": "Reference model",
},
"dpo_beta": {
"type": "number",
"label": "DPO beta",
},
"max_prompt_length": {
"type": "number",
"label": "Prompt length",
},
"max_completion_length": {
"type": "number",
"label": "Completion length",
},
"chat_template": {
"type": "dropdown",
"label": "Chat template",
"options": ["none", "zephyr", "chatml", "tokenizer"],
},
"padding": {
"type": "dropdown",
"label": "Padding side",
"options": ["right", "left", "none"],
},
"max_seq_length": {
"type": "number",
"label": "Max sequence length",
},
"early_stopping_patience": {
"type": "number",
"label": "Early stopping patience",
},
"early_stopping_threshold": {
"type": "number",
"label": "Early stopping threshold",
},
"max_target_length": {
"type": "number",
"label": "Max target length",
},
"categorical_columns": {
"type": "string",
"label": "Categorical columns",
},
"numerical_columns": {
"type": "string",
"label": "Numerical columns",
},
"num_trials": {
"type": "number",
"label": "Number of trials",
},
"time_limit": {
"type": "number",
"label": "Time limit",
},
"categorical_imputer": {
"type": "dropdown",
"label": "Categorical imputer",
"options": ["most_frequent", "none"],
},
"numerical_imputer": {
"type": "dropdown",
"label": "Numerical imputer",
"options": ["mean", "median", "none"],
},
"numeric_scaler": {
"type": "dropdown",
"label": "Numeric scaler",
"options": ["standard", "minmax", "maxabs", "robust", "none"],
},
"vae_model": {
"type": "string",
"label": "VAE model",
},
"prompt": {
"type": "string",
"label": "Prompt",
},
"resolution": {
"type": "number",
"label": "Resolution",
},
"num_steps": {
"type": "number",
"label": "Number of steps",
},
"checkpointing_steps": {
"type": "number",
"label": "Checkpointing steps",
},
"use_8bit_adam": {
"type": "dropdown",
"label": "Use 8-bit Adam",
"options": [True, False],
},
"xformers": {
"type": "dropdown",
"label": "xFormers",
"options": [True, False],
},
"image_square_size": {
"type": "number",
"label": "Image square size",
},
"unsloth": {
"type": "dropdown",
"label": "Unsloth",
"options": [True, False],
},
"max_doc_stride": {
"type": "number",
"label": "Max doc stride",
},
"distributed_backend": {
"type": "dropdown",
"label": "Distributed backend",
"options": ["ddp", "deepspeed"],
},
}
def graceful_exit(signum, frame):
"""
Handles the SIGTERM signal to perform cleanup and exit the program gracefully.
Args:
signum (int): The signal number.
frame (FrameType): The current stack frame (or None).
Logs:
Logs the receipt of the SIGTERM signal and the initiation of cleanup.
Exits:
Exits the program with status code 0.
"""
logger.info("SIGTERM received. Performing cleanup...")
sys.exit(0)
signal.signal(signal.SIGTERM, graceful_exit)
logger.info("AutoTrain started successfully")
def user_authentication(request: Request):
"""
Authenticates the user based on the following priority:
1. HF_TOKEN environment variable
2. OAuth information in session
3. Token in bearer header (not implemented in the given code)
Args:
request (Request): The incoming HTTP request object.
Returns:
str: The authenticated token if verification is successful.
Raises:
HTTPException: If the token is invalid or expired and the application is not running in a space.
If the application is running in a space and authentication fails, it returns a login template response.
"""
# priority: hf_token env var > oauth_info in session > token in bearer header
# if "oauth_info" in request.session:
if HF_TOKEN is not None:
try:
_ = token_verification(token=os.environ.get("HF_TOKEN"))
return HF_TOKEN
except Exception as e:
logger.error(f"Failed to verify token: {e}")
if IS_RUNNING_IN_SPACE:
return templates.TemplateResponse("login.html", {"request": request})
else:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid or expired token: HF_TOKEN",
)
if IS_RUNNING_IN_SPACE and "oauth_info" in request.session:
try:
_ = token_verification(token=request.session["oauth_info"]["access_token"])
return request.session["oauth_info"]["access_token"]
except Exception as e:
request.session.pop("oauth_info", None)
logger.error(f"Failed to verify token: {e}")
return templates.TemplateResponse("login.html", {"request": request})
if IS_RUNNING_IN_SPACE:
return templates.TemplateResponse("login.html", {"request": request})
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid or expired token",
)
@ui_router.get("/", response_class=HTMLResponse)
async def load_index(request: Request, token: str = Depends(user_authentication)):
"""
This function is used to load the index page
:return: HTMLResponse
"""
if os.environ.get("SPACE_ID") == "autotrain-projects/autotrain-advanced":
return templates.TemplateResponse("duplicate.html", {"request": request})
try:
_users = get_user_and_orgs(user_token=token)
except Exception as e:
logger.error(f"Failed to get user and orgs: {e}")
if "oauth_info" in request.session:
request.session.pop("oauth_info", None)
return templates.TemplateResponse("login.html", {"request": request})
context = {
"request": request,
"valid_users": _users,
"enable_ngc": ENABLE_NGC,
"enable_nvcf": ENABLE_NVCF,
"enable_local": AUTOTRAIN_LOCAL,
"version": __version__,
"time": time.strftime("%Y-%m-%d %H:%M:%S"),
}
return templates.TemplateResponse("index.html", context)
@ui_router.get("/logout", response_class=HTMLResponse)
async def oauth_logout(request: Request, authenticated: bool = Depends(user_authentication)):
"""
This function is used to logout the oauth user
:return: HTMLResponse
"""
request.session.pop("oauth_info", None)
return RedirectResponse("/")
@ui_router.get("/params/{task}/{param_type}", response_class=JSONResponse)
async def fetch_params(task: str, param_type: str, authenticated: bool = Depends(user_authentication)):
"""
This function is used to fetch the parameters for a given task
:param task: str
:param param_type: str (basic, full)
:return: JSONResponse
"""
logger.info(f"Task: {task}")
task_params = get_task_params(task, param_type)
if len(task_params) == 0:
return {"error": "Task not found"}
ui_params = {}
for param in task_params:
if param in UI_PARAMS:
ui_params[param] = UI_PARAMS[param]
ui_params[param]["default"] = task_params[param]
else:
logger.info(f"Param {param} not found in UI_PARAMS")
ui_params = dict(sorted(ui_params.items(), key=lambda x: (x[1]["type"], x[1]["label"])))
return ui_params
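# Example response shape for this endpoint (illustrative; labels come from
# UI_PARAMS above and the "default" values come from the task's trainer defaults):
#     {
#         "epochs": {"type": "number", "label": "Epochs", "default": ...},
#         "lr": {"type": "number", "label": "Learning rate", "default": ...}
#     }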
@ui_router.get("/model_choices/{task}", response_class=JSONResponse)
async def fetch_model_choices(
task: str,
custom_models: str = Query(None),
authenticated: bool = Depends(user_authentication),
):
"""
This function is used to fetch the model choices for a given task
:param task: str
:param custom_models: str (optional, comma separated list of custom models, query parameter)
:return: JSONResponse
"""
resp = []
if custom_models is not None:
custom_models = custom_models.split(",")
for custom_model in custom_models:
custom_model = custom_model.strip()
resp.append({"id": custom_model, "name": custom_model})
if os.environ.get("AUTOTRAIN_CUSTOM_MODELS", None) is not None:
custom_models = os.environ.get("AUTOTRAIN_CUSTOM_MODELS")
custom_models = custom_models.split(",")
for custom_model in custom_models:
custom_model = custom_model.strip()
resp.append({"id": custom_model, "name": custom_model})
if task == "text-classification":
hub_models = MODEL_CHOICE["text-classification"]
elif task.startswith("llm"):
hub_models = MODEL_CHOICE["llm"]
elif task.startswith("st:"):
hub_models = MODEL_CHOICE["sentence-transformers"]
elif task == "image-classification":
hub_models = MODEL_CHOICE["image-classification"]
elif task == "seq2seq":
hub_models = MODEL_CHOICE["seq2seq"]
elif task == "tabular:classification":
hub_models = MODEL_CHOICE["tabular-classification"]
elif task == "tabular:regression":
hub_models = MODEL_CHOICE["tabular-regression"]
elif task == "token-classification":
hub_models = MODEL_CHOICE["token-classification"]
elif task == "text-regression":
hub_models = MODEL_CHOICE["text-regression"]
elif task == "image-object-detection":
hub_models = MODEL_CHOICE["image-object-detection"]
elif task == "image-regression":
hub_models = MODEL_CHOICE["image-regression"]
elif task.startswith("vlm:"):
hub_models = MODEL_CHOICE["vlm"]
elif task == "extractive-qa":
hub_models = MODEL_CHOICE["extractive-qa"]
else:
raise NotImplementedError
for hub_model in hub_models:
resp.append({"id": hub_model, "name": hub_model})
return resp
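# Example response shape for this endpoint (illustrative): a flat list in which
# id and name are identical for hub models, e.g.
#     [{"id": "<model-id>", "name": "<model-id>"}, ...]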
@ui_router.post("/create_project", response_class=JSONResponse)
async def handle_form(
project_name: str = Form(...),
task: str = Form(...),
base_model: str = Form(...),
hardware: str = Form(...),
params: str = Form(...),
autotrain_user: str = Form(...),
column_mapping: str = Form('{"default": "value"}'),
data_files_training: List[UploadFile] = File(None),
data_files_valid: List[UploadFile] = File(None),
hub_dataset: str = Form(""),
train_split: str = Form(""),
valid_split: str = Form(""),
token: str = Depends(user_authentication),
):
"""
Handle form submission for creating and managing AutoTrain projects.
Args:
project_name (str): The name of the project.
task (str): The task type (e.g., "image-classification", "text-classification").
base_model (str): The base model to use for training.
hardware (str): The hardware configuration (e.g., "local-ui").
params (str): JSON string of additional parameters.
autotrain_user (str): The username of the AutoTrain user.
column_mapping (str): JSON string mapping columns to their roles.
data_files_training (List[UploadFile]): List of training data files.
data_files_valid (List[UploadFile]): List of validation data files.
hub_dataset (str): The Hugging Face Hub dataset identifier.
train_split (str): The training split identifier.
valid_split (str): The validation split identifier.
token (str): The authentication token.
Returns:
dict: A dictionary containing the success status and monitor URL.
Raises:
HTTPException: If there are conflicts or validation errors in the form submission.
"""
train_split = train_split.strip()
if len(train_split) == 0:
train_split = None
valid_split = valid_split.strip()
if len(valid_split) == 0:
valid_split = None
logger.info(f"hardware: {hardware}")
if hardware == "local-ui":
running_jobs = get_running_jobs(DB)
if running_jobs:
raise HTTPException(
status_code=409, detail="Another job is already running. Please wait for it to finish."
)
if repo_exists(f"{autotrain_user}/{project_name}", token=token):
raise HTTPException(
status_code=409,
detail=f"Project {project_name} already exists. Please choose a different name.",
)
params = json.loads(params)
# convert "null" to None
for key in params:
if params[key] == "null":
params[key] = None
column_mapping = json.loads(column_mapping)
training_files = [f.file for f in data_files_training if f.filename != ""] if data_files_training else []
validation_files = [f.file for f in data_files_valid if f.filename != ""] if data_files_valid else []
if len(training_files) > 0 and len(hub_dataset) > 0:
raise HTTPException(
status_code=400, detail="Please either upload a dataset or choose a dataset from the Hugging Face Hub."
)
if len(training_files) == 0 and len(hub_dataset) == 0:
raise HTTPException(
status_code=400, detail="Please upload a dataset or choose a dataset from the Hugging Face Hub."
)
if len(hub_dataset) > 0:
if not train_split:
raise HTTPException(status_code=400, detail="Please enter a training split.")
if len(hub_dataset) == 0:
file_extension = os.path.splitext(data_files_training[0].filename)[1]
file_extension = file_extension[1:] if file_extension.startswith(".") else file_extension
if task == "image-classification":
dset = AutoTrainImageClassificationDataset(
train_data=training_files[0],
token=token,
project_name=project_name,
username=autotrain_user,
valid_data=validation_files[0] if validation_files else None,
percent_valid=None, # TODO: add to UI
local=hardware.lower() == "local-ui",
)
elif task == "image-regression":
dset = AutoTrainImageRegressionDataset(
train_data=training_files[0],
token=token,
project_name=project_name,
username=autotrain_user,
valid_data=validation_files[0] if validation_files else None,
percent_valid=None, # TODO: add to UI
local=hardware.lower() == "local-ui",
)
elif task == "image-object-detection":
dset = AutoTrainObjectDetectionDataset(
train_data=training_files[0],
token=token,
project_name=project_name,
username=autotrain_user,
valid_data=validation_files[0] if validation_files else None,
percent_valid=None, # TODO: add to UI
local=hardware.lower() == "local-ui",
)
elif task.startswith("vlm:"):
dset = AutoTrainVLMDataset(
train_data=training_files[0],
token=token,
project_name=project_name,
username=autotrain_user,
column_mapping=column_mapping,
valid_data=validation_files[0] if validation_files else None,
percent_valid=None, # TODO: add to UI
local=hardware.lower() == "local-ui",
)
else:
if task.startswith("llm"):
dset_task = "lm_training"
elif task.startswith("st:"):
dset_task = "sentence_transformers"
elif task == "text-classification":
dset_task = "text_multi_class_classification"
elif task == "text-regression":
dset_task = "text_single_column_regression"
elif task == "seq2seq":
dset_task = "seq2seq"
elif task.startswith("tabular"):
if "," in column_mapping["label"]:
column_mapping["label"] = column_mapping["label"].split(",")
else:
column_mapping["label"] = [column_mapping["label"]]
column_mapping["label"] = [col.strip() for col in column_mapping["label"]]
subtask = task.split(":")[-1].lower()
if len(column_mapping["label"]) > 1 and subtask == "classification":
dset_task = "tabular_multi_label_classification"
elif len(column_mapping["label"]) == 1 and subtask == "classification":
dset_task = "tabular_multi_class_classification"
elif len(column_mapping["label"]) > 1 and subtask == "regression":
dset_task = "tabular_multi_column_regression"
elif len(column_mapping["label"]) == 1 and subtask == "regression":
dset_task = "tabular_single_column_regression"
else:
raise NotImplementedError
elif task == "token-classification":
dset_task = "text_token_classification"
elif task == "extractive-qa":
dset_task = "text_extractive_question_answering"
else:
raise NotImplementedError
logger.info(f"Task: {dset_task}")
logger.info(f"Column mapping: {column_mapping}")
dset_args = dict(
train_data=training_files,
task=dset_task,
token=token,
project_name=project_name,
username=autotrain_user,
column_mapping=column_mapping,
valid_data=validation_files,
percent_valid=None, # TODO: add to UI
local=hardware.lower() == "local-ui",
ext=file_extension,
)
if task in ("text-classification", "token-classification", "st:pair_class"):
dset_args["convert_to_class_label"] = True
dset = AutoTrainDataset(**dset_args)
data_path = dset.prepare()
else:
data_path = hub_dataset
app_params = AppParams(
job_params_json=json.dumps(params),
token=token,
project_name=project_name,
username=autotrain_user,
task=task,
data_path=data_path,
base_model=base_model,
column_mapping=column_mapping,
using_hub_dataset=len(hub_dataset) > 0,
train_split=None if len(hub_dataset) == 0 else train_split,
valid_split=None if len(hub_dataset) == 0 else valid_split,
)
params = app_params.munge()
project = AutoTrainProject(params=params, backend=hardware)
job_id = project.create()
monitor_url = ""
if hardware == "local-ui":
DB.add_job(job_id)
monitor_url = "Monitor your job locally / in logs"
elif hardware.startswith("ep-"):
monitor_url = f"https://ui.endpoints.huggingface.co/{autotrain_user}/endpoints/{job_id}"
elif hardware.startswith("spaces-"):
monitor_url = f"https://hf.co/spaces/{job_id}"
else:
monitor_url = f"Success! Monitor your job in logs. Job ID: {job_id}"
return {"success": "true", "monitor_url": monitor_url}
@ui_router.get("/help/{element_id}", response_class=JSONResponse)
async def fetch_help(element_id: str, authenticated: bool = Depends(user_authentication)):
"""
This function is used to fetch the help text for a given element
:param element_id: str
:return: JSONResponse
"""
msg = get_app_help(element_id)
return {"message": msg}
@ui_router.get("/accelerators", response_class=JSONResponse)
async def available_accelerators(authenticated: bool = Depends(user_authentication)):
"""
This function is used to fetch the number of available accelerators
:return: JSONResponse
"""
if AUTOTRAIN_LOCAL == 0:
return {"accelerators": "Not available in cloud mode."}
cuda_available = torch.cuda.is_available()
mps_available = torch.backends.mps.is_available()
if cuda_available:
num_gpus = torch.cuda.device_count()
elif mps_available:
num_gpus = 1
else:
num_gpus = 0
return {"accelerators": num_gpus}
@ui_router.get("/is_model_training", response_class=JSONResponse)
async def is_model_training(authenticated: bool = Depends(user_authentication)):
"""
This function is used to fetch the number of running jobs
:return: JSONResponse
"""
if AUTOTRAIN_LOCAL == 0:
return {"model_training": "Not available in cloud mode."}
running_jobs = get_running_jobs(DB)
if running_jobs:
return {"model_training": True, "pids": running_jobs}
return {"model_training": False, "pids": []}
@ui_router.get("/logs", response_class=JSONResponse)
async def fetch_logs(authenticated: bool = Depends(user_authentication)):
"""
This function is used to fetch the logs
:return: JSONResponse
"""
if not AUTOTRAIN_LOCAL:
return {"logs": "Logs are only available in local mode."}
log_file = "autotrain.log"
with open(log_file, "r", encoding="utf-8") as f:
logs = f.read()
if len(str(logs).strip()) == 0:
logs = "No logs available."
logs = logs.split("\n")
logs = logs[::-1]
    # drop noisy request/telemetry lines (/ui/, /static/, nvidia-ml-py) from the output
logs = [log for log in logs if "/ui/" not in log and "/static/" not in log and "nvidia-ml-py" not in log]
cuda_available = torch.cuda.is_available()
if cuda_available:
devices = Device.all()
device_logs = []
for device in devices:
device_logs.append(
f"Device {device.index}: {device.name()} - {device.memory_used_human()}/{device.memory_total_human()}"
)
device_logs.append("-----------------")
logs = device_logs + logs
return {"logs": logs}
@ui_router.get("/stop_training", response_class=JSONResponse)
async def stop_training(authenticated: bool = Depends(user_authentication)):
"""
This function is used to stop the training
:return: JSONResponse
"""
running_jobs = get_running_jobs(DB)
if running_jobs:
for _pid in running_jobs:
try:
kill_process_by_pid(_pid)
except Exception:
logger.info(f"Process {_pid} is already completed. Skipping...")
return {"success": True}
return {"success": False}
| 4 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/oauth.py | """OAuth support for AutoTrain.
Taken from: https://github.com/gradio-app/gradio/blob/main/gradio/oauth.py
"""
from __future__ import annotations
import hashlib
import os
import urllib.parse
import fastapi
from authlib.integrations.base_client.errors import MismatchingStateError
from authlib.integrations.starlette_client import OAuth
from fastapi.responses import RedirectResponse
from starlette.middleware.sessions import SessionMiddleware
OAUTH_CLIENT_ID = os.environ.get("OAUTH_CLIENT_ID")
OAUTH_CLIENT_SECRET = os.environ.get("OAUTH_CLIENT_SECRET")
OAUTH_SCOPES = os.environ.get("OAUTH_SCOPES")
OPENID_PROVIDER_URL = os.environ.get("OPENID_PROVIDER_URL")
def attach_oauth(app: fastapi.FastAPI):
"""
Attaches OAuth functionality to a FastAPI application by adding OAuth routes and session middleware.
Args:
app (fastapi.FastAPI): The FastAPI application instance to which OAuth routes and middleware will be attached.
Notes:
- The session middleware requires a secret key to sign the cookies. A hash of the OAuth secret key is used to
make it unique to the Space and to ensure it is updated if the OAuth configuration changes.
- The session secret includes a version identifier ("-autotrain-v2") to allow for future changes in the session
cookie format. If the format changes, the version can be bumped to invalidate old cookies and prevent HTTP 500 errors.
"""
_add_oauth_routes(app)
# Session Middleware requires a secret key to sign the cookies. Let's use a hash
# of the OAuth secret key to make it unique to the Space + updated in case OAuth
# config gets updated.
session_secret = OAUTH_CLIENT_SECRET + "-autotrain-v2"
# ^ if we change the session cookie format in the future, we can bump the version of the session secret to make
# sure cookies are invalidated. Otherwise some users with an old cookie format might get a HTTP 500 error.
app.add_middleware(
SessionMiddleware,
secret_key=hashlib.sha256(session_secret.encode()).hexdigest(),
https_only=True,
same_site="none",
)
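# --- Illustrative usage sketch (not part of the original module) ---
# Assuming the OAUTH_CLIENT_ID/OAUTH_CLIENT_SECRET, OAUTH_SCOPES and
# OPENID_PROVIDER_URL environment variables are set (as in a Space with
# `hf_oauth: true`), wiring OAuth into an app is a single call:
#
#     app = fastapi.FastAPI()
#     attach_oauth(app)
#
# Afterwards /login/huggingface and /auth are registered and the verified OAuth
# payload is available as request.session["oauth_info"].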
def _add_oauth_routes(app: fastapi.FastAPI) -> None:
"""
Add OAuth routes to the FastAPI app (login, callback handler, and logout).
This function performs the following tasks:
1. Checks for required environment variables and raises a ValueError if any are missing.
2. Registers the OAuth server with the provided client ID, client secret, scopes, and OpenID provider URL.
3. Defines the following OAuth routes:
- `/login/huggingface`: Redirects to the Hugging Face OAuth page.
- `/auth`: Handles the OAuth callback and manages the OAuth state.
Args:
app (fastapi.FastAPI): The FastAPI application instance to which the OAuth routes will be added.
Raises:
ValueError: If any of the required environment variables (OAUTH_CLIENT_ID, OAUTH_CLIENT_SECRET,
OAUTH_SCOPES, OPENID_PROVIDER_URL) are not set.
"""
"""Add OAuth routes to the FastAPI app (login, callback handler and logout)."""
# Check environment variables
msg = (
"OAuth is required but {} environment variable is not set. Make sure you've enabled OAuth in your Space by"
" setting `hf_oauth: true` in the Space metadata."
)
if OAUTH_CLIENT_ID is None:
raise ValueError(msg.format("OAUTH_CLIENT_ID"))
if OAUTH_CLIENT_SECRET is None:
raise ValueError(msg.format("OAUTH_CLIENT_SECRET"))
if OAUTH_SCOPES is None:
raise ValueError(msg.format("OAUTH_SCOPES"))
if OPENID_PROVIDER_URL is None:
raise ValueError(msg.format("OPENID_PROVIDER_URL"))
# Register OAuth server
oauth = OAuth()
oauth.register(
name="huggingface",
client_id=OAUTH_CLIENT_ID,
client_secret=OAUTH_CLIENT_SECRET,
client_kwargs={"scope": OAUTH_SCOPES},
server_metadata_url=OPENID_PROVIDER_URL + "/.well-known/openid-configuration",
)
# Define OAuth routes
@app.get("/login/huggingface")
async def oauth_login(request: fastapi.Request):
"""
Handles the OAuth login process by redirecting to the Hugging Face OAuth page.
Args:
request (fastapi.Request): The incoming HTTP request.
Returns:
Response: A redirection response to the Hugging Face OAuth authorization page.
"""
"""Endpoint that redirects to HF OAuth page."""
redirect_uri = request.url_for("auth")
redirect_uri_as_str = str(redirect_uri)
if redirect_uri.netloc.endswith(".hf.space"):
redirect_uri_as_str = redirect_uri_as_str.replace("http://", "https://")
return await oauth.huggingface.authorize_redirect(request, redirect_uri_as_str) # type: ignore
@app.get("/auth")
async def auth(request: fastapi.Request) -> RedirectResponse:
"""
Handles the OAuth callback for Hugging Face authentication.
Args:
request (fastapi.Request): The incoming request object.
Returns:
RedirectResponse: A response object that redirects the user to the appropriate page.
Raises:
MismatchingStateError: If there is a state mismatch, likely due to a corrupted cookie.
In this case, the user is redirected to the login page after clearing the relevant session keys.
Notes:
- If the state mismatch occurs, it is likely due to a bug in authlib that causes the token to grow indefinitely
if the user tries to login repeatedly. Since cookies cannot exceed 4kb, the token will be truncated at some point,
resulting in a lost state. The workaround is to delete the cookie and redirect the user to the login page again.
- See https://github.com/lepture/authlib/issues/622 for more details.
"""
"""Endpoint that handles the OAuth callback."""
try:
oauth_info = await oauth.huggingface.authorize_access_token(request) # type: ignore
except MismatchingStateError:
# If the state mismatch, it is very likely that the cookie is corrupted.
# There is a bug reported in authlib that causes the token to grow indefinitely if the user tries to login
# repeatedly. Since cookies cannot get bigger than 4kb, the token will be truncated at some point - hence
# losing the state. A workaround is to delete the cookie and redirect the user to the login page again.
# See https://github.com/lepture/authlib/issues/622 for more details.
login_uri = "/login/huggingface"
if "_target_url" in request.query_params:
login_uri += "?" + urllib.parse.urlencode( # Keep same _target_url as before
{"_target_url": request.query_params["_target_url"]}
)
for key in list(request.session.keys()):
# Delete all keys that are related to the OAuth state
if key.startswith("_state_huggingface"):
request.session.pop(key)
return RedirectResponse(login_uri)
request.session["oauth_info"] = oauth_info
return _redirect_to_target(request)
def _redirect_to_target(request: fastapi.Request, default_target: str = "/") -> RedirectResponse:
"""
Redirects the incoming request to a target URL specified in the query parameters.
Args:
request (fastapi.Request): The incoming HTTP request.
default_target (str, optional): The default URL to redirect to if no target URL is specified in the query parameters. Defaults to "/".
Returns:
RedirectResponse: A response object that redirects the client to the target URL.
"""
target = request.query_params.get("_target_url", default_target)
# target = "https://huggingface.co/spaces/" + os.environ.get("SPACE_ID")
return RedirectResponse(target)
| 5 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/params.py | import json
from dataclasses import dataclass
from typing import Optional
from autotrain.trainers.clm.params import LLMTrainingParams
from autotrain.trainers.extractive_question_answering.params import ExtractiveQuestionAnsweringParams
from autotrain.trainers.image_classification.params import ImageClassificationParams
from autotrain.trainers.image_regression.params import ImageRegressionParams
from autotrain.trainers.object_detection.params import ObjectDetectionParams
from autotrain.trainers.sent_transformers.params import SentenceTransformersParams
from autotrain.trainers.seq2seq.params import Seq2SeqParams
from autotrain.trainers.tabular.params import TabularParams
from autotrain.trainers.text_classification.params import TextClassificationParams
from autotrain.trainers.text_regression.params import TextRegressionParams
from autotrain.trainers.token_classification.params import TokenClassificationParams
from autotrain.trainers.vlm.params import VLMTrainingParams
HIDDEN_PARAMS = [
"token",
"project_name",
"username",
"task",
"backend",
"train_split",
"valid_split",
"text_column",
"rejected_text_column",
"prompt_text_column",
"push_to_hub",
"trainer",
"model",
"data_path",
"image_path",
"class_image_path",
"revision",
"tokenizer",
"class_prompt",
"num_class_images",
"class_labels_conditioning",
"resume_from_checkpoint",
"dataloader_num_workers",
"allow_tf32",
"prior_generation_precision",
"local_rank",
"tokenizer_max_length",
"rank",
"xl",
"checkpoints_total_limit",
"validation_images",
"validation_epochs",
"num_validation_images",
"validation_prompt",
"sample_batch_size",
"log",
"image_column",
"target_column",
"id_column",
"target_columns",
"tokens_column",
"tags_column",
"objects_column",
"sentence1_column",
"sentence2_column",
"sentence3_column",
"question_column",
"answer_column",
]
PARAMS = {}
PARAMS["llm"] = LLMTrainingParams(
target_modules="all-linear",
log="tensorboard",
mixed_precision="fp16",
quantization="int4",
peft=True,
block_size=1024,
epochs=3,
padding="right",
chat_template="none",
max_completion_length=128,
distributed_backend="ddp",
).model_dump()
PARAMS["text-classification"] = TextClassificationParams(
mixed_precision="fp16",
log="tensorboard",
).model_dump()
PARAMS["st"] = SentenceTransformersParams(
mixed_precision="fp16",
log="tensorboard",
).model_dump()
PARAMS["image-classification"] = ImageClassificationParams(
mixed_precision="fp16",
log="tensorboard",
).model_dump()
PARAMS["image-object-detection"] = ObjectDetectionParams(
mixed_precision="fp16",
log="tensorboard",
).model_dump()
PARAMS["seq2seq"] = Seq2SeqParams(
mixed_precision="fp16",
target_modules="all-linear",
log="tensorboard",
).model_dump()
PARAMS["tabular"] = TabularParams(
categorical_imputer="most_frequent",
numerical_imputer="median",
numeric_scaler="robust",
).model_dump()
PARAMS["token-classification"] = TokenClassificationParams(
mixed_precision="fp16",
log="tensorboard",
).model_dump()
PARAMS["text-regression"] = TextRegressionParams(
mixed_precision="fp16",
log="tensorboard",
).model_dump()
PARAMS["image-regression"] = ImageRegressionParams(
mixed_precision="fp16",
log="tensorboard",
).model_dump()
PARAMS["vlm"] = VLMTrainingParams(
mixed_precision="fp16",
target_modules="all-linear",
log="tensorboard",
quantization="int4",
peft=True,
epochs=3,
).model_dump()
PARAMS["extractive-qa"] = ExtractiveQuestionAnsweringParams(
mixed_precision="fp16",
log="tensorboard",
max_seq_length=512,
max_doc_stride=128,
).model_dump()
@dataclass
class AppParams:
"""
AppParams class is responsible for managing and processing parameters for various machine learning tasks.
Attributes:
job_params_json (str): JSON string containing job parameters.
token (str): Authentication token.
project_name (str): Name of the project.
username (str): Username of the project owner.
task (str): Type of task to be performed.
data_path (str): Path to the dataset.
base_model (str): Base model to be used.
column_mapping (dict): Mapping of columns for the dataset.
train_split (Optional[str]): Name of the training split. Default is None.
valid_split (Optional[str]): Name of the validation split. Default is None.
using_hub_dataset (Optional[bool]): Flag indicating if a hub dataset is used. Default is False.
api (Optional[bool]): Flag indicating if API is used. Default is False.
Methods:
__post_init__(): Validates the parameters after initialization.
munge(): Processes the parameters based on the task type.
_munge_common_params(): Processes common parameters for all tasks.
_munge_params_sent_transformers(): Processes parameters for sentence transformers task.
_munge_params_llm(): Processes parameters for large language model task.
_munge_params_vlm(): Processes parameters for vision-language model task.
_munge_params_text_clf(): Processes parameters for text classification task.
_munge_params_extractive_qa(): Processes parameters for extractive question answering task.
_munge_params_text_reg(): Processes parameters for text regression task.
_munge_params_token_clf(): Processes parameters for token classification task.
_munge_params_seq2seq(): Processes parameters for sequence-to-sequence task.
_munge_params_img_clf(): Processes parameters for image classification task.
_munge_params_img_reg(): Processes parameters for image regression task.
_munge_params_img_obj_det(): Processes parameters for image object detection task.
_munge_params_tabular(): Processes parameters for tabular data task.
"""
job_params_json: str
token: str
project_name: str
username: str
task: str
data_path: str
base_model: str
column_mapping: dict
train_split: Optional[str] = None
valid_split: Optional[str] = None
using_hub_dataset: Optional[bool] = False
api: Optional[bool] = False
def __post_init__(self):
if self.using_hub_dataset and not self.train_split:
raise ValueError("train_split is required when using a hub dataset")
def munge(self):
if self.task == "text-classification":
return self._munge_params_text_clf()
elif self.task == "seq2seq":
return self._munge_params_seq2seq()
elif self.task == "image-classification":
return self._munge_params_img_clf()
elif self.task == "image-object-detection":
return self._munge_params_img_obj_det()
elif self.task.startswith("tabular"):
return self._munge_params_tabular()
elif self.task.startswith("llm"):
return self._munge_params_llm()
elif self.task == "token-classification":
return self._munge_params_token_clf()
elif self.task == "text-regression":
return self._munge_params_text_reg()
elif self.task.startswith("st:"):
return self._munge_params_sent_transformers()
elif self.task == "image-regression":
return self._munge_params_img_reg()
elif self.task.startswith("vlm"):
return self._munge_params_vlm()
elif self.task == "extractive-qa":
return self._munge_params_extractive_qa()
else:
raise ValueError(f"Unknown task: {self.task}")
def _munge_common_params(self):
_params = json.loads(self.job_params_json)
_params["token"] = self.token
_params["project_name"] = f"{self.project_name}"
if "push_to_hub" not in _params:
_params["push_to_hub"] = True
_params["data_path"] = self.data_path
_params["username"] = self.username
return _params
def _munge_params_sent_transformers(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["sentence1_column"] = "autotrain_sentence1"
_params["sentence2_column"] = "autotrain_sentence2"
_params["sentence3_column"] = "autotrain_sentence3"
_params["target_column"] = "autotrain_target"
_params["valid_split"] = "validation"
else:
_params["sentence1_column"] = self.column_mapping.get(
"sentence1" if not self.api else "sentence1_column", "sentence1"
)
_params["sentence2_column"] = self.column_mapping.get(
"sentence2" if not self.api else "sentence2_column", "sentence2"
)
_params["sentence3_column"] = self.column_mapping.get(
"sentence3" if not self.api else "sentence3_column", "sentence3"
)
_params["target_column"] = self.column_mapping.get("target" if not self.api else "target_column", "target")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
trainer = self.task.split(":")[1]
_params["trainer"] = trainer.lower()
return SentenceTransformersParams(**_params)
def _munge_params_llm(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if not self.using_hub_dataset:
_params["text_column"] = "autotrain_text"
_params["prompt_text_column"] = "autotrain_prompt"
_params["rejected_text_column"] = "autotrain_rejected_text"
else:
_params["text_column"] = self.column_mapping.get("text" if not self.api else "text_column", "text")
_params["prompt_text_column"] = self.column_mapping.get(
"prompt" if not self.api else "prompt_text_column", "prompt"
)
_params["rejected_text_column"] = self.column_mapping.get(
"rejected_text" if not self.api else "rejected_text_column", "rejected_text"
)
_params["train_split"] = self.train_split
if "log" not in _params:
_params["log"] = "tensorboard"
trainer = self.task.split(":")[1]
if trainer != "generic":
_params["trainer"] = trainer.lower()
if "quantization" in _params:
if _params["quantization"] in ("none", "no"):
_params["quantization"] = None
return LLMTrainingParams(**_params)
def _munge_params_vlm(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if not self.using_hub_dataset:
_params["text_column"] = "autotrain_text"
_params["prompt_text_column"] = "autotrain_prompt"
_params["image_column"] = "autotrain_image"
_params["valid_split"] = "validation"
else:
_params["text_column"] = self.column_mapping.get("text" if not self.api else "text_column", "text")
_params["prompt_text_column"] = self.column_mapping.get(
"prompt" if not self.api else "prompt_text_column", "prompt"
)
_params["image_column"] = self.column_mapping.get(
"image" if not self.api else "rejected_text_column", "image"
)
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
if "log" not in _params:
_params["log"] = "tensorboard"
trainer = self.task.split(":")[1]
_params["trainer"] = trainer.lower()
if "quantization" in _params:
if _params["quantization"] in ("none", "no"):
_params["quantization"] = None
return VLMTrainingParams(**_params)
def _munge_params_text_clf(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["text_column"] = "autotrain_text"
_params["target_column"] = "autotrain_label"
_params["valid_split"] = "validation"
else:
_params["text_column"] = self.column_mapping.get("text" if not self.api else "text_column", "text")
_params["target_column"] = self.column_mapping.get("label" if not self.api else "target_column", "label")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
return TextClassificationParams(**_params)
def _munge_params_extractive_qa(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["text_column"] = "autotrain_text"
_params["question_column"] = "autotrain_question"
_params["answer_column"] = "autotrain_answer"
_params["valid_split"] = "validation"
else:
_params["text_column"] = self.column_mapping.get("text" if not self.api else "text_column", "text")
_params["question_column"] = self.column_mapping.get(
"question" if not self.api else "question_column", "question"
)
_params["answer_column"] = self.column_mapping.get("answer" if not self.api else "answer_column", "answer")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
return ExtractiveQuestionAnsweringParams(**_params)
def _munge_params_text_reg(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["text_column"] = "autotrain_text"
_params["target_column"] = "autotrain_label"
_params["valid_split"] = "validation"
else:
_params["text_column"] = self.column_mapping.get("text" if not self.api else "text_column", "text")
_params["target_column"] = self.column_mapping.get("label" if not self.api else "target_column", "label")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
return TextRegressionParams(**_params)
def _munge_params_token_clf(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["tokens_column"] = "autotrain_text"
_params["tags_column"] = "autotrain_label"
_params["valid_split"] = "validation"
else:
_params["tokens_column"] = self.column_mapping.get("tokens" if not self.api else "tokens_column", "tokens")
_params["tags_column"] = self.column_mapping.get("tags" if not self.api else "tags_column", "tags")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
return TokenClassificationParams(**_params)
def _munge_params_seq2seq(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["text_column"] = "autotrain_text"
_params["target_column"] = "autotrain_label"
_params["valid_split"] = "validation"
else:
_params["text_column"] = self.column_mapping.get("text" if not self.api else "text_column", "text")
_params["target_column"] = self.column_mapping.get("label" if not self.api else "target_column", "label")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
return Seq2SeqParams(**_params)
def _munge_params_img_clf(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["image_column"] = "autotrain_image"
_params["target_column"] = "autotrain_label"
_params["valid_split"] = "validation"
else:
_params["image_column"] = self.column_mapping.get("image" if not self.api else "image_column", "image")
_params["target_column"] = self.column_mapping.get("label" if not self.api else "target_column", "label")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
return ImageClassificationParams(**_params)
def _munge_params_img_reg(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["image_column"] = "autotrain_image"
_params["target_column"] = "autotrain_label"
_params["valid_split"] = "validation"
else:
_params["image_column"] = self.column_mapping.get("image" if not self.api else "image_column", "image")
_params["target_column"] = self.column_mapping.get("target" if not self.api else "target_column", "target")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
return ImageRegressionParams(**_params)
def _munge_params_img_obj_det(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if "log" not in _params:
_params["log"] = "tensorboard"
if not self.using_hub_dataset:
_params["image_column"] = "autotrain_image"
_params["objects_column"] = "autotrain_objects"
_params["valid_split"] = "validation"
else:
_params["image_column"] = self.column_mapping.get("image" if not self.api else "image_column", "image")
_params["objects_column"] = self.column_mapping.get(
"objects" if not self.api else "objects_column", "objects"
)
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
return ObjectDetectionParams(**_params)
def _munge_params_tabular(self):
_params = self._munge_common_params()
_params["model"] = self.base_model
if not self.using_hub_dataset:
_params["id_column"] = "autotrain_id"
_params["valid_split"] = "validation"
if len(self.column_mapping["label"]) == 1:
_params["target_columns"] = ["autotrain_label"]
else:
_params["target_columns"] = [
"autotrain_label_" + str(i) for i in range(len(self.column_mapping["label"]))
]
else:
_params["id_column"] = self.column_mapping.get("id" if not self.api else "id_column", "id")
_params["train_split"] = self.train_split
_params["valid_split"] = self.valid_split
_params["target_columns"] = self.column_mapping.get("label" if not self.api else "target_columns", "label")
if len(_params["categorical_imputer"].strip()) == 0 or _params["categorical_imputer"].lower() == "none":
_params["categorical_imputer"] = None
if len(_params["numerical_imputer"].strip()) == 0 or _params["numerical_imputer"].lower() == "none":
_params["numerical_imputer"] = None
if len(_params["numeric_scaler"].strip()) == 0 or _params["numeric_scaler"].lower() == "none":
_params["numeric_scaler"] = None
if "classification" in self.task:
_params["task"] = "classification"
else:
_params["task"] = "regression"
return TabularParams(**_params)
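# --- Illustrative usage sketch (not part of the original module) ---
# Minimal example of turning form/API input into trainer params for a hub
# dataset; every value below is a placeholder, not a default from this module.
#
#     app_params = AppParams(
#         job_params_json=json.dumps({"epochs": 1}),
#         token="hf_...",
#         project_name="my-project",
#         username="my-username",
#         task="text-classification",
#         data_path="<hub-dataset-id>",
#         base_model="<hub-model-id>",
#         column_mapping={"text": "text", "label": "label"},
#         train_split="train",
#         valid_split="test",
#         using_hub_dataset=True,
#     )
#     params = app_params.munge()  # -> TextClassificationParams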
def get_task_params(task, param_type):
"""
Retrieve task-specific parameters while filtering out hidden parameters based on the task and parameter type.
Args:
task (str): The task identifier, which can include prefixes like "llm", "st:", "vlm:", etc.
param_type (str): The type of parameters to retrieve, typically "basic" or other types.
Returns:
dict: A dictionary of task-specific parameters with hidden parameters filtered out.
Notes:
- The function handles various task prefixes and adjusts the task and trainer variables accordingly.
- Hidden parameters are filtered out based on the task and parameter type.
- Additional hidden parameters are defined for specific tasks and trainers.
"""
if task.startswith("llm"):
trainer = task.split(":")[1].lower()
task = task.split(":")[0].lower()
if task.startswith("st:"):
trainer = task.split(":")[1].lower()
task = task.split(":")[0].lower()
if task.startswith("vlm:"):
trainer = task.split(":")[1].lower()
task = task.split(":")[0].lower()
if task.startswith("tabular"):
task = "tabular"
if task not in PARAMS:
return {}
task_params = PARAMS[task]
task_params = {k: v for k, v in task_params.items() if k not in HIDDEN_PARAMS}
if task == "llm":
more_hidden_params = []
if trainer == "sft":
more_hidden_params = [
"model_ref",
"dpo_beta",
"add_eos_token",
"max_prompt_length",
"max_completion_length",
]
elif trainer == "reward":
more_hidden_params = [
"model_ref",
"dpo_beta",
"add_eos_token",
"max_prompt_length",
"max_completion_length",
"unsloth",
]
elif trainer == "orpo":
more_hidden_params = [
"model_ref",
"dpo_beta",
"add_eos_token",
"unsloth",
]
elif trainer == "generic":
more_hidden_params = [
"model_ref",
"dpo_beta",
"max_prompt_length",
"max_completion_length",
]
elif trainer == "dpo":
more_hidden_params = [
"add_eos_token",
"unsloth",
]
if param_type == "basic":
more_hidden_params.extend(
[
"padding",
"use_flash_attention_2",
"disable_gradient_checkpointing",
"logging_steps",
"eval_strategy",
"save_total_limit",
"auto_find_batch_size",
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"quantization",
"merge_adapter",
"lora_r",
"lora_alpha",
"lora_dropout",
"max_completion_length",
]
)
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "text-classification" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "extractive-qa" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "st" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "vlm" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
"quantization",
"lora_r",
"lora_alpha",
"lora_dropout",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "text-regression" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "image-classification" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "image-regression" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "image-object-detection" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "seq2seq" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"quantization",
"lora_r",
"lora_alpha",
"lora_dropout",
"target_modules",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
if task == "token-classification" and param_type == "basic":
more_hidden_params = [
"warmup_ratio",
"weight_decay",
"max_grad_norm",
"seed",
"logging_steps",
"auto_find_batch_size",
"save_total_limit",
"eval_strategy",
"early_stopping_patience",
"early_stopping_threshold",
]
task_params = {k: v for k, v in task_params.items() if k not in more_hidden_params}
return task_params
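# --- Illustrative usage sketch (not part of the original module) ---
# The UI and the colab widget both use this helper to decide which knobs to
# show, e.g. (exact keys depend on the trainer's default params):
#
#     basic = get_task_params("llm:sft", "basic")  # trimmed-down set of fields
#     full = get_task_params("llm:sft", "full")    # everything except HIDDEN_PARAMS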
| 6 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/colab.py | import json
import os
import random
import string
import subprocess
import ipywidgets as widgets
import yaml
from autotrain.app.models import fetch_models
from autotrain.app.params import get_task_params
def generate_random_string():
prefix = "autotrain"
part1 = "".join(random.choices(string.ascii_lowercase + string.digits, k=5))
part2 = "".join(random.choices(string.ascii_lowercase + string.digits, k=5))
return f"{prefix}-{part1}-{part2}"
def colab_app():
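    """
    Build the AutoTrain training UI as an ipywidgets layout for Colab/Jupyter notebooks.

    The returned top-level widget bundles the credential, project, dataset and parameter
    inputs together with the "Start Training" button; display it in a notebook cell to
    render the form.
    """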
if not os.path.exists("data"):
os.makedirs("data")
MODEL_CHOICES = fetch_models()
TASK_NAMES = [
"LLM SFT",
"LLM ORPO",
"LLM Generic",
"LLM DPO",
"LLM Reward",
"Text Classification",
"Text Regression",
"Sequence to Sequence",
"Token Classification",
"Image Classification",
"Image Regression",
"Object Detection",
"Tabular Classification",
"Tabular Regression",
"ST Pair",
"ST Pair Classification",
"ST Pair Scoring",
"ST Triplet",
"ST Question Answering",
]
TASK_MAP = {
"LLM SFT": "llm:sft",
"LLM ORPO": "llm:orpo",
"LLM Generic": "llm:generic",
"LLM DPO": "llm:dpo",
"LLM Reward": "llm:reward",
"Text Classification": "text-classification",
"Text Regression": "text-regression",
"Sequence to Sequence": "seq2seq",
"Token Classification": "token-classification",
"Image Classification": "image-classification",
"Image Regression": "image-regression",
"Object Detection": "image-object-detection",
"Tabular Classification": "tabular:classification",
"Tabular Regression": "tabular:regression",
"ST Pair": "st:pair",
"ST Pair Classification": "st:pair_class",
"ST Pair Scoring": "st:pair_score",
"ST Triplet": "st:triplet",
"ST Question Answering": "st:qa",
}
def _get_params(task, param_type):
_p = get_task_params(task, param_type=param_type)
_p["push_to_hub"] = True
_p = json.dumps(_p, indent=4)
return _p
hf_token_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Hugging Face Write Token</h5>")
hf_token = widgets.Password(
value="", description="", disabled=False, layout=widgets.Layout(margin="0 0 0 0", width="200px")
)
hf_user_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Hugging Face Username</h5>")
hf_user = widgets.Text(
value="", description="", disabled=False, layout=widgets.Layout(margin="0 0 0 0", width="200px")
)
base_model_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Base Model</h5>")
base_model = widgets.Text(value=MODEL_CHOICES["llm"][0], disabled=False, layout=widgets.Layout(width="420px"))
project_name_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Project Name</h5>")
project_name = widgets.Text(
value=generate_random_string(),
description="",
disabled=False,
layout=widgets.Layout(margin="0 0 0 0", width="200px"),
)
task_dropdown_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Task</h5>")
task_dropdown = widgets.Dropdown(
options=TASK_NAMES,
value=TASK_NAMES[0],
description="",
disabled=False,
layout=widgets.Layout(margin="0 0 0 0", width="200px"),
)
dataset_path_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Path</h5>")
dataset_path = widgets.Text(
value="", description="", disabled=False, layout=widgets.Layout(margin="0 0 0 0", width="200px")
)
train_split_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Train Split</h5>")
train_split = widgets.Text(
value="", description="", disabled=False, layout=widgets.Layout(margin="0 0 0 0", width="200px")
)
valid_split_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Valid Split</h5>")
valid_split = widgets.Text(
value="",
placeholder="optional",
description="",
disabled=False,
layout=widgets.Layout(margin="0 0 0 0", width="200px"),
)
dataset_source_dropdown_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Source</h5>")
dataset_source_dropdown = widgets.Dropdown(
options=["Hugging Face Hub", "Local"],
value="Hugging Face Hub",
description="",
disabled=False,
layout=widgets.Layout(margin="0 0 0 0", width="200px"),
)
col_mapping_label = widgets.HTML("<h5 style='margin-bottom: 0; margin-top: 0;'>Column Mapping</h5>")
col_mapping = widgets.Text(
value='{"text": "text"}',
placeholder="",
description="",
disabled=False,
layout=widgets.Layout(margin="0 0 0 0", width="420px"),
)
parameters_dropdown = widgets.Dropdown(
options=["Basic", "Full"], value="Basic", description="", disabled=False, layout=widgets.Layout(width="400px")
)
parameters = widgets.Textarea(
value=_get_params("llm:sft", "basic"),
description="",
disabled=False,
layout=widgets.Layout(height="400px", width="400px"),
)
start_training_button = widgets.Button(
description="Start Training",
layout=widgets.Layout(width="1000px"),
disabled=False,
button_style="", # 'success', 'info', 'warning', 'danger' or ''
tooltip="Click to start training",
icon="check", # (FontAwesome names without the `fa-` prefix)
)
spacer = widgets.Box(layout=widgets.Layout(width="20px"))
title_hbox0 = widgets.HTML("<h3>Hugging Face Credentials</h3>")
title_hbox1 = widgets.HTML("<h3>Project Details</h3>")
title_hbox2 = widgets.HTML("<h3>Dataset Details</h3>")
title_hbox3 = widgets.HTML("<h3>Parameters</h3>")
hbox0 = widgets.HBox(
[
widgets.VBox([hf_token_label, hf_token]),
spacer,
widgets.VBox([hf_user_label, hf_user]),
]
)
hbox1 = widgets.HBox(
[
widgets.VBox([project_name_label, project_name]),
spacer,
widgets.VBox([task_dropdown_label, task_dropdown]),
]
)
hbox2_1 = widgets.HBox(
[
widgets.VBox([dataset_source_dropdown_label, dataset_source_dropdown]),
spacer,
widgets.VBox([dataset_path_label, dataset_path]),
]
)
hbox2_2 = widgets.HBox(
[
widgets.VBox([train_split_label, train_split]),
spacer,
widgets.VBox([valid_split_label, valid_split]),
]
)
hbox2_3 = widgets.HBox(
[
widgets.VBox([col_mapping_label, col_mapping]),
]
)
hbox3 = widgets.VBox([parameters_dropdown, parameters])
vbox0 = widgets.VBox([title_hbox0, hbox0])
vbox1 = widgets.VBox([title_hbox1, base_model_label, base_model, hbox1])
vbox2 = widgets.VBox([title_hbox2, hbox2_1, hbox2_2, hbox2_3])
vbox3 = widgets.VBox([title_hbox3, hbox3])
left_column = widgets.VBox([vbox0, vbox1, vbox2], layout=widgets.Layout(width="500px"))
right_column = widgets.VBox([vbox3], layout=widgets.Layout(width="500px", align_items="flex-end"))
separator = widgets.HTML('<div style="border-left: 1px solid black; height: 100%;"></div>')
_main_layout = widgets.HBox([left_column, separator, right_column])
main_layout = widgets.VBox([_main_layout, start_training_button])
def on_dataset_change(change):
if change["new"] == "Local":
dataset_path.value = "data/"
train_split.value = "train"
valid_split.value = ""
else:
dataset_path.value = ""
train_split.value = ""
valid_split.value = ""
def update_parameters(*args):
task = TASK_MAP[task_dropdown.value]
param_type = parameters_dropdown.value.lower()
parameters.value = _get_params(task, param_type)
def update_col_mapping(*args):
task = TASK_MAP[task_dropdown.value]
if task in ["llm:sft", "llm:generic"]:
col_mapping.value = '{"text": "text"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = True
elif task in ["llm:dpo", "llm:orpo"]:
col_mapping.value = '{"prompt": "prompt", "text": "text", "rejected_text": "rejected_text"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = True
elif task == "llm:reward":
col_mapping.value = '{"text": "text", "rejected_text": "rejected_text"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = True
elif task == "text-classification":
col_mapping.value = '{"text": "text", "label": "target"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "text-regression":
col_mapping.value = '{"text": "text", "label": "target"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "token-classification":
col_mapping.value = '{"text": "tokens", "label": "tags"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "seq2seq":
col_mapping.value = '{"text": "text", "label": "target"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "image-classification":
col_mapping.value = '{"image": "image", "label": "label"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "image-regression":
col_mapping.value = '{"image": "image", "label": "target"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "image-object-detection":
col_mapping.value = '{"image": "image", "objects": "objects"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "tabular:classification":
col_mapping.value = '{"id": "id", "label": ["target"]}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "tabular:regression":
col_mapping.value = '{"id": "id", "label": ["target"]}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "st:pair":
col_mapping.value = '{"sentence1": "anchor", "sentence2": "positive"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "st:pair_class":
col_mapping.value = '{"sentence1": "premise", "sentence2": "hypothesis", "target": "label"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "st:pair_score":
col_mapping.value = '{"sentence1": "sentence1", "sentence2": "sentence2", "target": "score"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "st:triplet":
col_mapping.value = '{"sentence1": "anchor", "sentence2": "positive", "sentence3": "negative"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
elif task == "st:qa":
col_mapping.value = '{"sentence1": "query", "sentence1": "answer"}'
dataset_source_dropdown.disabled = False
valid_split.disabled = False
else:
col_mapping.value = "Enter column mapping..."
def update_base_model(*args):
if TASK_MAP[task_dropdown.value] == "text-classification":
base_model.value = MODEL_CHOICES["text-classification"][0]
elif TASK_MAP[task_dropdown.value].startswith("llm"):
base_model.value = MODEL_CHOICES["llm"][0]
elif TASK_MAP[task_dropdown.value] == "image-classification":
base_model.value = MODEL_CHOICES["image-classification"][0]
elif TASK_MAP[task_dropdown.value] == "seq2seq":
base_model.value = MODEL_CHOICES["seq2seq"][0]
elif TASK_MAP[task_dropdown.value] == "tabular:classification":
base_model.value = MODEL_CHOICES["tabular-classification"][0]
elif TASK_MAP[task_dropdown.value] == "tabular:regression":
base_model.value = MODEL_CHOICES["tabular-regression"][0]
elif TASK_MAP[task_dropdown.value] == "token-classification":
base_model.value = MODEL_CHOICES["token-classification"][0]
elif TASK_MAP[task_dropdown.value] == "text-regression":
base_model.value = MODEL_CHOICES["text-regression"][0]
elif TASK_MAP[task_dropdown.value] == "image-object-detection":
base_model.value = MODEL_CHOICES["image-object-detection"][0]
elif TASK_MAP[task_dropdown.value].startswith("st:"):
base_model.value = MODEL_CHOICES["sentence-transformers"][0]
else:
base_model.value = "Enter base model..."
def start_training(b):
start_training_button.disabled = True
try:
print("Training is starting... Please wait!")
os.environ["HF_USERNAME"] = hf_user.value
os.environ["HF_TOKEN"] = hf_token.value
train_split_value = train_split.value.strip() if train_split.value.strip() != "" else None
valid_split_value = valid_split.value.strip() if valid_split.value.strip() != "" else None
params_val = json.loads(parameters.value)
if task_dropdown.value.startswith("llm") or task_dropdown.value.startswith("sentence-transformers"):
params_val["trainer"] = task_dropdown.value.split(":")[1]
# params_val = {k: v for k, v in params_val.items() if k != "trainer"}
chat_template = params_val.get("chat_template")
if chat_template is not None:
params_val = {k: v for k, v in params_val.items() if k != "chat_template"}
push_to_hub = params_val.get("push_to_hub", True)
if "push_to_hub" in params_val:
params_val = {k: v for k, v in params_val.items() if k != "push_to_hub"}
config = {
"task": TASK_MAP[task_dropdown.value].split(":")[0],
"base_model": base_model.value,
"project_name": project_name.value,
"log": "tensorboard",
"backend": "local",
"data": {
"path": dataset_path.value,
"train_split": train_split_value,
"valid_split": valid_split_value,
"column_mapping": json.loads(col_mapping.value),
},
"params": params_val,
"hub": {
"username": "${{HF_USERNAME}}",
"token": "${{HF_TOKEN}}",
"push_to_hub": push_to_hub,
},
}
if TASK_MAP[task_dropdown.value].startswith("llm"):
config["data"]["chat_template"] = chat_template
if config["data"]["chat_template"] == "none":
config["data"]["chat_template"] = None
with open("config.yml", "w") as f:
yaml.dump(config, f)
cmd = "autotrain --config config.yml"
process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
print(output.strip())
poll_res = process.poll()
if poll_res != 0:
start_training_button.disabled = False
raise Exception(f"Training failed with exit code: {poll_res}")
print("Training completed successfully!")
start_training_button.disabled = False
except Exception as e:
print("An error occurred while starting training!")
print(f"Error: {e}")
start_training_button.disabled = False
start_training_button.on_click(start_training)
dataset_source_dropdown.observe(on_dataset_change, names="value")
task_dropdown.observe(update_col_mapping, names="value")
task_dropdown.observe(update_parameters, names="value")
task_dropdown.observe(update_base_model, names="value")
parameters_dropdown.observe(update_parameters, names="value")
return main_layout
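# Example usage in a notebook cell (illustrative):
#   from autotrain.app.colab import colab_app
#   elements = colab_app()
#   display(elements)  # renders the form widgets and the "Start Training" button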
| 7 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/app.py | import os
from fastapi import FastAPI, Request
from fastapi.responses import RedirectResponse
from fastapi.staticfiles import StaticFiles
from autotrain import __version__, logger
from autotrain.app.api_routes import api_router
from autotrain.app.oauth import attach_oauth
from autotrain.app.ui_routes import ui_router
logger.info("Starting AutoTrain...")
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
app = FastAPI()
if "SPACE_ID" in os.environ:
attach_oauth(app)
app.include_router(ui_router, prefix="/ui", include_in_schema=False)
app.include_router(api_router, prefix="/api")
static_path = os.path.join(BASE_DIR, "static")
app.mount("/static", StaticFiles(directory=static_path), name="static")
logger.info(f"AutoTrain version: {__version__}")
logger.info("AutoTrain started successfully")
@app.get("/")
async def forward_to_ui(request: Request):
"""
Forwards the incoming request to the UI endpoint.
Args:
request (Request): The incoming HTTP request.
Returns:
RedirectResponse: A response object that redirects to the UI endpoint,
including any query parameters from the original request.
"""
query_params = request.query_params
url = "/ui/"
if query_params:
url += f"?{query_params}"
return RedirectResponse(url=url)
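# Example (illustrative): this ASGI app is typically launched via the `autotrain app` CLI,
# but it can also be served directly with uvicorn, e.g.:
#   uvicorn autotrain.app.app:app --host 0.0.0.0 --port 7860
# (the port is arbitrary here; pick whatever suits your deployment)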
| 8 |
0 | hf_public_repos/autotrain-advanced/src/autotrain | hf_public_repos/autotrain-advanced/src/autotrain/app/models.py | import collections
from huggingface_hub import list_models
def get_sorted_models(hub_models):
"""
Filters and sorts a list of models based on their download count.
Args:
hub_models (list): A list of model objects. Each model object must have the attributes 'id', 'downloads', and 'private'.
Returns:
list: A list of model IDs sorted by their download count in descending order. Only includes models that are not private.
"""
hub_models = [{"id": m.id, "downloads": m.downloads} for m in hub_models if m.private is False]
hub_models = sorted(hub_models, key=lambda x: x["downloads"], reverse=True)
hub_models = [m["id"] for m in hub_models]
return hub_models
def _fetch_text_classification_models():
"""
Fetches and sorts text classification models from the Hugging Face model hub.
This function retrieves models for the tasks "fill-mask" and "text-classification"
from the Hugging Face model hub, sorts them by the number of downloads, and combines
them into a single list. Additionally, it fetches trending models based on the number
of likes in the past 7 days, sorts them, and places them at the beginning of the list
if they are not already included.
Returns:
list: A sorted list of model identifiers from the Hugging Face model hub.
"""
hub_models1 = list(
list_models(
task="fill-mask",
library="transformers",
sort="downloads",
direction=-1,
limit=100,
full=False,
)
)
hub_models2 = list(
list_models(
task="text-classification",
library="transformers",
sort="downloads",
direction=-1,
limit=100,
full=False,
)
)
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models = list(
list_models(
task="fill-mask",
library="transformers",
sort="likes7d",
direction=-1,
limit=30,
full=False,
)
)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_llm_models():
hub_models = list(
list_models(
task="text-generation",
library="transformers",
sort="downloads",
direction=-1,
limit=100,
full=False,
)
)
hub_models = get_sorted_models(hub_models)
trending_models = list(
list_models(
task="text-generation",
library="transformers",
sort="likes7d",
direction=-1,
limit=30,
full=False,
)
)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_image_classification_models():
hub_models = list(
list_models(
task="image-classification",
library="transformers",
sort="downloads",
direction=-1,
limit=100,
full=False,
)
)
hub_models = get_sorted_models(hub_models)
trending_models = list(
list_models(
task="image-classification",
library="transformers",
sort="likes7d",
direction=-1,
limit=30,
full=False,
)
)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_image_object_detection_models():
hub_models = list(
list_models(
task="object-detection",
library="transformers",
sort="downloads",
direction=-1,
limit=100,
full=False,
pipeline_tag="object-detection",
)
)
hub_models = get_sorted_models(hub_models)
trending_models = list(
list_models(
task="object-detection",
library="transformers",
sort="likes7d",
direction=-1,
limit=30,
full=False,
pipeline_tag="object-detection",
)
)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_seq2seq_models():
hub_models = list(
list_models(
task="text2text-generation",
library="transformers",
sort="downloads",
direction=-1,
limit=100,
full=False,
)
)
hub_models = get_sorted_models(hub_models)
trending_models = list(
list_models(
task="text2text-generation",
library="transformers",
sort="likes7d",
direction=-1,
limit=30,
full=False,
)
)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_token_classification_models():
hub_models1 = list(
list_models(
task="fill-mask",
library="transformers",
sort="downloads",
direction=-1,
limit=100,
full=False,
)
)
hub_models2 = list(
list_models(
task="token-classification",
library="transformers",
sort="downloads",
direction=-1,
limit=100,
full=False,
)
)
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models = list(
list_models(
task="fill-mask",
library="transformers",
sort="likes7d",
direction=-1,
limit=30,
full=False,
)
)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_st_models():
hub_models1 = list(
list_models(
task="sentence-similarity",
library="sentence-transformers",
sort="downloads",
direction=-1,
limit=30,
full=False,
)
)
hub_models2 = list(
list_models(
task="fill-mask",
library="transformers",
sort="downloads",
direction=-1,
limit=30,
full=False,
)
)
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models = list(
list_models(
task="sentence-similarity",
library="sentence-transformers",
sort="likes7d",
direction=-1,
limit=30,
full=False,
)
)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def _fetch_vlm_models():
hub_models1 = list(
list_models(
task="image-text-to-text",
sort="downloads",
direction=-1,
limit=100,
full=False,
filter=["paligemma"],
)
)
# hub_models2 = list(
# list_models(
# task="image-text-to-text",
# sort="downloads",
# direction=-1,
# limit=100,
# full=False,
# filter=["florence2"],
# )
# )
hub_models2 = []
hub_models = list(hub_models1) + list(hub_models2)
hub_models = get_sorted_models(hub_models)
trending_models1 = list(
list_models(
task="image-text-to-text",
sort="likes7d",
direction=-1,
limit=30,
full=False,
filter=["paligemma"],
)
)
# trending_models2 = list(
# list_models(
# task="image-text-to-text",
# sort="likes7d",
# direction=-1,
# limit=30,
# full=False,
# filter=["florence2"],
# )
# )
trending_models2 = []
trending_models = list(trending_models1) + list(trending_models2)
if len(trending_models) > 0:
trending_models = get_sorted_models(trending_models)
hub_models = [m for m in hub_models if m not in trending_models]
hub_models = trending_models + hub_models
return hub_models
def fetch_models():
_mc = collections.defaultdict(list)
_mc["text-classification"] = _fetch_text_classification_models()
_mc["llm"] = _fetch_llm_models()
_mc["image-classification"] = _fetch_image_classification_models()
_mc["image-regression"] = _fetch_image_classification_models()
_mc["seq2seq"] = _fetch_seq2seq_models()
_mc["token-classification"] = _fetch_token_classification_models()
_mc["text-regression"] = _fetch_text_classification_models()
_mc["image-object-detection"] = _fetch_image_object_detection_models()
_mc["sentence-transformers"] = _fetch_st_models()
_mc["vlm"] = _fetch_vlm_models()
_mc["extractive-qa"] = _fetch_text_classification_models()
# tabular-classification
_mc["tabular-classification"] = [
"xgboost",
"random_forest",
"ridge",
"logistic_regression",
"svm",
"extra_trees",
"adaboost",
"decision_tree",
"knn",
]
# tabular-regression
_mc["tabular-regression"] = [
"xgboost",
"random_forest",
"ridge",
"svm",
"extra_trees",
"adaboost",
"decision_tree",
"knn",
]
return _mc
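# Example usage (illustrative):
#   choices = fetch_models()
#   print(choices["llm"][:5])  # most downloaded / trending text-generation models
#   print(choices["tabular-classification"])  # static list of classical ML algorithms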
| 9 |
0 | hf_public_repos | hf_public_repos/blog/agents.md | ---
title: "License to Call: Introducing Transformers Agents 2.0"
thumbnail: /blog/assets/agents/thumbnail.png
authors:
- user: m-ric
- user: lysandre
- user: pcuenq
---
# License to Call: Introducing Transformers Agents 2.0
## TL;DR
We are releasing Transformers Agents 2.0!
⇒ 🎁 On top of our existing agent type, we introduce two new agents that **can iterate based on past observations to solve complex tasks**.
⇒ 💡 We aim for the code to be **clear and modular, and for common attributes like the final prompt and tools to be transparent**.
⇒ 🤝 We add **sharing options** to boost community agents.
⇒ 💪 **Extremely performant new agent framework**, allowing a Llama-3-70B-Instruct agent to outperform GPT-4 based agents in the GAIA Leaderboard!
🚀 Go try it out and climb ever higher on the GAIA leaderboard!
## Table of Contents
- [What is an agent?](#what-is-an-agent)
- [The Transformers Agents approach](#the-transformers-agents-approach)
- [Main elements](#main-elements)
- [Example use-cases](#example-use-cases)
- [Self-correcting Retrieval-Augmented-Generation](#self-correcting-retrieval-augmented-generation)
- [Using a simple multi-agent setup 🤝 for efficient web browsing](#using-a-simple-multi-agent-setup-for-efficient-web-browsing)
- [Testing our agents](#testing-our-agents)
- [Benchmarking LLM engines](#benchmarking-llm-engines)
- [Climbing up the GAIA Leaderboard with a multi-modal agent](#climbing-up-the-gaia-leaderboard-with-a-multi-modal-agent)
- [Conclusion](#conclusion)
## What is an agent?
Large Language Models (LLMs) can tackle a wide range of tasks, but they often struggle with specific tasks like logic, calculation, and search. When prompted in these domains in which they do not perform well, they frequently fail to generate a correct answer.
One approach to overcome this weakness is to create an **agent**, which is just a program driven by an LLM. The agent is empowered by **tools** to help it perform actions: whenever it needs a specific skill to solve a particular problem, it relies on an appropriate tool from its toolbox.
Experimentally, agent frameworks generally work very well, achieving state-of-the-art performance on several benchmarks. For instance, have a look at [the top submissions for HumanEval](https://paperswithcode.com/sota/code-generation-on-humaneval): they are agent systems.
## The Transformers Agents approach
Building agent workflows is complex, and we feel these systems need a lot of clarity and modularity. We launched Transformers Agents one year ago, and we’re doubling down on our core design goals.
Our framework strives for:
- **Clarity through simplicity:** we reduce abstractions to the minimum. Simple error logs and accessible attributes let you easily inspect what’s happening and give you more clarity.
- **Modularity:** We prefer to propose building blocks rather than full, complex feature sets. You are free to choose whatever building blocks are best for your project.
- For instance, since any agent system is just a vehicle powered by an LLM engine, we decided to conceptually separate the two, which lets you create any agent type from any underlying LLM.
On top of that, we have **sharing features** that let you build on the shoulders of giants!
### Main elements
- `Tool`: this is the class that lets you use a tool or implement a new one. It is composed mainly of a callable `forward` method that executes the tool action, and a set of a few essential attributes: `name`, `description`, `inputs` and `output_type`. These attributes are used to dynamically generate a usage manual for the tool and insert it into the LLM’s prompt.
- `Toolbox`: It's a set of tools that are provided to an agent as resources to solve a particular task. For performance reasons, tools in a toolbox are already instantiated and ready to go. This is because some tools take time to initialize, so it’s usually better to re-use an existing toolbox and just swap one tool, rather than re-building a set of tools from scratch at each agent initialization.
- `CodeAgent`: a very simple agent that generates its actions as one single blob of Python code. It will not be able to iterate on previous observations.
- `ReactAgent`: ReAct agents follow a cycle of Thought ⇒ Action ⇒ Observation until they’ve solved the task. We propose two classes of ReactAgent:
- `ReactCodeAgent` generates its actions as python blobs.
- `ReactJsonAgent` generates its actions as JSON blobs.
Check out [the documentation](https://huggingface.co/docs/transformers/en/main_classes/agent) to learn how to use each component!
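To give a feel for the `Tool` API, here is a minimal custom tool. This is only a sketch: the calculator itself is illustrative, but the attributes (`name`, `description`, `inputs`, `output_type`) and the `forward` method are the pieces every tool defines.
```python
from transformers.agents import Tool

class CalculatorTool(Tool):
    name = "calculator"
    description = "Evaluates a simple arithmetic expression and returns the result as text."
    inputs = {
        "expression": {
            "type": "text",
            "description": "The arithmetic expression to evaluate, e.g. '3 * (7 + 1)'.",
        }
    }
    output_type = "text"

    def forward(self, expression: str) -> str:
        # eval() keeps this toy example short; don't use it on untrusted input.
        return str(eval(expression))
```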
How do agents work under the hood?
In essence, what an agent does is “allowing an LLM to use tools”. Agents have a key `agent.run()` method that:
- Provides information about tool usage to your LLM in a **specific prompt**. This way, the LLM can select tools to run to solve the task.
- **Parses** the tool calls from the LLM output (can be via code, JSON format, or any other format).
- **Executes** the calls.
- If the agent is designed to iterate on previous outputs, it **keeps a memory** with previous tool calls and observations. This memory can be more or less fine-grained depending on how long-term you want it to be.
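To make these steps concrete, here is how the toy calculator tool sketched above could be wired into a ReAct agent and run. Again, this is a minimal sketch: the model choice and the task are purely illustrative.
```python
from transformers.agents import HfEngine, ReactJsonAgent

llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
agent = ReactJsonAgent(tools=[CalculatorTool()], llm_engine=llm_engine)

# The agent inserts the tool's usage manual into the prompt, parses the JSON tool calls
# from the LLM output, executes them, and loops until a final answer is produced.
result = agent.run("What is 264 * 131, minus 1000?")
print(result)
```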
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/agents/agent_single_multistep.png" alt="graph of agent workflows" width=90%>
</p>
For more general context about agents, you could read [this excellent blog post](https://lilianweng.github.io/posts/2023-06-23-agent/) by Lilian Weng or [our earlier blog post](https://huggingface.co/blog/open-source-llms-as-agents) about building agents with LangChain.
To take a deeper dive in our package, go take a look at the [agents documentation](https://huggingface.co/docs/transformers/en/transformers_agents).
## Example use cases
To get early access to this feature, please first install `transformers` from its `main` branch:
```
pip install "git+https://github.com/huggingface/transformers.git#egg=transformers[agents]"
```
Agents 2.0 will be released in the v4.41.0 version, landing mid-May.
### Self-correcting Retrieval-Augmented-Generation
Quick definition: Retrieval-Augmented-Generation (RAG) is “using an LLM to answer a user query, but basing the answer on information retrieved from a knowledge base”. It has many advantages over using a vanilla or fine-tuned LLM: to name a few, it lets you ground the answer on true facts and reduce confabulations, it lets you provide the LLM with domain-specific knowledge, and it gives you fine-grained control over access to information from the knowledge base.
Let’s say we want to perform RAG, and some parameters must be dynamically generated. For example, depending on the user query we could want to restrict the search to specific subsets of the knowledge base, or we could want to adjust the number of documents retrieved. The difficulty is: how to dynamically adjust these parameters based on the user query?
Well, we can do this by giving our agent an access to these parameters!
Let’s set up this system.
Run the line below to install the required dependencies:
```
pip install langchain sentence-transformers faiss-cpu
```
We first load a knowledge base on which we want to perform RAG: this dataset is a compilation of the documentation pages for many `huggingface` packages, stored as markdown.
```python
import datasets
knowledge_base = datasets.load_dataset("m-ric/huggingface_doc", split="train")
```
Now we prepare the knowledge base by processing the dataset and storing it into a vector database to be used by the retriever. We are going to use LangChain, since it features excellent utilities for vector databases:
```python
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
source_docs = [
Document(
page_content=doc["text"], metadata={"source": doc["source"].split("/")[1]}
) for doc in knowledge_base
]
docs_processed = RecursiveCharacterTextSplitter(chunk_size=500).split_documents(source_docs)[:1000]
embedding_model = HuggingFaceEmbeddings("thenlper/gte-small")
vectordb = FAISS.from_documents(
documents=docs_processed,
embedding=embedding_model
)
```
Now that we have the database ready, let’s build a RAG system that answers user queries based on it!
We want our system to select only from the most relevant sources of information, depending on the query.
Our documentation pages come from the following sources:
```python
>>> all_sources = list(set([doc.metadata["source"] for doc in docs_processed]))
>>> print(all_sources)
['blog', 'optimum', 'datasets-server', 'datasets', 'transformers', 'course',
'gradio', 'diffusers', 'evaluate', 'deep-rl-class', 'peft',
'hf-endpoints-documentation', 'pytorch-image-models', 'hub-docs']
```
How can we select the relevant sources based on the user query?
👉 Let us build our RAG system as an agent that will be free to choose its sources!
We create a retriever tool that the agent can call with the parameters of its choice:
```python
import json
from transformers.agents import Tool
from langchain_core.vectorstores import VectorStore
class RetrieverTool(Tool):
name = "retriever"
description = "Retrieves some documents from the knowledge base that have the closest embeddings to the input query."
inputs = {
"query": {
"type": "text",
"description": "The query to perform. This should be semantically close to your target documents. Use the affirmative form rather than a question.",
},
"source": {
"type": "text",
"description": ""
},
}
output_type = "text"
def __init__(self, vectordb: VectorStore, all_sources: str, **kwargs):
super().__init__(**kwargs)
self.vectordb = vectordb
self.inputs["source"]["description"] = (
f"The source of the documents to search, as a str representation of a list. Possible values in the list are: {all_sources}. If this argument is not provided, all sources will be searched."
)
def forward(self, query: str, source: str = None) -> str:
assert isinstance(query, str), "Your search query must be a string"
if source:
if isinstance(source, str) and "[" not in str(source): # if the source is not representing a list
source = [source]
source = json.loads(str(source).replace("'", '"'))
docs = self.vectordb.similarity_search(query, filter=({"source": source} if source else None), k=3)
if len(docs) == 0:
return "No documents found with this filtering. Try removing the source filter."
return "Retrieved documents:\n\n" + "\n===Document===\n".join(
[doc.page_content for doc in docs]
)
```
Now it’s straightforward to create an agent that leverages this tool!
The agent will need these arguments upon initialization:
- *`tools`*: a list of tools that the agent will be able to call.
- *`llm_engine`*: the LLM that powers the agent.
Our `llm_engine` must be a callable that takes as input a list of [messages](https://huggingface.co/docs/transformers/main/chat_templating) and returns text. It also needs to accept a `stop_sequences` argument that indicates when to stop its generation. For convenience, we directly use the `HfEngine` class provided in the package to get an LLM engine that calls our [Inference API](https://huggingface.co/docs/api-inference/en/index).
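If you prefer to plug in your own engine instead of `HfEngine`, a minimal sketch could look like the following, here based on `huggingface_hub`'s `InferenceClient` (the model name and generation parameters are illustrative):
```python
from huggingface_hub import InferenceClient

client = InferenceClient(model="meta-llama/Meta-Llama-3-70B-Instruct")

def custom_llm_engine(messages, stop_sequences=None) -> str:
    # `messages` is a list of {"role": ..., "content": ...} dicts; the engine returns plain text.
    response = client.chat_completion(messages, stop=stop_sequences or [], max_tokens=1024)
    return response.choices[0].message.content
```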
```python
from transformers.agents import HfEngine, ReactJsonAgent
llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
agent = ReactJsonAgent(
tools=[RetrieverTool(vectordb, all_sources)],
llm_engine=llm_engine
)
agent_output = agent.run("Please show me a LORA finetuning script")
print("Final output:")
print(agent_output)
```
Since we initialized the agent as a `ReactJsonAgent`, it has been automatically given a default system prompt that tells the LLM engine to process step-by-step and generate tool calls as JSON blobs (you could replace this prompt template with your own as needed).
Then when its `.run()` method is launched, the agent takes care of calling the LLM engine, parsing the tool call JSON blobs and executing these tool calls, all in a loop that ends only when the final answer is provided.
And we get the following output:
```
Calling tool: retriever with arguments: {'query': 'LORA finetuning script', 'source': "['transformers', 'datasets-server', 'datasets']"}
Calling tool: retriever with arguments: {'query': 'LORA finetuning script'}
Calling tool: retriever with arguments: {'query': 'LORA finetuning script example', 'source': "['transformers', 'datasets-server', 'datasets']"}
Calling tool: retriever with arguments: {'query': 'LORA finetuning script example'}
Calling tool: final_answer with arguments: {'answer': 'Here is an example of a LORA finetuning script: https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L371'}
Final output:
Here is an example of a LORA finetuning script: https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L371
```
We can see the self-correction in action: the agent first tried to restrict sources, but due to the lack of corresponding documents it ended up not restricting sources at all.
We can verify that by inspecting the LLM output in the logs for step 2: `print(agent.logs[2]['llm_output'])`
```
Thought: I'll try to retrieve some documents related to LORA finetuning scripts from the entire knowledge base, without any source filtering.
Action:
{
"action": "retriever",
"action_input": {"query": "LORA finetuning script"}
}
```
### Using a simple multi-agent setup 🤝 for efficient web browsing
In this example, we want to build an agent and test it on the GAIA benchmark ([Mialon et al. 2023](https://huggingface.co/papers/2311.12983)). GAIA is an extremely difficult benchmark, with most questions requiring several steps of reasoning using different tools. A specifically difficult requirement is to have a powerful web browser, able to navigate to pages with specific constraints: discovering pages using the website’s inner navigation, selecting specific articles in time...
Web browsing requires diving deeper into subpages and scrolling through lots of text tokens that will not be necessary for the higher-level task-solving. We assign the web-browsing sub-tasks to a specialized web surfer agent, equipped with dedicated web-browsing tools and a specific prompt.
Defining these tools is outside the scope of this post: you can check [the repository](https://github.com/aymeric-roucher/agent_reasoning_benchmark) to find the specific implementations.
```python
from transformers.agents import ReactJsonAgent, HfEngine
WEB_TOOLS = [
SearchInformationTool(),
NavigationalSearchTool(),
VisitTool(),
DownloadTool(),
PageUpTool(),
PageDownTool(),
FinderTool(),
FindNextTool(),
]
websurfer_llm_engine = HfEngine(
model="CohereForAI/c4ai-command-r-plus"
) # We choose Command-R+ for its high context length
websurfer_agent = ReactJsonAgent(
tools=WEB_TOOLS,
llm_engine=websurfer_llm_engine,
)
```
To allow this agent to be called by a higher-level task solving agent, we can simply encapsulate it in another tool:
```python
class SearchTool(Tool):
name = "ask_search_agent"
description = "A search agent that will browse the internet to answer a question. Use it to gather informations, not for problem-solving."
inputs = {
"question": {
"description": "Your question, as a natural language sentence. You are talking to an agent, so provide them with as much context as possible.",
"type": "text",
}
}
output_type = "text"
def forward(self, question: str) -> str:
return websurfer_agent.run(question)
```
Then we initialize the task-solving agent with this search tool:
```python
from transformers.agents import ReactCodeAgent
llm_engine = HfEngine(model="meta-llama/Meta-Llama-3-70B-Instruct")
react_agent_hf = ReactCodeAgent(
tools=[SearchTool()],
llm_engine=llm_engine,
)
```
Let's run the agent with the following task:
> *Use density measures from the chemistry materials licensed by Marisa Alviar-Agnew & Henry Agnew under the CK-12 license in LibreText's Introductory Chemistry materials as compiled 08/21/2023.*
> *I have a gallon of honey and a gallon of mayonnaise at 25C. I remove one cup of honey at a time from the gallon of honey. How many times will I need to remove a cup to have the honey weigh less than the mayonaise? Assume the containers themselves weigh the same.*
>
```
Thought: I will use the 'ask_search_agent' tool to find the density of honey and mayonnaise at 25C.
==== Agent is executing the code below:
density_honey = ask_search_agent(question="What is the density of honey at 25C?")
print("Density of honey:", density_honey)
density_mayo = ask_search_agent(question="What is the density of mayonnaise at 25C?")
print("Density of mayo:", density_mayo)
===
Observation:
Density of honey: The density of honey is around 1.38-1.45kg/L at 20C. Although I couldn't find information specific to 25C, minor temperature differences are unlikely to affect the density that much, so it's likely to remain within this range.
Density of mayo: The density of mayonnaise at 25°C is 0.910 g/cm³.
===== New step =====
Thought: I will convert the density of mayonnaise from g/cm³ to kg/L and then calculate the initial weights of the honey and mayonnaise in a gallon. After that, I will calculate the weight of honey after removing one cup at a time until it weighs less than the mayonnaise.
==== Agent is executing the code below:
density_honey = 1.42 # taking the average of the range
density_mayo = 0.910 # converting g/cm³ to kg/L
density_mayo = density_mayo * 1000 / 1000 # conversion
gallon_to_liters = 3.785 # conversion factor
initial_honey_weight = density_honey * gallon_to_liters
initial_mayo_weight = density_mayo * gallon_to_liters
cup_to_liters = 0.236 # conversion factor
removed_honey_weight = cup_to_liters * density_honey
===
Observation:
===== New step =====
Thought: Now that I have the initial weights of honey and mayonnaise, I'll try to calculate the number of cups to remove from the honey to make it weigh less than the mayonnaise using a simple arithmetic operation.
==== Agent is executing the code below:
cups_removed = int((initial_honey_weight - initial_mayo_weight) / removed_honey_weight) + 1
print("Cups removed:", cups_removed)
final_answer(cups_removed)
===
>>> Final answer: 6
```
✅ And the answer is **correct**!
## Testing our agents
Let’s take our agent framework for a spin and benchmark different models with it!
All the code for the experiments below can be found [here](https://github.com/aymeric-roucher/agent_reasoning_benchmark).
### Benchmarking LLM engines
The `agents_reasoning_benchmark` is a small but mighty reasoning test for evaluating agent performance. This benchmark was already used and explained in more detail in [our earlier blog post](https://huggingface.co/blog/open-source-llms-as-agents).
The idea is that the choice of tools you use with your agents can radically alter performance for certain tasks. So this benchmark restricts the set of tools used to a calculator and a basic search tool. We picked questions from several datasets that could be solved using only these two tools:
- **30 questions from [HotpotQA](https://huggingface.co/datasets/hotpot_qa)** ([Yang et al., 2018](https://huggingface.co/papers/1809.09600)) to test search tool usage.
- **40 questions from [GSM8K](https://huggingface.co/datasets/gsm8k)** ([Cobbe et al., 2021](https://huggingface.co/papers/2110.14168)) to test calculator usage.
- **20 questions from [GAIA](https://huggingface.co/datasets/gaia-benchmark/GAIA)** ([Mialon et al., 2023](https://huggingface.co/papers/2311.12983)) to test the usage of both tools for solving difficult questions.
Here we try 3 different engines: [Mixtral-8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1), [Llama-3-70B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3-70B-Instruct), and [GPT-4 Turbo](https://platform.openai.com/docs/models).
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/agents/aggregate_score.png" alt="benchmark of agent performances" width=90%>
</p>
The results are shown above - as the average of two complete runs for more precision. We also tested [Command-R+](https://huggingface.co/CohereForAI/c4ai-command-r-plus) and [Mixtral-8x22B](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1), but do not show them for clarity.
⇒ **Llama-3-70B-Instruct leads the Open-Source models: it is on par with GPT-4**, and it’s especially strong in a `ReactCodeAgent` thanks to Llama 3’s strong coding performance!
💡 It's interesting to compare JSON- and Code-based React agents: with less powerful LLM engines like Mixtral-8x7B, Code-based agents do not perform as well as JSON, since the LLM engine frequently fails to generate good code. But the Code version really shines with more powerful models as engines: in our experience, the Code version even outperforms the JSON with Llama-3-70B-Instruct. As a result, we use the Code version for our next challenge: testing on the complete GAIA benchmark.
### Climbing up the GAIA Leaderboard with a multi-modal agent
[GAIA](https://huggingface.co/datasets/gaia-benchmark/GAIA) ([Mialon et al., 2023](https://huggingface.co/papers/2311.12983)) is an extremely difficult benchmark: you can see in the `agent_reasoning_benchmark` above that models do not perform above 50% even though we cherry-picked tasks that could be solved with 2 basic tools.
Now that we want to get a score on the complete set, we no longer cherry-pick questions. Thus we have to cover all modalities, which leads us to use these specific tools:
- `SearchTool`: the web browser defined above.
- `TextInspectorTool`: open documents as text files and return their content.
- `SpeechToTextTool`: transcribe audio files to text. We use the default tool based on [distil-whisper](https://huggingface.co/distil-whisper/distil-large-v3).
- `VisualQATool`: analyze images visually. For these we use the shiny new [Idefics2-8b-chatty](https://huggingface.co/HuggingFaceM4/idefics2-8b-chatty)!
We first initialize these tools (for more detail, inspect the code in the [repository](https://github.com/aymeric-roucher/agent_reasoning_benchmark)).
Then we initialize our agent:
```python
from transformers.agents import ReactCodeAgent, HfEngine
TASK_SOLVING_TOOLBOX = [
SearchTool(),
VisualQATool(),
SpeechToTextTool(),
TextInspectorTool(),
]
react_agent_hf = ReactCodeAgent(
tools=TASK_SOLVING_TOOLBOX,
llm_engine=HfEngine(model="meta-llama/Meta-Llama-3-70B-Instruct"),
memory_verbose=True,
)
```
And after some time needed to complete the 165 questions, we submit our result to the [GAIA Leaderboard](https://huggingface.co/spaces/gaia-benchmark/leaderboard), and… 🥁🥁🥁
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/agents/leaderboard.png" alt="GAIA leaderboard" width=90%>
</p>
⇒ Our agent ranks 4th: it beats many GPT-4-based agents, and is now the reigning contender for the Open-Source category!
## Conclusion
We will keep improving this package in the coming months. We have already identified several exciting paths in our development roadmap:
- More agent sharing options: for now you can push or load tools from the Hub, we will implement pushing/loading agents too.
- Better tools, especially for image processing.
- Long-term memory management.
- Multi-agent collaboration.
👉 **Go try out transformers agents!** We’re looking forward to receiving your feedback and your ideas.
Let’s fill the top of the leaderboard with more open-source models! 🚀 | 0 |
0 | hf_public_repos | hf_public_repos/blog/leaderboard-cot.md | ---
title: "Introducing the Open Chain of Thought Leaderboard"
thumbnail: /blog/assets/leaderboards-on-the-hub/thumbnail_cot.png
authors:
- user: ggbetz
guest: true
org: logikon
- user: scacean
guest: true
org: logikon
- user: clefourrier
- user: yakazimir
guest: true
org: allenai
---
# Introducing the Open Chain of Thought Leaderboard
[Chain-of-thought prompting](https://huggingface.co/docs/transformers/main/en/tasks/prompting#chain-of-thought) is emerging as a powerful and effective design pattern for LLM-based apps and agents. The basic idea of chain-of-thought prompting is to let a model generate a step-by-step solution (“reasoning trace”) before answering a question or taking a decision. With the Open CoT Leaderboard we’re tracking LLMs’ ability to generate effective chain-of-thought traces for challenging reasoning tasks.
Unlike most performance-based leaderboards, we’re not scoring the absolute accuracy a model achieves on a given task, but the difference between the accuracy with and without chain-of-thought prompting:
```
accuracy gain Δ = accuracy with CoT – accuracy w/o CoT.
```
This allows us to truly inspect the impact that chain-of-thought has on model accuracy.
Note: without CoT prompting, we use loglikelihood accuracy to score the model on the multiple-choice evaluation.
<script type="module" src="https://gradio.s3-us-west-2.amazonaws.com/4.4.0/gradio.js"> </script>
<gradio-app theme_mode="light" space="logikon/open_cot_leaderboard"></gradio-app>
## What’s the motivation behind such a leaderboard for chain-of-thought?
Chain-of-thought prompting is a universally applicable prompting strategy that may improve explainability and accuracy of LLM-based apps and agents (see, e.g., [this collection](https://github.com/logikon-ai/awesome-deliberative-prompting#readme) for recent research and implementations). With frameworks like Langchain or LMQL, it’s straightforward to insert sophisticated reasoning chains in your apps. But even if you’ve never heard about chain-of-thought before, you may have noticed, while using a ChatBot, that it tends to proceed step by step before answering your query. So, a systematic, up-to-date comparison of LLMs’ ability to generate effective chain-of-thought traces may inform the decisions of builders and users when choosing a model.
Over time, static "accuracy-based" benchmarks risk becoming less informative: does a model score well because of its superior skill, because it has seen the correct answers during training, or because it has been developed in a competitive context that is governed by this very benchmark? These widely acknowledged issues are addressed by recent eval approaches such as ChatBot arenas, the use of LLMs as judges, or dynamic benchmarks with programmatically generated tasks. We hope the Open CoT Leaderboard contributes to these efforts, notably by being more robust to training data contamination: knowing the answer to a question doesn’t ensure that one can reason effectively about it.
## Which tasks are used?
The Open CoT Leaderboard evaluates LLMs’ ability to generate effective chain-of-thought reasoning traces for the following tasks:
- LogiQA (new translation of [original version](https://github.com/lgw863/LogiQA-dataset), and [version 2.0](https://github.com/csitfun/LogiQA2.0) with new examples)
- LSAT [dataset](https://arxiv.org/abs/2108.00648) (including subsets on [analytical reasoning](https://huggingface.co/datasets/hails/agieval-lsat-ar), [logical reasoning](https://huggingface.co/datasets/hails/agieval-lsat-lr), and [reading comprehension](https://huggingface.co/datasets/hails/agieval-lsat-rc))
Except for the original version of LogiQA, all these tasks are part of the [AGIEval benchmark](https://arxiv.org/abs/2304.06364), and have been re-published as [logikon-bench](https://huggingface.co/datasets/logikon/logikon-bench).
We’ve chosen these tasks because they
1. are generic, i.e. can be solved through reasoning and just require commonsense knowledge;
2. are still relatively difficult even for the most powerful LLMs (leaving enough room for improvement through chain-of-thought);
3. have been introduced as AI benchmarks before (in AGIEval) and are widely used (e.g., in the Nous benchmark suite).
All tasks are rendered as multiple-choice problems, with the answer options being enumerated in the prompt.
We use the following prompt template for assessing baseline and CoT accuracies – the reasoning traces (starting with `Reasoning`) are only added in the case “with CoT”:
```
Answer the following question about the given passage. Base your answer on the reasoning below.
Passage: <passage>
Question: <question>
A. <choice1>
B. <choice2>
…
Reasoning: <reasoning>
Answer:
```
## How are chain-of-thought traces generated?
[#cot-generation]
The literature on chain-of-thought prompting has exploded in the last year, and the wealth of prompting strategies for CoT (involving, e.g., decoding, prompt wording, prompt chains, decomposition, aggregation and revision of reasoning traces) has become [sheerly mindblowing](https://github.com/logikon-ai/awesome-deliberative-prompting/#readme).
To cope with this variety, the evaluation pipeline behind the Open CoT Leaderboard implements CoT generation through modular and easily extensible prompt chains. So far, we have implemented two fairly simple prompting strategies:
- Classic: Problem presentation followed by “Let’s think step by step.” instruction.
- Reflect: Problem presentation followed by instruction to reflect upon the problem in general terms before solving it step by step.
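As a rough illustration of the Classic strategy, the CoT-eliciting prompt can be assembled as below. This is a simplified sketch: the actual prompt chains in the evaluation pipeline are more elaborate.
```python
def classic_cot_prompt(passage: str, question: str, choices: list[str]) -> str:
    # Present the problem in the same multiple-choice format as the scoring template,
    # then append the step-by-step trigger that elicits the reasoning trace.
    options = "\n".join(f"{letter}. {choice}" for letter, choice in zip("ABCDE", choices))
    return (
        f"Passage: {passage}\n\n"
        f"Question: {question}\n"
        f"{options}\n\n"
        "Let's think step by step."
    )
```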
Let’s have a look at an example. The following problem is from the LSAT logical reasoning benchmark.
```
Editorial: The structure of the present school calendar was established to satisfy the requirements of early-twentieth-century agricultural life. In those days, farmers needed their children to have long breaks during which they could remain at home and help with the harvest. The contemporary school year is thus made up of periods of study interspersed with long breaks. But agricultural life no longer occupies most of our citizens, so we can now make changes that serve the interests of children. Therefore, long breaks should be removed from the school calendar.
Which one of the following is an assumption on which the editorial's argument depends?
A) A change in the present school calendar that shortened breaks would serve the interests of agricultural life.
B) Children of farmers need to continue observing a school calendar made up of periods of study interspersed with long breaks.
C) Long breaks in the school calendar should be replaced with breaks that are no longer than workers' average vacations.
D) A school calendar made up of periods of study without long breaks would serve the interests of children more than a school calendar with long breaks.
E) During long breaks children have a tendency to forget what they have learned.
```
Mixtral-8x7B-Instruct-v0.1 is doing fairly well in generating the following chain-of-thought traces:
<table>
<tr>
<th>With prompting strategy Classic</th>
<th>With prompting strategy Reflect</th>
</tr>
<tr>
<td>The editorial argues that the current school calendar, with long breaks, was designed to accommodate agricultural life but should now be changed because agricultural life no longer occupies most of our citizens. It concludes that long breaks should be removed from the school calendar.
The assumption that the argument depends on is the belief that removing long breaks from the school calendar would serve the interests of children more than the current calendar. This is not explicitly stated in the editorial, but it is the underlying assumption that justifies the proposed change.
Therefore, the correct answer is:
D) A school calendar made up of periods of study without long breaks would serve the interests of children more than a school calendar with long breaks.</td>
<td>The decision problem in abstract terms is to identify the assumption on which the editorial's argument depends.
Common mistakes for this kind of problem include:
1. Misinterpreting the argument's premises or conclusion.
2. Overlooking the specific requirement of identifying an assumption.
3. Confusing an assumption with a conclusion or a premise.
A plan for how to solve this problem could be:
1. Identify the premises and conclusion of the argument.
2. List possible assumptions that could bridge the gap between the premises and the conclusion.
3. Evaluate each assumption to see if it is necessary for the argument.
Solving the problem step by step, following the plan:
1. The premises of the argument are:
- The structure of the present school calendar was established to satisfy the requirements of early-twentieth-century agricultural life.
- Agricultural life no longer occupies most of our citizens.
2. The conclusion of the argument is:
- Long breaks should be removed from the school calendar.
3. Possible assumptions could be:
- A school calendar made up of periods of study without long breaks would serve the interests of children more than a school calendar with long breaks.
- Children of farmers no longer need to remain at home and help with the harvest during long breaks.
- A change in the present school calendar that shortened breaks would not serve the interests of agricultural life.
4. Evaluating each assumption:
- The argument does not depend on the assumption that children of farmers no longer need to remain at home and help with the harvest during long breaks, because the argument is about the interests of all children, not just those of farmers.
- The argument does not depend on the assumption that a change in the present school calendar that shortened breaks would not serve the interests of agricultural life, because the argument is about the interests of children, not agricultural life.
- The argument does depend on the assumption that a school calendar made up of periods of study without long breaks would serve the interests of children more than a school calendar with long breaks. This is the assumption that justifies the conclusion.
Therefore, the assumption on which the editorial's argument depends is D) A school calendar made up of periods of study without long breaks would serve the interests of children more than a school calendar with long breaks.</td>
</tr>
</table>
Each of our two prompting strategies—Classic and Reflect—is combined and run with multiple decoding parameters:
- Greedy.
- Beam search (n=2).
- Sampling (T=.3).
This gives us, in combination, six “CoT generation regimes.” When we evaluate a model, we let it generate, for every example in the test datasets, one chain-of-thought trace for each regime. The generated traces are then plugged into the prompt template shown above, which we use to score the models. We obtain, accordingly, six different numerical scores for accuracy with chain-of-thought (namely one for each regime). In the Open CoT Leaderboard, we report (for every model/task) the best marginal accuracy gain achieved under any regime.
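To make the bookkeeping concrete, here is a rough sketch of how that best-marginal-accuracy-gain number comes about. It is not the leaderboard’s actual evaluation code; `accuracy` and `generate_traces` are hypothetical helpers standing in for the real pipeline:
```
from itertools import product

strategies = ["classic", "reflect"]
decodings = ["greedy", "beam_search_n2", "sampling_t0.3"]
regimes = list(product(strategies, decodings))  # the six CoT generation regimes

def best_marginal_accuracy_gain(model, task, accuracy, generate_traces):
    # Baseline: accuracy on the task without any chain-of-thought traces.
    baseline = accuracy(model, task, traces=None)
    gains = []
    for strategy, decoding in regimes:
        # One trace per test example, generated under this regime,
        # then plugged into the prompt template shown above.
        traces = generate_traces(model, task, strategy, decoding)
        gains.append(accuracy(model, task, traces=traces) - baseline)
    # The leaderboard reports the best gain achieved under any regime.
    return max(gains)
```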
## What are the main take-aways so far?
We’re gradually extending the Open CoT Leaderboard by evaluating more and more models, but current results (model count=30) already suggest some interesting insights.
- Mighty dwarfs: We have been very pleased to see that relatively small (7B) open LLMs are capable of effective, i.e. accuracy-improving, chain-of-thought reasoning, in some cases at a better rate than bigger models. 🎉 For example, a small model like Phi-2 benefits more than the Mixtral model from added CoT traces.
- Instruction- and chat-finetuning helps: Finetuned models score much better than their corresponding base models. More specifically, finetuning may improve both the baseline accuracy without CoT and the marginal accuracy gains achieved through CoT.
- Variable and ambiguous effects of CoT: Digging a bit deeper, we see that there is no single preferred or superior CoT generation [regime](#cot-generation). What works best for one model and one task might not work for another model, or another task. And sometimes CoT reduces accuracy rather than increasing it. We take this as a reminder that finding an implementation of CoT that is universally effective, reliable and robust remains a challenging problem.
## What are the next steps? – And how to contribute.
We’re planning to move ahead in different directions. And contributions to all these efforts are more than welcome.
First, we’d love to evaluate your models! You can 📬 submit any open LLMs for evaluation on the Open CoT Leaderboard space, using the Submission tab!
Then, we’d love some help on the following coding and data analysis tasks.
- **Carry out in-depth analysis of full evaluation results.**
For example, a qualitative analysis of the generated CoT traces could check whether they actually point to the correct answer choice. We’ve created a [notebook](https://github.com/logikon-ai/cot-eval/blob/main/notebooks/CoT_Leaderboard_Results_Exploration.ipynb) that shows how to access and explore the eval results and reasoning traces which back up the Open CoT Leaderboard. You can build on that and share your own analyses in the corresponding repo (or somewhere else, of course). Feel free to open an issue with suggestions or questions. In case you plan to use the data for research projects and want feedback, just drop a note.
- **Create Open CoT Dashboard.**
The Open CoT Leaderboard confines itself to ranking models according to marginal accuracy gains. It doesn’t display the baseline accuracies, the variance, the scores for different CoT generation [regimes](#cot-generation), properties of the generated reasoning traces (e.g., length), etc. We think it would be super informative to complement the leaderboard with a dashboard (e.g., as an extra tab or a separate HF space) that presents all this info and can be interactively explored by users. In case you’re interested in building such an Open CoT Dashboard (with or without us), just reach out.
- **More CoT chains.**
We’re pondering implementing further CoT generation regimes. Promising candidates are, for example, self-consistency, tree-of-thought, self-check, or debating. Want to help us with that? Get in touch! (🤫: Why not choose such a project for your master’s or bachelor’s thesis?)
- **More tasks and test datasets.**
The Open CoT Leaderboard is arguably built on a rather narrow set of benchmarks. Once we have free compute resources, we’d like to include further challenging reasoning tasks. We’d be happy to learn which tasks you’d like to see included in the Open CoT Leaderboard.
Here’s where we can exchange our ideas and collaborate:
- For non-technical suggestions and feedback, join the [discussion](https://huggingface.co/spaces/logikon/open_cot_leaderboard/discussions) at the leaderboard’s HF space.
- For technical feedback and questions, open an issue at our GitHub [repo](https://github.com/logikon-ai/cot-eval).
Looking forward to hearing from you!
| 1 |
0 | hf_public_repos | hf_public_repos/blog/accelerating-pytorch.md | ---
title: "Accelerating PyTorch distributed fine-tuning with Intel technologies"
thumbnail: /blog/assets/36_accelerating_pytorch/04_four_nodes.png
authors:
- user: juliensimon
---
# Accelerating PyTorch distributed fine-tuning with Intel technologies
For all their amazing performance, state of the art deep learning models often take a long time to train. In order to speed up training jobs, engineering teams rely on distributed training, a divide-and-conquer technique where clustered servers each keep a copy of the model, train it on a subset of the training set, and exchange results to converge to a final model.
Graphical Processing Units (GPUs) have long been the _de facto_ choice to train deep learning models. However, the rise of transfer learning is changing the game. Models are now rarely trained from scratch on humungous datasets. Instead, they are frequently fine-tuned on specific (and smaller) datasets, in order to build specialized models that are more accurate than the base model for particular tasks. As these training jobs are much shorter, using a CPU-based cluster can prove to be an interesting option that keeps both training time and cost under control.
### What this post is about
In this post, you will learn how to accelerate [PyTorch](https://pytorch.org) training jobs by distributing them on a cluster of Intel Xeon Scalable CPU servers, powered by the Ice Lake architecture and running performance-optimized software libraries. We will build the cluster from scratch using virtual machines, and you should be able to easily replicate the demo on your own infrastructure, either in the cloud or on premise.
Running a text classification job, we will fine-tune a [BERT](https://huggingface.co/bert-base-cased) model on the [MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398) dataset (one of the tasks included in the [GLUE](https://gluebenchmark.com/) benchmark). The MRPC dataset contains 5,800 sentence pairs extracted from news sources, with a label telling us whether the two sentences in each pair are semantically equivalent. We picked this dataset for its reasonable training time, and trying other GLUE tasks is just a parameter away.
Once the cluster is up and running, we will run a baseline job on a single server. Then, we will scale it to 2 servers and 4 servers and measure the speed-up.
Along the way, we will cover the following topics:
* Listing the required infrastructure and software building blocks,
* Setting up our cluster,
* Installing dependencies,
* Running a single-node job,
* Running a distributed job.
Let's get to work!
### Using Intel servers
For best performance, we will use Intel servers based on the Ice Lake architecture, which supports hardware features such as Intel AVX-512 and Intel Vector Neural Network Instructions (VNNI). These features accelerate operations typically found in deep learning training and inference. You can learn more about them in this [presentation](https://www.intel.com/content/dam/www/public/us/en/documents/product-overviews/dl-boost-product-overview.pdf) (PDF).
All three major cloud providers offer virtual machines powered by Intel Ice Lake CPUs:
- Amazon Web Services: Amazon EC2 [M6i](https://aws.amazon.com/blogs/aws/new-amazon-ec2-m6i-instances-powered-by-the-latest-generation-intel-xeon-scalable-processors/)
and [C6i](https://aws.amazon.com/blogs/aws/new-amazon-ec2-c6i-instances-powered-by-the-latest-generation-intel-xeon-scalable-processors/) instances.
- Azure: [Dv5/Dsv5-series](https://docs.microsoft.com/en-us/azure/virtual-machines/dv5-dsv5-series), [Ddv5/Ddsv5-series](https://docs.microsoft.com/en-us/azure/virtual-machines/ddv5-ddsv5-series) and [Edv5/Edsv5-series](https://docs.microsoft.com/en-us/azure/virtual-machines/edv5-edsv5-series) virtual machines.
- Google Cloud Platform: [N2](https://cloud.google.com/blog/products/compute/compute-engine-n2-vms-now-available-with-intel-ice-lake) Compute Engine virtual machines.
Of course, you can also use your own servers. If they are based on the Cascade Lake architecture (Ice Lake's predecessor), they're good to go as Cascade Lake also includes AVX-512 and VNNI.
### Using Intel performance libraries
To leverage AVX-512 and VNNI in PyTorch, Intel has designed the [Intel extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch). This software library provides out of the box speedup for training and inference, so we should definitely install it.
When it comes to distributed training, the main performance bottleneck is often networking. Indeed, the different nodes in the cluster need to periodically exchange model state information to stay in sync. As transformers are large models with billions of parameters (sometimes much more), the volume of information is significant, and things only get worse as the number of nodes increases. Thus, it's important to use a communication library optimized for deep learning.
In fact, PyTorch includes the [```torch.distributed```](https://pytorch.org/tutorials/intermediate/dist_tuto.html) package, which supports different communication backends. Here, we'll use the Intel oneAPI Collective Communications Library [(oneCCL)](https://github.com/oneapi-src/oneCCL), an efficient implementation of communication patterns used in deep learning ([all-reduce](https://en.wikipedia.org/wiki/Collective_operation), etc.). You can learn about the performance of oneCCL versus other backends in this PyTorch [blog post](https://pytorch.medium.com/optimizing-dlrm-by-using-pytorch-with-oneccl-backend-9f85b8ef6929).
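If you are curious what this looks like outside of the ```transformers``` Trainer (which handles it for us below), selecting oneCCL boils down to initializing the PyTorch process group with the ```ccl``` backend. Here is a minimal sketch, assuming the ```torch_ccl``` bindings we compile later in this post and the usual rendezvous variables (```MASTER_ADDR```, ```MASTER_PORT```, ```RANK```, ```WORLD_SIZE```) already set in the environment:
```
import os
import torch.distributed as dist
import torch_ccl  # importing the bindings registers the "ccl" backend

# MASTER_ADDR and MASTER_PORT must be set in the environment, as we do later in this post.
dist.init_process_group(
    backend="ccl",
    rank=int(os.environ.get("RANK", 0)),
    world_size=int(os.environ.get("WORLD_SIZE", 1)),
)
```
In our case, the ```--xpu_backend ccl``` flag we pass to ```run_glue.py``` asks the Trainer to perform the equivalent initialization for us.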
Now that we're clear on building blocks, let's talk about the overall setup of our training cluster.
### Setting up our cluster
In this demo, I'm using Amazon EC2 instances running Amazon Linux 2 (c6i.16xlarge, 64 vCPUs, 128GB RAM, 25Gbit/s networking). Setup will be different in other environments, but steps should be very similar.
Please keep in mind that you will need 4 identical instances, so you may want to plan for some sort of automation to avoid running the same setup 4 times. Here, I will set up one instance manually, create a new Amazon Machine Image [(AMI)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) from this instance, and use this AMI to launch three identical instances.
From a networking perspective, we will need the following setup:
* Open port 22 for ```ssh``` access on all instances for setup and debugging.
* Configure [password-less](https://www.redhat.com/sysadmin/passwordless-ssh) ```ssh``` between the master instance (the one you'll launch training from) and all other instances (__master included__).
* Open all TCP ports on all instances for oneCCL communication inside the cluster. __Please make sure NOT to open these ports to the external world__. AWS provides a convenient way to do this by only allowing connections from instances running a particular [security group](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_SecurityGroups.html). Here's how my setup looks.
<kbd>
<img src="assets/36_accelerating_pytorch/01_security_group.png">
</kbd>
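For the password-less ```ssh``` configuration above, a minimal sketch looks like this. It assumes Amazon Linux 2 with the default ```ec2-user``` account and that you can already reach each node with your usual EC2 key pair; repeat the last command for every node in the cluster.
```
# On the master instance: create a dedicated key pair
ssh-keygen -t rsa -b 4096 -N "" -f ~/.ssh/id_rsa
# Authorize it on the master itself
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
# Authorize it on each of the other nodes (replace the hostname with your own)
ssh-copy-id -i ~/.ssh/id_rsa.pub ec2-user@ip-172-31-30-87.ec2.internal
```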
Now, let's provision the first instance manually. I first create the instance itself, attach the security group above, and add 128GB of storage. To optimize costs, I have launched it as a [spot instance](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html).
Once the instance is up, I connect to it with ```ssh``` in order to install dependencies.
### Installing dependencies
Here are the steps we will follow:
* Install Intel toolkits,
* Install the Anaconda distribution,
* Create a new ```conda``` environment,
* Install PyTorch and the Intel extension for PyTorch,
* Compile and install oneCCL,
* Install the ```transformers``` library.
It looks like a lot, but there's nothing complicated. Here we go!
__Installing Intel toolkits__
First, we download and install the Intel [OneAPI base toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit-download.html?operatingsystem=linux&distributions=webdownload&options=offline) as well as the [AI toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/ai-analytics-toolkit-download.html?operatingsystem=linux&distributions=webdownload&options=offline). You can learn about them on the Intel [website](https://www.intel.com/content/www/us/en/developer/tools/oneapi/toolkits.html#gs.gmojrp).
```
wget https://registrationcenter-download.intel.com/akdlm/irc_nas/18236/l_BaseKit_p_2021.4.0.3422_offline.sh
sudo bash l_BaseKit_p_2021.4.0.3422_offline.sh
wget https://registrationcenter-download.intel.com/akdlm/irc_nas/18235/l_AIKit_p_2021.4.0.1460_offline.sh
sudo bash l_AIKit_p_2021.4.0.1460_offline.sh
```
__Installing Anaconda__
Then, we [download](https://www.anaconda.com/products/individual) and install the Anaconda distribution.
```
wget https://repo.anaconda.com/archive/Anaconda3-2021.05-Linux-x86_64.sh
sh Anaconda3-2021.05-Linux-x86_64.sh
```
__Creating a new conda environment__
We log out and log in again to refresh paths. Then, we create a new ```conda``` environment to keep things neat and tidy.
```
yes | conda create -n transformer python=3.7.9 -c anaconda
eval "$(conda shell.bash hook)"
conda activate transformer
yes | conda install pip cmake
```
__Installing PyTorch and the Intel extension for PyTorch__
Next, we install PyTorch 1.9 and the Intel extension toolkit. __Versions must match__.
```
yes | conda install pytorch==1.9.0 cpuonly -c pytorch
pip install torch_ipex==1.9.0 -f https://software.intel.com/ipex-whl-stable
```
__Compiling and installing oneCCL__
Then, we install some native dependencies required to compile oneCCL.
```
sudo yum -y update
sudo yum install -y git cmake3 gcc gcc-c++
```
Next, we clone the oneCCL repository, build the library and install it. __Again, versions must match__.
```
source /opt/intel/oneapi/mkl/latest/env/vars.sh
git clone https://github.com/intel/torch-ccl.git
cd torch-ccl
git checkout ccl_torch1.9
git submodule sync
git submodule update --init --recursive
python setup.py install
cd ..
```
__Installing the transformers library__
Next, we install the ```transformers``` library and dependencies required to run GLUE tasks.
```
pip install transformers datasets
yes | conda install scipy scikit-learn
```
Finally, we clone a fork of the ```transformers``` repository containing the example we're going to run.
```
git clone https://github.com/kding1/transformers.git
cd transformers
git checkout dist-sigopt
```
We're done! Let's run a single-node job.
### Launching a single-node job
To get a baseline, let's launch a single-node job running the ```run_glue.py``` script in ```transformers/examples/pytorch/text-classification```. This should work on any of the instances, and it's a good sanity check before proceeding to distributed training.
```
python run_glue.py \
--model_name_or_path bert-base-cased --task_name mrpc \
--do_train --do_eval --max_seq_length 128 \
--per_device_train_batch_size 32 --learning_rate 2e-5 --num_train_epochs 3 \
--output_dir /tmp/mrpc/ --overwrite_output_dir True
```
<kbd>
<img src="assets/36_accelerating_pytorch/02_single_node.png">
</kbd>
This job takes __7 minutes and 46 seconds__. Now, let's set up distributed jobs with oneCCL and speed things up!
### Setting up a distributed job with oneCCL
Three steps are required to run a distributed training job:
* List the nodes of the training cluster,
* Define environment variables,
* Modify the training script.
__Listing the nodes of the training cluster__
On the master instance, in ```transformers/examples/pytorch/text-classification```, we create a text file named ```hostfile```. This file stores the names of the nodes in the cluster (IP addresses would work too). The first line should point to the master instance.
Here's my file:
```
ip-172-31-28-17.ec2.internal
ip-172-31-30-87.ec2.internal
ip-172-31-29-11.ec2.internal
ip-172-31-20-77.ec2.internal
```
__Defining environment variables__
Next, we need to set some environment variables on the master node, most notably its IP address. You can find more information on oneCCL variables in the [documentation](https://oneapi-src.github.io/oneCCL/env-variables.html).
```
for nic in eth0 eib0 hib0 enp94s0f0; do
   master_addr=$(ifconfig $nic 2>/dev/null | grep netmask | awk '{print $2}'| cut -f2 -d:)
   if [ "$master_addr" ]; then
      break
   fi
done
export MASTER_ADDR=$master_addr
source /home/ec2-user/anaconda3/envs/transformer/lib/python3.7/site-packages/torch_ccl-1.3.0+43f48a1-py3.7-linux-x86_64.egg/torch_ccl/env/setvars.sh
export LD_LIBRARY_PATH=/home/ec2-user/anaconda3/envs/transformer/lib/python3.7/site-packages/torch_ccl-1.3.0+43f48a1-py3.7-linux-x86_64.egg/:$LD_LIBRARY_PATH
export LD_PRELOAD="${CONDA_PREFIX}/lib/libtcmalloc.so:${CONDA_PREFIX}/lib/libiomp5.so"
export CCL_WORKER_COUNT=4
export CCL_WORKER_AFFINITY="0,1,2,3,32,33,34,35"
export CCL_ATL_TRANSPORT=ofi
export ATL_PROGRESS_MODE=0
```
__Modifying the training script__
The following changes have already been applied to our training script (```run_glue.py```) in order to enable distributed training. You would need to apply similar changes when using your own training code.
* Import the ```torch_ccl``` package.
* Receive the address of the master node and the local rank of the node in the cluster.
```
+import torch_ccl
+
import datasets
import numpy as np
from datasets import load_dataset, load_metric
@@ -47,7 +49,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.13.0.dev0")
+# check_min_version("4.13.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
@@ -191,6 +193,17 @@ def main():
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
+ # add local rank for cpu-dist
+ sys.argv.append("--local_rank")
+ sys.argv.append(str(os.environ.get("PMI_RANK", -1)))
+
+ # ccl specific environment variables
+ if "ccl" in sys.argv:
+ os.environ["MASTER_ADDR"] = os.environ.get("MASTER_ADDR", "127.0.0.1")
+ os.environ["MASTER_PORT"] = "29500"
+ os.environ["RANK"] = str(os.environ.get("PMI_RANK", -1))
+ os.environ["WORLD_SIZE"] = str(os.environ.get("PMI_SIZE", -1))
+
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
```
Setup is now complete. Let's scale our training job to 2 nodes and 4 nodes.
### Running a distributed job with oneCCL
On the __master node__, I use ```mpirun``` to launch a 2-node job: ```-np``` (number of processes) is set to 2 and ```-ppn``` (process per node) is set to 1. Hence, the first two nodes in ```hostfile``` will be selected.
```
mpirun -f hostfile -np 2 -ppn 1 -genv I_MPI_PIN_DOMAIN=[0xfffffff0] \
-genv OMP_NUM_THREADS=28 python run_glue.py \
--model_name_or_path distilbert-base-uncased --task_name mrpc \
--do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 32 \
--learning_rate 2e-5 --num_train_epochs 3 --output_dir /tmp/mrpc/ \
--overwrite_output_dir True --xpu_backend ccl --no_cuda True
```
Within seconds, a job starts on the first two nodes. The job completes in __4 minutes and 39 seconds__, a __1.7x__ speedup.
<kbd>
<img src="assets/36_accelerating_pytorch/03_two_nodes.png">
</kbd>
Setting ```-np``` to 4 and launching a new job, I now see one process running on each node of the cluster.
<kbd>
<img src="assets/36_accelerating_pytorch/04_four_nodes.png">
</kbd>
Training completes in __2 minutes and 36 seconds__, a __3x__ speedup.
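For reference, the corresponding 4-node command only changes the ```-np``` value, everything else (hostfile, pinning, script arguments) staying the same:
```
mpirun -f hostfile -np 4 -ppn 1 -genv I_MPI_PIN_DOMAIN=[0xfffffff0] \
-genv OMP_NUM_THREADS=28 python run_glue.py \
--model_name_or_path distilbert-base-uncased --task_name mrpc \
--do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 32 \
--learning_rate 2e-5 --num_train_epochs 3 --output_dir /tmp/mrpc/ \
--overwrite_output_dir True --xpu_backend ccl --no_cuda True
```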
One last thing. Changing ```--task_name``` to ```qqp```, I also ran the Quora Question Pairs GLUE task, which is based on a much larger [dataset](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) (over 400,000 training samples). The fine-tuning times were:
* Single-node: 11 hours 22 minutes,
* 2 nodes: 6 hours and 38 minutes (1.71x),
* 4 nodes: 3 hours and 51 minutes (2.95x).
It looks like the speedup is pretty consistent. Feel free to keep experimenting with different learning rates, batch sizes and oneCCL settings. I'm sure you can go even faster!
### Conclusion
In this post, you've learned how to build a distributed training cluster based on Intel CPUs and performance libraries, and how to use this cluster to speed up fine-tuning jobs. Indeed, transfer learning is putting CPU training back into the game, and you should definitely consider it when designing and building your next deep learning workflows.
Thanks for reading this long post. I hope you found it informative. Feedback and questions are welcome at [email protected]_. Until next time, keep learning!
Julien
| 2 |
0 | hf_public_repos | hf_public_repos/blog/intel-sapphire-rapids-inference.md | ---
title: "Accelerating PyTorch Transformers with Intel Sapphire Rapids - part 2"
thumbnail: /blog/assets/129_intel_sapphire_rapids_inference/01.png
authors:
- user: juliensimon
---
# Accelerating PyTorch Transformers with Intel Sapphire Rapids, part 2
In a [recent post](https://huggingface.co/blog/intel-sapphire-rapids), we introduced you to the fourth generation of Intel Xeon CPUs, code-named [Sapphire Rapids](https://en.wikipedia.org/wiki/Sapphire_Rapids), and its new Advanced Matrix Extensions ([AMX](https://en.wikipedia.org/wiki/Advanced_Matrix_Extensions)) instruction set. Combining a cluster of Sapphire Rapids servers running on Amazon EC2 and Intel libraries like the [Intel Extension for PyTorch](https://github.com/intel/intel-extension-for-pytorch), we showed you how to efficiently run distributed training at scale, achieving an 8-fold speedup compared to the previous Xeon generation (Ice Lake) with near-linear scaling.
In this post, we're going to focus on inference. Working with popular HuggingFace transformers implemented with PyTorch, we'll first measure their performance on an Ice Lake server for short and long NLP token sequences. Then, we'll do the same with a Sapphire Rapids server and the latest version of Hugging Face [Optimum Intel](https://github.com/huggingface/optimum-intel), an open-source library dedicated to hardware acceleration for Intel platforms.
Let's get started!
## Why You Should Consider CPU-based Inference
There are several factors to consider when deciding whether to run deep learning inference on a CPU or GPU. The most important one is certainly the size of the model. In general, larger models may benefit more from the additional computational power provided by a GPU, while smaller models can run efficiently on a CPU.
Another factor to consider is the level of parallelism in the model and the inference task. GPUs are designed to excel at massively parallel processing, so they may be more efficient for tasks that can be parallelized effectively. On the other hand, if the model or inference task does not have a very high level of parallelism, a CPU may be a more effective choice.
Cost is also an important factor to consider. GPUs can be expensive, and using a CPU may be a more cost-effective option, particularly if your business use case doesn't require extremely low latency. In addition, if you need the ability to easily scale up or down the number of inference workers, or if you need to be able to run inference on a wide variety of hardware, using CPUs may be a more flexible option.
Now, let's set up our test servers.
## Setting up our Test Servers
Just like in the previous post, we're going to use Amazon EC2 instances:
* a `c6i.16xlarge` instance, based on the Ice Lake architecture,
* a `r7iz.16xlarge-metal` instance, based on the Sapphire Rapids architecture. You can read more about the new r7iz family on the [AWS website](https://aws.amazon.com/ec2/instance-types/r7iz/).
Both instances have 32 physical cores (thus, 64 vCPUs). We will set them up in the same way:
* Ubuntu 22.04 with Linux 5.15.0 (`ami-0574da719dca65348`),
* PyTorch 1.13 with Intel Extension for PyTorch 1.13,
* Transformers 4.25.1.
The only difference will be the addition of the Optimum Intel Library on the r7iz instance.
Here are the setup steps. As usual, we recommend using a virtual environment to keep things nice and tidy.
```
sudo apt-get update
# Add libtcmalloc for extra performance
sudo apt install libgoogle-perftools-dev -y
export LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libtcmalloc.so"
sudo apt-get install python3-pip -y
pip install pip --upgrade
export PATH=/home/ubuntu/.local/bin:$PATH
pip install virtualenv
virtualenv inference_env
source inference_env/bin/activate
pip3 install torch==1.13.0 -f https://download.pytorch.org/whl/cpu
pip3 install intel_extension_for_pytorch==1.13.0 -f https://developer.intel.com/ipex-whl-stable-cpu
pip3 install transformers
# Only needed on the r7iz instance
pip3 install optimum[intel]
```
Once we've completed these steps on the two instances, we can start running our tests.
## Benchmarking Popular NLP models
In this example, we're going to benchmark several NLP models on a text classification task: [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased), [bert-base-uncased](https://huggingface.co/bert-base-uncased) and [roberta-base](https://huggingface.co/roberta-base). You can find the [full script](https://gist.github.com/juliensimon/7ae1c8d12e8a27516e1392a3c73ac1cc) on Github. Feel free to try it with your models!
```
models = ["distilbert-base-uncased", "bert-base-uncased", "roberta-base"]
```
Using both 16-token and 128-token sentences, we will measure mean and p99 prediction latency for single inference and batch inference. This should give us a decent view of the speedup we can expect in real-life scenarios.
```
sentence_short = "This is a really nice pair of shoes, I am completely satisfied with my purchase"
sentence_short_array = [sentence_short] * 8
sentence_long = "These Adidas Lite Racer shoes hit a nice sweet spot for comfort shoes. Despite being a little snug in the toe box, these are very comfortable to wear and provide nice support while wearing. I would stop short of saying they are good running shoes or cross-trainers because they simply lack the ankle and arch support most would desire in those type of shoes and the treads wear fairly quickly, but they are definitely comfortable. I actually walked around Disney World all day in these without issue if that is any reference. Bottom line, I use these as the shoes they are best; versatile, inexpensive, and comfortable, without expecting the performance of a high-end athletic sneaker or expecting the comfort of my favorite pair of slippers."
sentence_long_array = [sentence_long] * 8
```
The benchmarking function is very simple. After a few warmup iterations, we run 1,000 predictions with the pipeline API, store the prediction times, and compute both their mean and p99 values.
```
import time
import numpy as np
def benchmark(pipeline, data, iterations=1000):
    # Warmup
    for i in range(100):
        result = pipeline(data)
    times = []
    for i in range(iterations):
        tick = time.time()
        result = pipeline(data)
        tock = time.time()
        times.append(tock - tick)
    return "{:.2f}".format(np.mean(times) * 1000), "{:.2f}".format(
        np.percentile(times, 99) * 1000
    )
```
On the c6i (Ice Lake) instance, we only use a vanilla Transformers pipeline.
```
from transformers import pipeline
for model in models:
print(f"Benchmarking {model}")
pipe = pipeline("sentiment-analysis", model=model)
result = benchmark(pipe, sentence_short)
print(f"Transformers pipeline, short sentence: {result}")
result = benchmark(pipe, sentence_long)
print(f"Transformers pipeline, long sentence: {result}")
result = benchmark(pipe, sentence_short_array)
print(f"Transformers pipeline, short sentence array: {result}")
result = benchmark(pipe, sentence_long_array)
print(f"Transformers pipeline, long sentence array: {result}")
```
On the r7iz (Sapphire Rapids) instance, we use both a vanilla pipeline and an Optimum pipeline. In the Optimum pipeline, we enable `bfloat16` mode to leverage the AMX instructions. We also set `jit` to `True` to further optimize the model with just-in-time compilation.
```
import torch
from optimum.intel import inference_mode
with inference_mode(pipe, dtype=torch.bfloat16, jit=True) as opt_pipe:
    result = benchmark(opt_pipe, sentence_short)
    print(f"Optimum pipeline, short sentence: {result}")
    result = benchmark(opt_pipe, sentence_long)
    print(f"Optimum pipeline, long sentence: {result}")
    result = benchmark(opt_pipe, sentence_short_array)
    print(f"Optimum pipeline, short sentence array: {result}")
    result = benchmark(opt_pipe, sentence_long_array)
    print(f"Optimum pipeline, long sentence array: {result}")
```
For the sake of brevity, we'll just look at the p99 results for [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased). All times are in milliseconds. You'll find full results at the end of the post.
<kbd>
<img src="assets/129_intel_sapphire_rapids_inference/01.png">
</kbd>
As you can see in the graph above, single predictions run **60-65%** faster compared to the previous generation of Xeon CPUs. In other words, thanks to the combination of Intel Sapphire Rapids and Hugging Face Optimum, you can accelerate your predictions 3x with only tiny changes to your code.
This lets you reach **single-digit prediction latency** even with long text sequences, which was only possible with GPUs so far.
## Conclusion
The fourth generation of Intel Xeon CPUs delivers excellent inference performance, especially when combined with Hugging Face Optimum. This is yet another step on the way to making Deep Learning more accessible and more cost-effective, and we're looking forward to continuing this work with our friends at Intel.
Here are some additional resources to help you get started:
* [Intel IPEX](https://github.com/intel/intel-extension-for-pytorch) on GitHub
* [Hugging Face Optimum](https://github.com/huggingface/optimum) on GitHub
If you have questions or feedback, we'd love to read them on the [Hugging Face forum](https://discuss.huggingface.co/).
Thanks for reading!
## Appendix: full results
<kbd>
<img src="assets/129_intel_sapphire_rapids_inference/02.png">
</kbd>
*Ubuntu 22.04 with libtcmalloc, Linux 5.15.0 patched for Intel AMX support, PyTorch 1.13 with Intel Extension for PyTorch, Transformers 4.25.1, Optimum 1.6.1, Optimum Intel 1.7.0.dev0*
| 3 |
0 | hf_public_repos | hf_public_repos/blog/hugs.md | ---
title: "Introducing HUGS - Scale your AI with Open Models"
thumbnail: /blog/assets/hugs/thumbnail.jpg
authors:
- user: philschmid
- user: jeffboudier
- user: alvarobartt
- user: pagezyhf
- user: Violette
---
Today, we are thrilled to announce the launch of **Hugging Face Generative AI Services a.k.a. HUGS**: optimized, zero-configuration inference microservices designed to simplify and accelerate the development of AI applications with open models. Built on open-source Hugging Face technologies such as Text Generation Inference and Transformers, HUGS provides the best solution to efficiently build and scale Generative AI Applications in your own infrastructure. HUGS is optimized to run open models on a variety of hardware accelerators, including NVIDIA GPUs, AMD GPUs, and soon AWS Inferentia and Google TPUs.

## Zero-Configuration Optimized Inference for Open Models
HUGS simplifies the optimized deployment of open models in your own infrastructure and on a wide variety of hardware. One key challenge developers and organizations face is the engineering complexity of optimizing inference workloads for LLMs on a particular GPU or AI accelerator. With HUGS, we enable maximum throughput deployments for the most popular open LLMs with zero configuration required. Each deployment configuration offered by HUGS is fully tested and maintained to work out of the box.
HUGS model deployments provide an OpenAI compatible API for a drop-in replacement of existing Generative AI applications built on top of model provider APIs. Just point your code to the HUGS deployment to power your applications with open models hosted in your own infrastructure.
## Why HUGS?
HUGS offers an easy way to build AI applications with open models hosted in your own infrastructure, with the following benefits:
* **In YOUR infrastructure**: Deploy open models within your own secure environment. Keep your data and models off the Internet!
* **Zero-configuration Deployment**: HUGS reduces deployment time from weeks to minutes with zero-configuration setup, automatically optimizing the model and serving configuration for your NVIDIA, AMD GPU or AI accelerator.
* **Hardware-Optimized Inference**: Built on Hugging Face's Text Generation Inference (TGI), HUGS is optimized for peak performance across different hardware setups.
* **Hardware Flexibility**: Run HUGS on a variety of accelerators, including NVIDIA GPUs, AMD GPUs, with support for AWS Inferentia and Google TPUs coming soon.
* **Model Flexibility**: HUGS is compatible with a wide selection of open-source models, ensuring flexibility and choice for your AI applications.
* **Industry Standard APIs**: Deploy HUGS easily using Kubernetes with endpoints compatible with the OpenAI API, minimizing code changes.
* **Enterprise Distribution:** HUGS is an enterprise distribution of Hugging Face open source technologies, offering long-term support, rigorous testing, and SOC2 compliance.
* **Enterprise Compliance**: Minimizes compliance risks by including necessary licenses and terms of service.
**We provided early access to HUGS to select Enterprise Hub customers:**
> HUGS is a huge timesaver to deploy locally ready-to-work models with good performances \- before HUGS it would take us a week, now we can be done in less than 1 hour. For customers with sovereign AI requirements it's a game changer! - [Henri Jouhaud](https://huggingface.co/henrij), CTO at [Polyconseil](https://huggingface.co/polyconseil)
> We tried HUGS to deploy Gemma 2 on GCP using a L4 GPU \- we didn't have to fiddle with libraries, versions and parameters, it just worked out of the box. HUGS gives us confidence we can scale our internal usage of open models! - [Ghislain Putois](https://huggingface.co/ghislain-putois), Research Engineer at [Orange](https://huggingface.co/Orange)
## How it Works
Using HUGS is straightforward. Here's how you can get started:
*Note: You will need access to the appropriate subscription or marketplace offering depending on your chosen deployment method.*
### Where to find HUGS
HUGS is available through several channels:
1. **Cloud Service Provider (CSP) Marketplaces**: You can find and deploy HUGS on [Amazon Web Services (AWS)](https://aws.amazon.com/marketplace/pp/prodview-bqy5zfvz3wox6) and [Google Cloud Platform (GCP)](https://console.cloud.google.com/marketplace/product/huggingface-public/hugs). [Microsoft Azure](https://huggingface.co/docs/hugs/how-to/cloud/azure) support will come soon.
2. **DigitalOcean**: HUGS is natively available within [DigitalOcean as a new 1-Click Models service](http://digitalocean.com/blog/one-click-models-on-do-powered-by-huggingface), powered by Hugging Face HUGS and GPU Droplets.
3. **Enterprise Hub**: If your organization is upgraded to Enterprise Hub, [contact our Sales team](https://huggingface.co/contact/sales?from=hugs) to get access to HUGS.
For specific deployment instructions for each platform, please refer to the relevant documentation linked above.
### Pricing
HUGS offers on-demand pricing based on the uptime of each container, except for deployments on DigitalOcean.
* **AWS Marketplace and Google Cloud Platform Marketplace:** $1 per hour per container, no minimum fee (compute usage billed separately by CSP). On AWS, you have a 5-day free trial period to test HUGS for free.
* **DigitalOcean:** 1-Click Models powered by Hugging Face HUGS are available at no additional cost on DigitalOcean - regular GPU Droplets compute costs apply.
* **Enterprise Hub:** We offer custom HUGS access to Enterprise Hub organizations. Please [contact](https://huggingface.co/contact/sales?from=hugs) our Sales team to learn more.
### Running Inference
HUGS is based on Text Generation Inference (TGI), offering a seamless inference experience. For detailed instructions and examples, refer to the [Run Inference on HUGS](https://huggingface.co/docs/hugs/guides/inference) guide. HUGS leverages the OpenAI-compatible Messages API, allowing you to use familiar tools and libraries like cURL, the `huggingface_hub` SDK, and the `openai` SDK for sending requests.
```py
from huggingface_hub import InferenceClient
ENDPOINT_URL="REPLACE" # replace with your deployed url or IP
client = InferenceClient(base_url=ENDPOINT_URL, api_key="-")
chat_completion = client.chat.completions.create(
    messages=[
        {"role": "user", "content": "What is Deep Learning?"},
    ],
    temperature=0.7,
    top_p=0.95,
    max_tokens=128,
)
```
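Since the endpoint is OpenAI-compatible, the same request can also be sent with the `openai` SDK. The sketch below assumes the API is served under a `/v1` path and that the server accepts a placeholder model name (as TGI-based servers typically do); check the HUGS inference guide for the exact values of your deployment.
```py
from openai import OpenAI

ENDPOINT_URL = "REPLACE"  # replace with your deployed url or IP

client = OpenAI(base_url=f"{ENDPOINT_URL}/v1", api_key="-")

chat_completion = client.chat.completions.create(
    model="tgi",  # placeholder; verify the expected value in the HUGS docs
    messages=[
        {"role": "user", "content": "What is Deep Learning?"},
    ],
    temperature=0.7,
    top_p=0.95,
    max_tokens=128,
)
print(chat_completion.choices[0].message.content)
```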
## Supported Models and Hardware
HUGS supports a growing ecosystem of open models and hardware platforms. Refer to our [Supported Models](https://huggingface.co/docs/hugs/models) and [Supported Hardware](https://huggingface.co/docs/hugs/hardware) pages for the most up-to-date information.
We launch today with 13 popular open LLMs:
* [meta-llama/Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct)
* [meta-llama/Llama-3.1-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct)
* [meta-llama/Llama-3.1-405B-Instruct-FP8](https://huggingface.co/meta-llama/Llama-3.1-405B-Instruct-FP8)
* [NousResearch/Hermes-3-Llama-3.1-8B](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-8B)
* [NousResearch/Hermes-3-Llama-3.1-70B](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-70B)
* [NousResearch/Hermes-3-Llama-3.1-405B-FP8](https://huggingface.co/NousResearch/Hermes-3-Llama-3.1-405B-FP8)
* [NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO](https://huggingface.co/NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO)
* [mistralai/Mixtral-8x7B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x7B-Instruct-v0.1)
* [mistralai/Mistral-7B-Instruct-v0.3](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3)
* [mistralai/Mixtral-8x22B-Instruct-v0.1](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1)
* [google/gemma-2-27b-it](https://huggingface.co/google/gemma-2-27b-it)
* [google/gemma-2-9b-it](https://huggingface.co/google/gemma-2-9b-it)
* [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct)
For a detailed view of supported Models x Hardware, check out the [documentation](https://huggingface.co/docs/hugs/models).
## Get Started with HUGS Today
HUGS makes it easy to harness the power of open models, with zero-configuration optimized inference in your own infra. With HUGS, you can take control of your AI applications and easily transition proof of concept applications built with closed models to open models you host yourself.
Get started today and deploy HUGS on [AWS](https://aws.amazon.com/marketplace/pp/prodview-bqy5zfvz3wox6), [Google Cloud](https://console.cloud.google.com/marketplace/product/huggingface-public/hugs) or [DigitalOcean](https://www.digitalocean.com/products/ai-ml/1-click-models)!
| 4 |
0 | hf_public_repos | hf_public_repos/blog/inferentia-llama2.md | ---
title: "Make your llama generation time fly with AWS Inferentia2"
thumbnail: /blog/assets/inferentia-llama2/thumbnail.png
authors:
- user: dacorvo
---
**Update (02/2024): Performance has improved even more! Check our [updated benchmarks](https://huggingface.co/docs/optimum-neuron/benchmarks/inferentia-llama2).**
# Make your llama generation time fly with AWS Inferentia2
In a [previous post on the Hugging Face blog](https://huggingface.co/blog/accelerate-transformers-with-inferentia2), we introduced [AWS Inferentia2](https://aws.amazon.com/ec2/instance-types/inf2/), the second-generation AWS Inferentia accelerator, and explained how you could use [optimum-neuron](https://huggingface.co/docs/optimum-neuron/index) to quickly deploy Hugging Face models for standard text and vision tasks on AWS Inferentia2 instances.
In a further step of integration with the [AWS Neuron SDK](https://github.com/aws-neuron/aws-neuron-sdk), it is now possible to use 🤗 [optimum-neuron](https://huggingface.co/docs/optimum-neuron/index) to deploy LLM models for text generation on AWS Inferentia2.
And what better model could we choose for that demonstration than [Llama 2](https://huggingface.co/meta-llama/Llama-2-13b-hf), one of the most popular models on the [Hugging Face hub](https://huggingface.co/models).
## Setup 🤗 optimum-neuron on your Inferentia2 instance
Our recommendation is to use the [Hugging Face Neuron Deep Learning AMI](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2) (DLAMI). The DLAMI comes with all required libraries pre-packaged for you, including the Optimum Neuron, Neuron Drivers, Transformers, Datasets, and Accelerate.
Alternatively, you can use the [Hugging Face Neuron SDK DLC](https://github.com/aws/deep-learning-containers/releases?q=hf&expanded=true) to deploy on Amazon SageMaker.
*Note: stay tuned for an upcoming post dedicated to SageMaker deployment.*
Finally, these components can also be installed manually on a fresh Inferentia2 instance following the `optimum-neuron` [installation instructions](https://huggingface.co/docs/optimum-neuron/installation).
## Export the Llama 2 model to Neuron
As explained in the [optimum-neuron documentation](https://huggingface.co/docs/optimum-neuron/guides/export_model#why-compile-to-neuron-model), models need to be compiled and exported to a serialized format before running them on Neuron devices.
Fortunately, 🤗 `optimum-neuron` offers a [very simple API](https://huggingface.co/docs/optimum-neuron/guides/models#configuring-the-export-of-a-generative-model) to export standard 🤗 [transformers models](https://huggingface.co/docs/transformers/index) to the Neuron format.
```
>>> from optimum.neuron import NeuronModelForCausalLM
>>> compiler_args = {"num_cores": 24, "auto_cast_type": 'fp16'}
>>> input_shapes = {"batch_size": 1, "sequence_length": 2048}
>>> model = NeuronModelForCausalLM.from_pretrained(
"meta-llama/Llama-2-7b-hf",
export=True,
**compiler_args,
**input_shapes)
```
This deserves a little explanation:
- using `compiler_args`, we specify on how many cores we want the model to be deployed (each neuron device has two cores), and with which precision (here `float16`),
- using `input_shape`, we set the static input and output dimensions of the model. All model compilers require static shapes, and neuron makes no exception. Note that the
`sequence_length` not only constrains the length of the input context, but also the length of the KV cache, and thus, the output length.
Depending on your choice of parameters and inferentia host, this may take from a few minutes to more than an hour.
Fortunately, you will need to do this only once because you can save your model and reload it later.
```
>>> model.save_pretrained("a_local_path_for_compiled_neuron_model")
```
Even better, you can push it to the [Hugging Face hub](https://huggingface.co/models).
```
>>> model.push_to_hub(
"a_local_path_for_compiled_neuron_model",
repository_id="aws-neuron/Llama-2-7b-hf-neuron-latency")
```
## Generate Text using Llama 2 on AWS Inferentia2
Once your model has been exported, you can generate text using the transformers library, as it has been described in [detail in this previous post](https://huggingface.co/blog/how-to-generate).
```
>>> from optimum.neuron import NeuronModelForCausalLM
>>> from transformers import AutoTokenizer
>>> model = NeuronModelForCausalLM.from_pretrained('aws-neuron/Llama-2-7b-hf-neuron-latency')
>>> tokenizer = AutoTokenizer.from_pretrained("aws-neuron/Llama-2-7b-hf-neuron-latency")
>>> inputs = tokenizer("What is deep-learning ?", return_tensors="pt")
>>> outputs = model.generate(**inputs,
        max_new_tokens=128,
        do_sample=True,
        temperature=0.9,
        top_k=50,
        top_p=0.9)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['What is deep-learning ?\nThe term “deep-learning” refers to a type of machine-learning
that aims to model high-level abstractions of the data in the form of a hierarchy of multiple
layers of increasingly complex processing nodes.']
```
*Note: when passing multiple input prompts to a model, the resulting token sequences must be padded to the left with an end-of-stream token.
The tokenizers saved with the exported models are configured accordingly.*
The following generation strategies are supported:
- greedy search,
- multinomial sampling with top-k and top-p (with temperature).
Most logits pre-processing/filters (such as repetition penalty) are supported.
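For instance, a greedy-search variant of the example above with a repetition penalty would look like the sketch below; the penalty value is purely illustrative.
```
>>> from optimum.neuron import NeuronModelForCausalLM
>>> from transformers import AutoTokenizer
>>> model = NeuronModelForCausalLM.from_pretrained('aws-neuron/Llama-2-7b-hf-neuron-latency')
>>> tokenizer = AutoTokenizer.from_pretrained("aws-neuron/Llama-2-7b-hf-neuron-latency")
>>> inputs = tokenizer("What is deep-learning ?", return_tensors="pt")
>>> outputs = model.generate(**inputs,
        max_new_tokens=128,
        do_sample=False,          # greedy search
        repetition_penalty=1.2)   # illustrative value
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
```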
## All-in-one with optimum-neuron pipelines
For those who like to keep it simple, there is an even simpler way to use an LLM model on AWS inferentia 2 using [optimum-neuron pipelines](https://huggingface.co/docs/optimum-neuron/guides/pipelines).
Using them is as simple as:
```
>>> from optimum.neuron import pipeline
>>> p = pipeline('text-generation', 'aws-neuron/Llama-2-7b-hf-neuron-budget')
>>> p("My favorite place on earth is", max_new_tokens=64, do_sample=True, top_k=50)
[{'generated_text': 'My favorite place on earth is the ocean. It is where I feel most
at peace. I love to travel and see new places. I have a'}]
```
## Benchmarks
But how efficient is text generation on Inferentia2? Let's find out!
We have uploaded to the hub pre-compiled versions of the Llama 2 7B and 13B models with different configurations:
| Model type | num cores | batch_size | Hugging Face Hub model |
|----------------------------|-----------|------------|-------------------------------------------|
| Llama2 7B - B (budget) | 2 | 1 |[aws-neuron/Llama-2-7b-hf-neuron-budget](https://huggingface.co/aws-neuron/Llama-2-7b-hf-neuron-budget) |
| Llama2 7B - L (latency) | 24 | 1 |[aws-neuron/Llama-2-7b-hf-neuron-latency](https://huggingface.co/aws-neuron/Llama-2-7b-hf-neuron-latency) |
| Llama2 7B - T (throughput) | 24 | 4 |[aws-neuron/Llama-2-7b-hf-neuron-throughput](https://huggingface.co/aws-neuron/Llama-2-7b-hf-neuron-throughput) |
| Llama2 13B - L (latency) | 24 | 1 |[aws-neuron/Llama-2-13b-hf-neuron-latency](https://huggingface.co/aws-neuron/Llama-2-13b-hf-neuron-latency) |
| Llama2 13B - T (throughput)| 24 | 4 |[aws-neuron/Llama-2-13b-hf-neuron-throughput](https://huggingface.co/aws-neuron/Llama-2-13b-hf-neuron-throughput)|
*Note: all models are compiled with a maximum sequence length of 2048.*
The `llama2 7B` "budget" model is meant to be deployed on `inf2.xlarge` instance that has only one neuron device, and enough `cpu` memory to load the model.
All other models are compiled to use the full extent of cores available on the `inf2.48xlarge` instance.
*Note: please refer to the [inferentia2 product page](https://aws.amazon.com/ec2/instance-types/inf2/) for details on the available instances.*
We created two "latency" oriented configurations for the `llama2 7B` and `llama2 13B` models that can serve only one request at a time, but at full speed.
We also created two "throughput" oriented configurations to serve up to four requests in parallel.
To evaluate the models, we generate tokens up to a total sequence length of 1024, starting from
256 input tokens (i.e. we generate 256, 512 and 768 tokens).
*Note: the "budget" model numbers are reported but not included in the graphs for better readability.*
### Encoding time
The encoding time is the time required to process the input tokens and generate the first output token.
It is a very important metric, as it corresponds to the latency directly perceived by the user when streaming generated tokens.
We test the encoding time for increasing context sizes, 256 input tokens corresponding roughly to a typical Q/A usage,
while 768 is more typical of a Retrieval Augmented Generation (RAG) use-case.
The "budget" model (`Llama2 7B-B`) is deployed on an `inf2.xlarge` instance while other models are deployed on an `inf2.48xlarge` instance.
Encoding time is expressed in **seconds**.
| input tokens | Llama2 7B-L | Llama2 7B-T | Llama2 13B-L | Llama2 13B-T | Llama2 7B-B |
|-----------------|----------------|----------------|-----------------|-----------------|----------------|
| 256 | 0.5 | 0.9 | 0.6 | 1.8 | 0.3 |
| 512 | 0.7 | 1.6 | 1.1 | 3.0 | 0.4 |
| 768 | 1.1 | 3.3 | 1.7 | 5.2 | 0.5 |

We can see that all deployed models exhibit excellent response times, even for long contexts.
### End-to-end Latency
The end-to-end latency corresponds to the total time to reach a sequence length of 1024 tokens.
It therefore includes the encoding and generation time.
The "budget" model (`Llama2 7B-B`) is deployed on an `inf2.xlarge` instance while other models are deployed on an `inf2.48xlarge` instance.
Latency is expressed in **seconds**.
| new tokens | Llama2 7B-L | Llama2 7B-T | Llama2 13B-L | Llama2 13B-T | Llama2 7B-B |
|---------------|----------------|----------------|-----------------|-----------------|----------------|
| 256 | 2.3 | 2.7 | 3.5 | 4.1 | 15.9 |
| 512 | 4.4 | 5.3 | 6.9 | 7.8 | 31.7 |
| 768 | 6.2 | 7.7 | 10.2 | 11.1 | 47.3 |

All models deployed on the high-end instance exhibit a good latency, even those actually configured to optimize throughput.
The "budget" deployed model latency is significantly higher, but still ok.
### Throughput
We adopt the same convention as other benchmarks to evaluate the throughput: the total number of tokens processed (input and output) divided by the end-to-end latency.
In other words, we divide `batch_size * sequence_length` by the end-to-end latency to obtain the number of generated tokens per second.
The "budget" model (`Llama2 7B-B`) is deployed on an `inf2.xlarge` instance while other models are deployed on an `inf2.48xlarge` instance.
Throughput is expressed in **tokens/second**.
| new tokens | Llama2 7B-L | Llama2 7B-T | Llama2 13B-L | Llama2 13B-T | Llama2 7B-B |
|---------------|----------------|----------------|-----------------|-----------------|----------------|
| 256 | 227 | 750 | 145 | 504 | 32 |
| 512 | 177 | 579 | 111 | 394 | 24 |
| 768 | 164 | 529 | 101 | 370 | 22 |

Again, the models deployed on the high-end instance have a very good throughput, even those optimized for latency.
The "budget" model has a much lower throughput, but still ok for a streaming use-case, considering that an average reader reads around 5 words per-second.
## Conclusion
We have illustrated how easy it is to deploy `llama2` models from the [Hugging Face hub](https://huggingface.co/models) on
[AWS Inferentia2](https://aws.amazon.com/ec2/instance-types/inf2/) using 🤗 [optimum-neuron](https://huggingface.co/docs/optimum-neuron/index).
The deployed models demonstrate very good performance in terms of encoding time, latency and throughput.
Interestingly, the deployed models' latency is not too sensitive to the batch size, which opens the way for their deployment on inference endpoints
serving multiple requests in parallel.
There is still plenty of room for improvement though:
- in the current implementation, the only way to augment the throughput is to increase the batch size, but it is currently limited by the device memory.
Alternative options such as pipelining are currently integrated,
- the static sequence length limits the model ability to encode long contexts. It would be interesting to see if attention sinks might be a valid option to address this.
| 5 |
0 | hf_public_repos | hf_public_repos/blog/README.md | # The Hugging Face Blog Repository 🤗
This is the official repository of the [Hugging Face Blog](https://hf.co/blog).
**If you are an external contributor**: If your blog post is not a collaboration post with Hugging Face, please consider creating a [community blog](https://huggingface.co/blog-explorers) instead. Community blog posts appear on our blog's main page just like the blogs in this repository.
## How to write an article? 📝
1️⃣ Create a branch `YourName/Title`
2️⃣ Create a md (markdown) file, **use a short file name**.
For instance, if your title is "Introduction to Deep Reinforcement Learning", the md file name could be `intro-rl.md`. This is important because the **file name will be the blogpost's URL**.
3️⃣ Create a new folder in `assets`. Use the same name as the name of the md file. Optionally, you may add a numerical prefix to that folder, using a number that hasn't been used yet, but this is no longer required; e.g. the asset folder in this example could be `123_intro-rl` or `intro-rl`. This folder will contain **your thumbnail only**. The folder number is mostly for (rough) ordering purposes, so it's no big deal if two concurrent articles use the same number.
For the rest of your files, create a mirrored folder in the HuggingFace Documentation Images [repo](https://huggingface.co/datasets/huggingface/documentation-images/tree/main/blog). This is to reduce bloat in the GitHub base repo when cloning and pulling.
🖼️: In terms of images, **try to have small files** to avoid having a slow loading user experience:
- Use compressed images, you can use this website: https://tinypng.com or https://www.iloveimg.com/compress-image
4️⃣ Copy and paste this to your md file and change the elements
- title
- thumbnail
- authors
```
---
title: "PUT YOUR TITLE HERE"
thumbnail: /blog/assets/101_decision-transformers-train/thumbnail.gif
authors:
- user: your_hf_user
- user: your_coauthor
---
# Train your first Decision Transformer
Your content here [...]
```
When published, the Hub will insert the following UI elements right after the blogpost's main header (i.e. the line that starts with a single `#`, aka. the `<h1>`):
- "Published on [date]"
- "Update on GitHub" button
- avatars of the authors that were listed in authors.
5️⃣ Then, you can add your content. It's a markdown system, so if you wrote your text in Notion, just press Ctrl+Shift+V to copy/paste it as markdown.
6️⃣ Modify `_blog.yml` to add your blogpost.
7️⃣ When your article is ready, **open a pull request**.
8️⃣ The article will be **published automatically when you merge your pull request**.
## How to get a nice responsive thumbnail?
1️⃣ Create a `1300x650` image
2️⃣ Use [this template](https://github.com/huggingface/blog/blob/main/assets/thumbnail-template.svg) and fill the content part.
➡️ Or select a background you like and follow the instructions in [this Figma template](https://www.figma.com/file/sXrf9VtkkbWI7kCIesMkDY/HF-Blog-Template?node-id=351%3A39).
## Using LaTeX
Just add:
```
\\(your_latex_here\\)
```
For instance:
``` \\( Q(S_t, A_t) \\) ``` ➡️ $Q(S_t, A_t)$
## How to Caption Figures
A couple of alternatives:
- Use a Markdown table [like here](https://github.com/huggingface/blog/blob/fd611f6200391b865641c4e7a38e1ea47ec4bc6c/pref-tuning.md?plain=1#L31).
- Use HTML, [like here](https://github.com/huggingface/blog/blob/0e6ebe5ca43d6408c20eddab1f5f3205d680f3aa/watermarking.md?plain=1#L70-L73).
## How to Use Highlight Boxes
See an example [in this PR](https://github.com/huggingface/blog/pull/1180). This is to be used sparingly :)
## Embed a Space
Just add:
```
<script
type="module"
src="https://gradio.s3-us-west-2.amazonaws.com/4.36.1/gradio.js"
></script>
<gradio-app theme_mode="light" space="derek-thomas/hugging-face-blog-assistant"></gradio-app>
```
Code: [leaderboard-bigcodebench.md](leaderboard-bigcodebench.md)
[Blog Example](https://huggingface.co/blog/leaderboard-bigcodebench#how-well-do-llms-perform-on-bigcodebench-%F0%9F%93%8A)
## More Assistance
You can use the [community blog editor](https://huggingface.co/new-blog) which has a preview function.
Or check out this [assistant space](https://huggingface.co/spaces/derek-thomas/hugging-face-blog-assistant)
to help with rendering some tricky markdown blocks like:
- Inserting caption figures with image tables
- Tip formatting
- Inserting clickable references
Do give it a like if it was helpful.
| 6 |
0 | hf_public_repos | hf_public_repos/blog/rearchitecting-uploads-and-downloads.md | ---
title: "Rearchitecting Hugging Face Uploads and Downloads"
thumbnail: /blog/assets/rearchitecting-uploads-and-downloads/thumbnail.png
authors:
- user: port8080
- user: jsulz
- user: erinys
---
# Rearchitecting Hugging Face Uploads and Downloads
As part of Hugging Face's Xet team’s work to [improve Hugging Face Hub’s storage backend](https://huggingface.co/blog/xethub-joins-hf), we analyzed a 24 hour window of Hugging Face upload requests to better understand access patterns. On October 11th, 2024, we saw:
- Uploads from 88 countries
- 8.2 million upload requests
- 130.8 TB of data transferred
The map below visualizes this activity, with countries colored by bytes uploaded per hour.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rearchitecting-uploads-and-downloads/animated-uploads-choropleth.gif" alt="Animated view of uploads" width=100%>
</p>
Currently, uploads are stored in an [S3 bucket](https://aws.amazon.com/s3/) in **`us-east-1`** and optimized using [S3 Transfer Acceleration](https://aws.amazon.com/s3/transfer-acceleration/). Downloads are cached and served using [AWS Cloudfront](https://aws.amazon.com/cloudfront/) as a CDN. Cloudfront’s [400+ convenient edge locations](https://aws.amazon.com/blogs/networking-and-content-delivery/400-amazon-cloudfront-points-of-presence/) provide global coverage and low-latency data transfers. However, like most CDNs, it is optimized for web content and has a file size limit of 50GB.
While this size restriction is reasonable for typical internet file transfers, the ever-growing size of files in model and dataset repositories presents a challenge. For instance, the weights of [meta-llama/Meta-Llama-3-70B](https://huggingface.co/meta-llama/Meta-Llama-3-70B) total 131GB and are split across 30 files to meet the Hub’s recommendation of chunking weights into [20 GB segments](https://huggingface.co/docs/hub/en/repositories-recommendations#recommendations). Additionally, enabling advanced deduplication or compression techniques for both uploads and downloads requires reimagining how we handle file transfers.
## A Custom Protocol for Uploads and Downloads
To push Hugging Face infrastructure beyond its current limits, we are redesigning the Hub’s upload and download architecture. We plan to insert a [content-addressed store (CAS)](https://en.wikipedia.org/wiki/Content-addressable_storage) as the first stop for content distribution. This enables us to implement a custom protocol built on a guiding philosophy of **_dumb reads and smart writes_**. Unlike Git LFS, which treats files as opaque blobs, our approach analyzes files at the byte level, uncovering opportunities to improve transfer speeds for the massive files found in model and dataset repositories.
The read path prioritizes simplicity and speed to ensure high throughput with minimal latency. Requests for a file are routed to a CAS server, which provides reconstruction information. The data itself remains backed by an S3 bucket in **`us-east-1`**, with AWS CloudFront continuing to serve as the CDN for downloads.
The write path is more complex to optimize upload speeds and provide additional security guarantees. Like reads, upload requests are routed to a CAS server, but instead of querying at the file level [we operate on chunks](https://huggingface.co/blog/from-files-to-chunks). As matches are found, the CAS server instructs the client (e.g., [huggingface_hub](https://github.com/huggingface/huggingface_hub)) to transfer only the necessary (new) chunks. The chunks are validated by CAS before uploading them to S3.
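To make the chunk-level write path concrete, here is a deliberately simplified sketch. It is not the actual Xet protocol or `huggingface_hub` code: it uses fixed-size chunks and an in-memory set standing in for the CAS index, whereas the real system uses content-defined chunking and a proper server API, and the file names are placeholders. Still, it shows why re-uploading a file in which only a few chunks changed transfers very little data:
```python
import hashlib

CHUNK_SIZE = 64 * 1024  # illustrative; the real system derives chunk boundaries from the content itself

def chunk_hashes(path: str):
    """Yield (sha256 digest, chunk bytes) pairs for a file."""
    with open(path, "rb") as f:
        while chunk := f.read(CHUNK_SIZE):
            yield hashlib.sha256(chunk).hexdigest(), chunk

def upload(path: str, cas_index: set[str]) -> int:
    """Pretend-upload a file, transferring only chunks the store has not seen before."""
    new_bytes = 0
    for digest, chunk in chunk_hashes(path):
        if digest not in cas_index:
            cas_index.add(digest)  # stand-in for "PUT chunk to the content-addressed store"
            new_bytes += len(chunk)
    return new_bytes

cas_index: set[str] = set()
print(upload("model-v1.safetensors", cas_index))  # first upload transfers everything
print(upload("model-v2.safetensors", cas_index))  # only chunks that differ are transferred
```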
There are many implementation details to address, such as network constraints and storage overhead, which we’ll cover in future posts. For now, the first diagram below shows the read and write paths as they exist today:
<figure class="image text-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rearchitecting-uploads-and-downloads/old-read-write-path.png" alt="Old read and write sequence diagram" width=100%>
<figcaption> Reads are represented on the left; writes are to the right. Note that writes go directly to S3 without any intermediary.</figcaption>
</figure>
Meanwhile, in the new design, reads will take the following path:
<figure class="image text-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rearchitecting-uploads-and-downloads/new-reads.png" alt="New read path in proposed architecture">
<figcaption>New read path with a content addressed store (CAS) providing reconstruction information. Cloudfront continues to act as a CDN.</figcaption>
</figure>
and finally here is the updated write path:
<figure class="image text-center" width=90%>
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rearchitecting-uploads-and-downloads/new-writes.png" alt="New read path in proposed architecture" >
<figcaption>New write path with CAS speeding up and validating uploads. S3 continues to provide backing storage.</figcaption>
</figure>
By managing files at the byte level, we can adapt optimizations to suit different file formats. For instance, we have explored [improving the dedupeability of Parquet files](https://huggingface.co/blog/improve_parquet_dedupe), and are now investigating compressing tensor files (e.g., [Safetensors](https://github.com/huggingface/safetensors)), which has the potential to trim 10-25% off upload times. As new formats emerge, we are uniquely positioned to develop further enhancements that improve the development experience on the Hub.
This protocol also introduces significant improvements for enterprise customers and power users. Inserting a control plane for file transfers provides added guarantees to ensure malicious or invalid data cannot be uploaded. Operationally, uploads are no longer a black box. Enhanced telemetry provides audit trails and detailed logging, enabling the Hub infrastructure team to identify and resolve issues quickly and efficiently.
## Designing for Global Access
To support this custom protocol, we need to determine the optimal geographic distribution for the CAS service. [AWS Lambda@Edge](https://aws.amazon.com/lambda/edge/) was initially considered for its extensive global coverage to help minimize the round-trip time. However, its reliance on Cloudfront triggers made it incompatible with our updated upload path. Instead, we opted to deploy CAS nodes in a select few of AWS’s 34 regions.
Taking a closer look at our 24-hour window of S3 PUT requests, we identified global traffic patterns that reveal the distribution of data uploads to the Hub. As expected, the majority of activity comes from North America and Europe, with continuous, high-volume uploads throughout the day. The data also highlights a strong and growing presence in Asia. By focusing on these core regions, we can place our CAS [points of presence](https://docs.aws.amazon.com/whitepapers/latest/aws-fault-isolation-boundaries/points-of-presence.html) to balance storage and network resources while minimizing latency.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rearchitecting-uploads-and-downloads/pareto-chart.png" alt="Pareto chart of uploads" width=100%>
</p>
While AWS offers 34 regions, our goal is to keep infrastructure costs reasonable while maintaining a high user experience. Out of the 88 countries represented in this snapshot, the Pareto chart above shows that the top 7 countries account for 80% of uploaded bytes, while the top 20 countries contribute 95% of the total upload volume and requests.
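As an aside, the cumulative-share computation behind a chart like this is straightforward. The sketch below is hypothetical (the file name and column names are made up for illustration), but it is essentially how you would reproduce the Pareto view from a per-request upload log:
```python
import pandas as pd

# Hypothetical log with one row per upload request: country, bytes uploaded.
df = pd.read_csv("upload_requests_2024-10-11.csv")
by_country = df.groupby("country")["bytes"].sum().sort_values(ascending=False)
cumulative_share = by_country.cumsum() / by_country.sum()
print(cumulative_share.head(20))  # cumulative share of uploaded bytes covered by the top-20 countries
```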
The United States emerges as the primary source of upload traffic, necessitating a PoP in this region. In Europe, most activity is concentrated in central and western countries (e.g., Luxembourg, the United Kingdom, and Germany) though there is some additional activity to account for in Africa (specifically Algeria, Egypt, and South Africa). Asia’s upload traffic is primarily driven by Singapore, Hong Kong, Japan, and South Korea.
If we use a simple heuristic to distribute traffic, we can divide our CAS coverage into three major regions:
- **`us-east-1`**: Serving North and South America
- **`eu-west-3`**: Serving Europe, the Middle East, and Africa
- **`ap-southeast-1`**: Serving Asia and Oceania
This ends up being quite effective. The US and Europe account for 78.4% of uploaded bytes, while Asia accounts for 21.6%.
<p align="center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/rearchitecting-uploads-and-downloads/aws-regions.png" alt="New AWS mapping" width=100%>
</p>
This regional breakdown results in a well-balanced load across our three CAS PoPs, with additional capacity for growth in **`ap-southeast-1`** and flexibility to scale up in **`us-east-1`** and **`eu-west-3`** as needed.
Based on expected traffic, we plan to allocate resources as follows:
- **`us-east-1`**: 4 nodes
- **`eu-west-3`**: 4 nodes
- **`ap-southeast-1`**: 2 nodes
## Validating and Vetting
Even though we’re increasing the first-hop distance for some users, the overall impact on bandwidth across the Hub will be limited. Our estimates predict that while the cumulative bandwidth for all uploads will decrease from 48.5 Mbps to 42.5 Mbps (a 12% reduction), the performance hit will be more than offset by other system optimizations.
We are currently working toward moving our infrastructure into production by the end of 2024, where we will start with a single CAS in **`us-east-1`**. From there, we’ll start duplicating internal repositories to our new storage system to benchmark transfer performance, and then replicate our CAS to the additional PoPs mentioned above for more benchmarking. Based on those results, we will continue to optimize our approach to ensure that everything works smoothly when our storage backend is fully in place next year.
## Beyond the Bytes
As we continue this analysis, new opportunities for deeper insights are emerging. Hugging Face hosts one of the largest collections of data from the open-source machine learning community, providing a unique vantage point to explore the modalities and trends driving AI development around the world.
For example, future analyses could classify models uploaded to the Hub by use case (such as NLP, computer vision, robotics, or large language models) and examine geographic trends in ML activity. This data not only informs our infrastructure decisions but also provides a lens into the evolving landscape of machine learning.
We invite you to explore our current findings in more detail! Visit [our interactive Space](https://huggingface.co/spaces/xet-team/cas-analysis) to see the upload distribution for your region, and [follow our team](https://huggingface.co/xet-team) to hear more about what we’re building.
| 7 |
0 | hf_public_repos | hf_public_repos/blog/deep-rl-dqn.md | ---
title: "Deep Q-Learning with Space Invaders"
thumbnail: /blog/assets/78_deep_rl_dqn/thumbnail.gif
authors:
- user: ThomasSimonini
---
# Deep Q-Learning with Space Invaders
<h2>Unit 3, of the <a href="https://github.com/huggingface/deep-rl-class">Deep Reinforcement Learning Class with Hugging Face 🤗</a></h2>
⚠️ A **new updated version of this article is available here** 👉 [https://huggingface.co/deep-rl-course/unit3/introduction](https://huggingface.co/deep-rl-course/unit3/introduction)
*This article is part of the Deep Reinforcement Learning Class. A free course from beginner to expert. Check the syllabus [here.](https://huggingface.co/deep-rl-course/unit0/introduction)*
<img src="assets/78_deep_rl_dqn/thumbnail.gif" alt="Thumbnail"/>
---
[In the last unit](https://huggingface.co/blog/deep-rl-q-part2), we learned our first reinforcement learning algorithm: Q-Learning, **implemented it from scratch**, and trained it in two environments, FrozenLake-v1 ☃️ and Taxi-v3 🚕.
We got excellent results with this simple algorithm. But these environments were relatively simple because the **State Space was discrete and small** (16 different states for FrozenLake-v1 and 500 for Taxi-v3).
But as we'll see, producing and updating a **Q-table can become ineffective in large state space environments.**
So today, **we'll study our first Deep Reinforcement Learning agent**: Deep Q-Learning. Instead of using a Q-table, Deep Q-Learning uses a Neural Network that takes a state and approximates Q-values for each action based on that state.
And **we'll train it to play Space Invaders and other Atari environments using [RL-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo)**, a training framework for RL using Stable-Baselines that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results, and recording videos.
<figure class="image table text-center m-0 w-full">
<img src="assets/78_deep_rl_dqn/atari-envs.gif" alt="Environments"/>
</figure>
So let’s get started! 🚀
To be able to understand this unit, **you need to understand [Q-Learning](https://huggingface.co/blog/deep-rl-q-part2) first.**
- [From Q-Learning to Deep Q-Learning](#from-q-learning-to-deep-q-learning)
- [The Deep Q Network](#the-deep-q-network-dqn)
- [Preprocessing the input and temporal limitation](#preprocessing-the-input-and-temporal-limitation)
- [The Deep Q-Learning Algorithm](#the-deep-q-learning-algorithm)
- [Experience Replay to make more efficient use of experiences](#experience-replay-to-make-more-efficient-use-of-experiences)
- [Fixed Q-Target to stabilize the training](#fixed-q-target-to-stabilize-the-training)
- [Double DQN](#double-dqn)
## From Q-Learning to Deep Q-Learning
We learned that **Q-Learning is an algorithm we use to train our Q-Function**, an **action-value function** that determines the value of being at a particular state and taking a specific action at that state.
<figure class="image table text-center m-0 w-full"> <img src="assets/73_deep_rl_q_part2/Q-function.jpg" alt="Q-function"/> <figcaption>Given a state and action, our Q Function outputs a state-action value (also called Q-value)</figcaption> </figure>
The **Q comes from "the Quality" of that action at that state.**
Internally, our Q-function has **a Q-table, a table where each cell corresponds to a state-action pair value.** Think of this Q-table as **the memory or cheat sheet of our Q-function.**
The problem is that Q-Learning is a *tabular method*, i.e. it only works when the state and action spaces are small enough for the value functions to be represented as arrays and tables. And this is **not scalable**.
Q-Learning was working well with small state space environments like:
- FrozenLake-v1, we had 16 states.
- Taxi-v3, we had 500 states.
But think of what we're going to do today: we will train an agent to learn to play Space Invaders using the frames as input.
As **[Nikita Melkozerov mentioned](https://twitter.com/meln1k), Atari environments** have an observation space with a shape of (210, 160, 3), containing values ranging from 0 to 255 so that gives us 256^(210x160x3) = 256^100800 (for comparison, we have approximately 10^80 atoms in the observable universe).
<img src="assets/78_deep_rl_dqn/atari.jpg" alt="Atari State Space"/>
Therefore, the state space is gigantic; hence creating and updating a Q-table for that environment would not be efficient. In this case, the best idea is to approximate the Q-values instead of a Q-table using a parametrized Q-function \\(Q_{\theta}(s,a)\\) .
This neural network will approximate, given a state, the different Q-values for each possible action at that state. And that's exactly what Deep Q-Learning does.
<img src="assets/63_deep_rl_intro/deep.jpg" alt="Deep Q Learning"/>
Now that we understand Deep Q-Learning, let's dive deeper into the Deep Q-Network.
## The Deep Q-Network (DQN)
This is the architecture of our Deep Q-Learning network:
<img src="assets/78_deep_rl_dqn/deep-q-network.jpg" alt="Deep Q Network"/>
As input, we take a **stack of 4 frames** passed through the network as a state and output a **vector of Q-values for each possible action at that state**. Then, like with Q-Learning, we just need to use our epsilon-greedy policy to select which action to take.
When the Neural Network is initialized, **the Q-value estimation is terrible**. But during training, our Deep Q-Network agent will associate a situation with appropriate action and **learn to play the game well**.
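As a rough sketch in PyTorch (assuming the preprocessed input described in the next section, a stack of 4 grayscale 84x84 frames, and mirroring the architecture from the original DQN paper rather than the exact RL-Zoo implementation; the names are our own), the Q-network and the epsilon-greedy action selection could look like this:
```python
import random
import torch
import torch.nn as nn

class QNetwork(nn.Module):
    """Maps a stack of 4 grayscale 84x84 frames to one Q-value per action."""
    def __init__(self, n_actions: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(4, 32, kernel_size=8, stride=4), nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU(),
            nn.Flatten(),
            nn.Linear(64 * 7 * 7, 512), nn.ReLU(),
            nn.Linear(512, n_actions),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)

def select_action(q_net: QNetwork, state: torch.Tensor, epsilon: float, n_actions: int) -> int:
    """Epsilon-greedy: explore with probability epsilon, otherwise take the argmax of the Q-values."""
    if random.random() < epsilon:
        return random.randrange(n_actions)
    with torch.no_grad():
        q_values = q_net(state.unsqueeze(0))  # add a batch dimension: (1, 4, 84, 84)
    return int(q_values.argmax(dim=1).item())
```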
### Preprocessing the input and temporal limitation
We mentioned that we **preprocess the input**. It’s an essential step since we want to reduce the complexity of our state to reduce the computation time needed for training.
So what we do is **reduce the state space to 84x84 and grayscale it** (since the colors in Atari environments don't add important information).
This is an essential saving since we **reduce our three color channels (RGB) to 1**.
We can also **crop a part of the screen in some games** if it does not contain important information.
Then we stack four frames together.
<img src="assets/78_deep_rl_dqn/preprocessing.jpg" alt="Preprocessing"/>
Why do we stack four frames together?
We stack frames together because it helps us **handle the problem of temporal limitation**. Let’s take an example with the game of Pong. When you see this frame:
<img src="assets/78_deep_rl_dqn/temporal-limitation.jpg" alt="Temporal Limitation"/>
Can you tell me where the ball is going?
No, because one frame is not enough to have a sense of motion! But what if I add three more frames? **Here you can see that the ball is going to the right**.
<img src="assets/78_deep_rl_dqn/temporal-limitation-2.jpg" alt="Temporal Limitation"/>
That’s why, to capture temporal information, we stack four frames together.
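Here is a minimal sketch of that preprocessing (assuming `opencv-python` and NumPy are installed; in practice, environment wrappers such as Gym's `AtariPreprocessing` and a frame-stacking wrapper do this for you):
```python
from collections import deque

import cv2
import numpy as np

def preprocess(frame: np.ndarray) -> np.ndarray:
    """Turn an RGB Atari frame of shape (210, 160, 3) into a grayscale 84x84 image."""
    gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    return cv2.resize(gray, (84, 84), interpolation=cv2.INTER_AREA)

class FrameStack:
    """Keep the last 4 preprocessed frames and return them as a (4, 84, 84) state."""
    def __init__(self, k: int = 4):
        self.frames = deque(maxlen=k)

    def reset(self, first_frame: np.ndarray) -> np.ndarray:
        processed = preprocess(first_frame)
        for _ in range(self.frames.maxlen):
            self.frames.append(processed)  # fill the stack with copies of the first frame
        return np.stack(self.frames)

    def step(self, frame: np.ndarray) -> np.ndarray:
        self.frames.append(preprocess(frame))
        return np.stack(self.frames)
```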
Then, the stacked frames are processed by three convolutional layers. These layers **allow us to capture and exploit spatial relationships in images**. And because the frames are stacked together, **we can also exploit some temporal properties across those frames**.
Finally, we have a couple of fully connected layers that output a Q-value for each possible action at that state.
<img src="assets/78_deep_rl_dqn/deep-q-network.jpg" alt="Deep Q Network"/>
So, we see that Deep Q-Learning is using a neural network to approximate, given a state, the different Q-values for each possible action at that state. Let’s now study the Deep Q-Learning algorithm.
## The Deep Q-Learning Algorithm
We learned that Deep Q-Learning **uses a deep neural network to approximate the different Q-values for each possible action at a state** (value-function estimation).
The difference is that, during the training phase, instead of updating the Q-value of a state-action pair directly as we have done with Q-Learning:
<img src="https://huggingface.co/blog/assets/73_deep_rl_q_part2/q-ex-5.jpg" alt="Q Loss"/>
In Deep Q-Learning, we create a **Loss function between our Q-value prediction and the Q-target and use Gradient Descent to update the weights of our Deep Q-Network to approximate our Q-values better**.
<img src="assets/78_deep_rl_dqn/Q-target.jpg" alt="Q-target"/>
The Deep Q-Learning training algorithm has *two phases*:
- **Sampling**: we perform actions and **store the observed experiences tuples in a replay memory**.
- **Training**: Select a **small batch of tuples randomly and learn from it using a gradient descent update step**.
<img src="assets/78_deep_rl_dqn/sampling-training.jpg" alt="Sampling Training"/>
But, this is not the only change compared with Q-Learning. Deep Q-Learning training **might suffer from instability**, mainly because of combining a non-linear Q-value function (Neural Network) and bootstrapping (when we update targets with existing estimates and not an actual complete return).
To help us stabilize the training, we implement three different solutions:
1. *Experience Replay*, to make more **efficient use of experiences**.
2. *Fixed Q-Target* **to stabilize the training**.
3. *Double Deep Q-Learning*, to **handle the problem of the overestimation of Q-values**.
<!--- We'll see these three solutions in the pseudocode. --->
### Experience Replay to make more efficient use of experiences
Why do we create a replay memory?
Experience Replay in Deep Q-Learning has two functions:
1. **Make more efficient use of the experiences during the training**.
- Experience replay helps us **make more efficient use of the experiences during the training.** Usually, in online reinforcement learning, we interact in the environment, get experiences (state, action, reward, and next state), learn from them (update the neural network) and discard them.
- But with experience replay, we create a replay buffer that saves experience samples **that we can reuse during the training.**
<img src="assets/78_deep_rl_dqn/experience-replay.jpg" alt="Experience Replay"/>
⇒ This allows us to **learn from individual experiences multiple times**.
2. **Avoid forgetting previous experiences and reduce the correlation between experiences**.
- The problem we get if we give sequential samples of experiences to our neural network is that it tends to forget **previous experiences as they are overwritten by new ones.** For instance, if we are in the first level and then the second, which is different, our agent can forget how to behave and play in the first level.
The solution is to create a Replay Buffer that stores experience tuples while interacting with the environment and then sample a small batch of tuples. This prevents **the network from only learning about what it has immediately done.**
Experience replay also has other benefits. By randomly sampling the experiences, we remove correlation in the observation sequences and avoid **action values from oscillating or diverging catastrophically.**
In the Deep Q-Learning pseudocode, we see that we **initialize a replay memory buffer D with capacity N** (N is a hyperparameter that you can define). We then store experiences in the memory and sample a minibatch of experiences to feed the Deep Q-Network during the training phase.
<img src="assets/78_deep_rl_dqn/experience-replay-pseudocode.jpg" alt="Experience Replay Pseudocode"/>
### Fixed Q-Target to stabilize the training
When we want to calculate the TD error (aka the loss), we calculate the **difference between the TD target (Q-Target) and the current Q-value (estimation of Q)**.
But we **don’t have any idea of the real TD target**. We need to estimate it. Using the Bellman equation, we saw that the TD target is just the reward of taking that action at that state plus the discounted highest Q value for the next state.
<img src="assets/78_deep_rl_dqn/Q-target.jpg" alt="Q-target"/>
However, the problem is that we are using the same parameters (weights) for estimating the TD target **and** the Q value. Consequently, there is a significant correlation between the TD target and the parameters we are changing.
This means that at every step of training, **our Q-values shift, but so does the target value.** So, we’re getting closer to our target, but the target is also moving. It’s like chasing a moving target! This leads to significant oscillation in training.
It’s like if you were a cowboy (the Q estimation) and you want to catch the cow (the Q-target), you must get closer (reduce the error).
<img src="assets/78_deep_rl_dqn/qtarget-1.jpg" alt="Q-target"/>
At each time step, you’re trying to approach the cow, which also moves at each time step (because you use the same parameters).
<img src="assets/78_deep_rl_dqn/qtarget-2.jpg" alt="Q-target"/>
<img src="assets/78_deep_rl_dqn/qtarget-3.jpg" alt="Q-target"/>
This leads to a bizarre path of chasing (a significant oscillation in training).
<img src="assets/78_deep_rl_dqn/qtarget-4.jpg" alt="Q-target"/>
Instead, what we see in the pseudo-code is that we:
- Use a **separate network with a fixed parameter** for estimating the TD Target
- **Copy the parameters from our Deep Q-Network at every C step** to update the target network.
<img src="assets/78_deep_rl_dqn/fixed-q-target-pseudocode.jpg" alt="Fixed Q-target Pseudocode"/>
### Double DQN
Double DQN, or double Q-learning, was introduced [by Hado van Hasselt](https://papers.nips.cc/paper/3964-double-q-learning). This method **handles the problem of the overestimation of Q-values.**
To understand this problem, remember how we calculate the TD Target:
We face a simple problem by calculating the TD target: how are we sure that **the best action for the next state is the action with the highest Q-value?**
We know that the accuracy of Q values depends on what action we tried **and** what neighboring states we explored.
Consequently, we don’t have enough information about the best action to take at the beginning of the training. Therefore, taking the maximum Q value (which is noisy) as the best action to take can lead to false positives. If non-optimal actions are regularly **given a higher Q value than the optimal best action, the learning will be complicated.**
The solution is: when we compute the Q target, we use two networks to decouple the action selection from the target Q value generation. We:
<!---<img src="assets/78_deep_rl_dqn/double-dqn-pseudocode.jpg" alt="Double DQN Pseudocode"/>--->
- Use our **DQN network** to select the best action to take for the next state (the action with the highest Q value).
- Use our **Target network** to calculate the target Q value of taking that action at the next state.
Therefore, Double DQN helps us reduce the overestimation of Q-values and, as a consequence, helps us train faster and have more stable learning.
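In code, the only change relative to the fixed Q-target sketch above is how the TD target is built: the online network selects the action, the target network evaluates it. A hedged sketch:
```python
import torch

def double_dqn_target(q_net, target_net, rewards, next_states, dones, gamma: float = 0.99):
    """TD target where the online network picks the action and the target network scores it."""
    with torch.no_grad():
        best_actions = q_net(next_states).argmax(dim=1, keepdim=True)         # 1. action selection
        next_q = target_net(next_states).gather(1, best_actions).squeeze(1)   # 2. action evaluation
        return rewards + gamma * next_q * (1.0 - dones)
```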
Since these three improvements to Deep Q-Learning, many more have been added, such as Prioritized Experience Replay and Dueling Deep Q-Learning. They’re out of the scope of this course, but if you’re interested, check the links we put in the reading list. 👉 **[https://github.com/huggingface/deep-rl-class/blob/main/unit3/README.md](https://github.com/huggingface/deep-rl-class/blob/main/unit3/README.md)**
Now that you've studied the theory behind Deep Q-Learning, **you’re ready to train your Deep Q-Learning agent to play Atari Games**. We'll start with Space Invaders, but you'll be able to use any Atari game you want 🔥
We're using the RL-Baselines-3 Zoo integration, a vanilla version of Deep Q-Learning with no extensions such as Double-DQN, Dueling-DQN, and Prioritized Experience Replay.
Start the tutorial here 👉 https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/unit3/unit3.ipynb
The leaderboard to compare your results with your classmates 🏆 👉 https://huggingface.co/spaces/chrisjay/Deep-Reinforcement-Learning-Leaderboard
<figure class="image table text-center m-0 w-full">
<img src="assets/78_deep_rl_dqn/atari-envs.gif" alt="Environments"/>
</figure>
---
Congrats on finishing this chapter! There was a lot of information. And congrats on finishing the tutorial. You’ve just trained your first Deep Q-Learning agent and shared it on the Hub 🥳.
That’s **normal if you still feel confused** with all these elements. **This was the same for me and for all people who studied RL.**
Take time to really grasp the material before continuing.
Don't hesitate to train your agent in other environments (Pong, Seaquest, QBert, Ms Pac Man). The **best way to learn is to try things on your own!**
We published additional readings in the syllabus if you want to go deeper 👉 **[https://github.com/huggingface/deep-rl-class/blob/main/unit3/README.md](https://github.com/huggingface/deep-rl-class/blob/main/unit3/README.md)**
In the next unit, we’re going to learn about Policy Gradients methods.
And don't forget to share with your friends who want to learn 🤗 !
Finally, we want **to improve and update the course iteratively with your feedback**. If you have some, please fill this form 👉 **[https://forms.gle/3HgA7bEHwAmmLfwh9](https://forms.gle/3HgA7bEHwAmmLfwh9)**
### **Keep learning, stay awesome,**
| 8 |
0 | hf_public_repos | hf_public_repos/blog/carbon-emissions-on-the-hub.md | ---
title: "CO2 Emissions and the 🤗 Hub: Leading the Charge"
thumbnail: /blog/assets/60_carbon_emissions_on_the_hub/thumbnail.jpg
authors:
- user: sasha
- user: muellerzr
- user: nateraw
---
# CO2 Emissions and the 🤗 Hub: Leading the Charge
## What are CO2 Emissions and why are they important?
Climate change is one of the greatest challenges that we are facing and reducing emissions of greenhouse gases such as carbon dioxide (CO2) is an important part of tackling this problem.
Training and deploying machine learning models will emit CO2 due to the energy usage of the computing infrastructures that are used: from GPUs to storage, it all needs energy to function and emits CO2 in the process.

> Pictured: Recent Transformer models and their carbon footprints
The amount of CO2 emitted depends on different factors such as runtime, hardware used, and carbon intensity of the energy source.
Using the tools described below will help you both track and report your own emissions (which is important to improve the transparency of our field as a whole!) and choose models based on their carbon footprint.
## How to calculate your own CO2 Emissions automatically with Transformers
Before we begin, if you do not have the latest version of the `huggingface_hub` library on your system, please run the following:
```
pip install huggingface_hub -U
```
## How to find low-emission models using the Hugging Face Hub
With the model now uploaded to the Hub, how can you search for models on the Hub while trying to be eco-friendly? Well, the `huggingface_hub` library has a new special parameter to perform this search: `emissions_thresholds`. All you need to do is specify a minimum or maximum number of grams, and all models that fall within that range will be returned.
For example, we can search for all models that took a maximum of 100 grams to make:
```python
from huggingface_hub import HfApi
api = HfApi()
models = api.list_models(emissions_thresholds=(None, 100), cardData=True)
len(models)
>>> 191
```
There were quite a few! This also helps to find smaller models, given they typically did not release as much carbon during training.
We can look at one up close to see that it fits our threshold:
```python
model = models[0]
print(f'Model Name: {model.modelId}\nCO2 Emitted during training: {model.cardData["co2_eq_emissions"]}')
>>> Model Name: esiebomajeremiah/autonlp-email-classification-657119381
CO2 Emitted during training: 3.516233232503715
```
Similarly, we can search for a minimum value to find very large models that emitted a lot of CO2 during training:
```python
models = api.list_models(emissions_thresholds=(500, None), cardData=True)
len(models)
>>> 10
```
Now let's see exactly how much CO2 one of these emitted:
```python
model = models[0]
print(f'Model Name: {model.modelId}\nCO2 Emitted during training: {model.cardData["co2_eq_emissions"]}')
>>> Model Name: Maltehb/aelaectra-danish-electra-small-cased
CO2 Emitted during training: 4009.5
```
That's a lot of CO2!
As you can see, in just a few lines of code we can quickly vet models we may want to use to make sure we're being environmentally cognizant!
## How to Report Your Carbon Emissions with `transformers`
If you're using `transformers`, you can automatically track and report carbon emissions thanks to the `codecarbon` integration. If you've installed `codecarbon` on your machine, the `Trainer` object will automatically add the `CodeCarbonCallback` while training, which will store carbon emissions data for you as you train.
So, if you run something like this...
```python
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, Trainer, TrainingArguments
ds = load_dataset("imdb")
model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
def tokenize_function(examples):
return tokenizer(examples["text"], padding="max_length", truncation=True)
small_train_dataset = ds["train"].shuffle(seed=42).select(range(1000)).map(tokenize_function, batched=True)
small_eval_dataset = ds["test"].shuffle(seed=42).select(range(1000)).map(tokenize_function, batched=True)
training_args = TrainingArguments(
"codecarbon-text-classification",
num_train_epochs=4,
push_to_hub=True
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=small_train_dataset,
eval_dataset=small_eval_dataset,
)
trainer.train()
```
...you'll be left with a file within the `codecarbon-text-classification` directory called `emissions.csv`. This file will keep track of the carbon emissions across different training runs. Then, when you're ready, you can take the emissions from the run you used to train your final model and include that in its model card. 📝
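For instance, a small sketch of pulling a total out of that file with pandas (the exact columns are produced by `codecarbon` and may differ between versions; the `emissions` column is expressed in kg of CO2-eq):
```python
import pandas as pd

df = pd.read_csv("codecarbon-text-classification/emissions.csv")
total_kg = df["emissions"].sum()  # one row per tracked run
print(f"Total estimated emissions: {total_kg:.4f} kg CO2-eq ({total_kg * 1000:.1f} g)")
```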
An example of this data being included at the top of the model card is shown below:

For more references on the metadata format for `co2_eq_emissions` see [the hub docs](https://huggingface.co/docs/hub/model-cards-co2).
### Further readings
- Rolnick et al. (2019) - [Tackling Climate Change with Machine Learning](https://arxiv.org/pdf/1906.05433.pdf)
- Strubell et al. (2019) - [Energy and Policy Considerations for Deep Learning in NLP](https://arxiv.org/pdf/1906.02243.pdf)
- Schwartz et al. (2020) - [Green AI](https://dl.acm.org/doi/abs/10.1145/3381831)
| 9 |
0 | hf_public_repos/candle/candle-book/src | hf_public_repos/candle/candle-book/src/guide/installation.md | # Installation
**With Cuda support**:
1. First, make sure that Cuda is correctly installed.
- `nvcc --version` should print information about your Cuda compiler driver.
- `nvidia-smi --query-gpu=compute_cap --format=csv` should print your GPU's compute capability, e.g. something
like:
```bash
compute_cap
8.9
```
You can also compile the Cuda kernels for a specific compute cap using the
`CUDA_COMPUTE_CAP=<compute cap>` environment variable.
If any of the above commands errors out, please make sure to update your Cuda version.
2. Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) with Cuda support.
Start by creating a new cargo:
```bash
cargo new myapp
cd myapp
```
Make sure to add the `candle-core` crate with the cuda feature:
```bash
cargo add --git https://github.com/huggingface/candle.git candle-core --features "cuda"
```
Run `cargo build` to make sure everything can be correctly built.
```bash
cargo build
```
**Without Cuda support**:
Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) as follows:
```bash
cargo new myapp
cd myapp
cargo add --git https://github.com/huggingface/candle.git candle-core
```
Finally, run `cargo build` to make sure everything can be correctly built.
```bash
cargo build
```
**With mkl support**
You can also check out the `mkl` feature, which can provide faster inference on CPU. [Using mkl](./advanced/mkl.md)
| 0 |
0 | hf_public_repos/candle | hf_public_repos/candle/candle-flash-attn/build.rs | // Build script to run nvcc and generate the C glue code for launching the flash-attention kernel.
// The cuda build time is very long so one can set the CANDLE_FLASH_ATTN_BUILD_DIR environment
// variable in order to cache the compiled artifacts and avoid recompiling too often.
use anyhow::{Context, Result};
use std::path::PathBuf;
const KERNEL_FILES: [&str; 33] = [
"kernels/flash_api.cu",
"kernels/flash_fwd_hdim128_fp16_sm80.cu",
"kernels/flash_fwd_hdim160_fp16_sm80.cu",
"kernels/flash_fwd_hdim192_fp16_sm80.cu",
"kernels/flash_fwd_hdim224_fp16_sm80.cu",
"kernels/flash_fwd_hdim256_fp16_sm80.cu",
"kernels/flash_fwd_hdim32_fp16_sm80.cu",
"kernels/flash_fwd_hdim64_fp16_sm80.cu",
"kernels/flash_fwd_hdim96_fp16_sm80.cu",
"kernels/flash_fwd_hdim128_bf16_sm80.cu",
"kernels/flash_fwd_hdim160_bf16_sm80.cu",
"kernels/flash_fwd_hdim192_bf16_sm80.cu",
"kernels/flash_fwd_hdim224_bf16_sm80.cu",
"kernels/flash_fwd_hdim256_bf16_sm80.cu",
"kernels/flash_fwd_hdim32_bf16_sm80.cu",
"kernels/flash_fwd_hdim64_bf16_sm80.cu",
"kernels/flash_fwd_hdim96_bf16_sm80.cu",
"kernels/flash_fwd_hdim128_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim160_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim192_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim224_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim256_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim32_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim64_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim96_fp16_causal_sm80.cu",
"kernels/flash_fwd_hdim128_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim160_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim192_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim224_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim256_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim32_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim64_bf16_causal_sm80.cu",
"kernels/flash_fwd_hdim96_bf16_causal_sm80.cu",
];
fn main() -> Result<()> {
println!("cargo:rerun-if-changed=build.rs");
for kernel_file in KERNEL_FILES.iter() {
println!("cargo:rerun-if-changed={kernel_file}");
}
println!("cargo:rerun-if-changed=kernels/flash_fwd_kernel.h");
println!("cargo:rerun-if-changed=kernels/flash_fwd_launch_template.h");
println!("cargo:rerun-if-changed=kernels/flash.h");
println!("cargo:rerun-if-changed=kernels/philox.cuh");
println!("cargo:rerun-if-changed=kernels/softmax.h");
println!("cargo:rerun-if-changed=kernels/utils.h");
println!("cargo:rerun-if-changed=kernels/kernel_traits.h");
println!("cargo:rerun-if-changed=kernels/block_info.h");
println!("cargo:rerun-if-changed=kernels/static_switch.h");
let out_dir = PathBuf::from(std::env::var("OUT_DIR").context("OUT_DIR not set")?);
let build_dir = match std::env::var("CANDLE_FLASH_ATTN_BUILD_DIR") {
Err(_) =>
{
#[allow(clippy::redundant_clone)]
out_dir.clone()
}
Ok(build_dir) => {
let path = PathBuf::from(build_dir);
path.canonicalize().expect(&format!(
"Directory doesn't exists: {} (the current directory is {})",
&path.display(),
std::env::current_dir()?.display()
))
}
};
let kernels = KERNEL_FILES.iter().collect();
let builder = bindgen_cuda::Builder::default()
.kernel_paths(kernels)
.out_dir(build_dir.clone())
.arg("-std=c++17")
.arg("-O3")
.arg("-U__CUDA_NO_HALF_OPERATORS__")
.arg("-U__CUDA_NO_HALF_CONVERSIONS__")
.arg("-U__CUDA_NO_HALF2_OPERATORS__")
.arg("-U__CUDA_NO_BFLOAT16_CONVERSIONS__")
.arg("-Icutlass/include")
.arg("--expt-relaxed-constexpr")
.arg("--expt-extended-lambda")
.arg("--use_fast_math")
.arg("--verbose");
let out_file = build_dir.join("libflashattention.a");
builder.build_lib(out_file);
println!("cargo:rustc-link-search={}", build_dir.display());
println!("cargo:rustc-link-lib=flashattention");
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=stdc++");
Ok(())
}
| 1 |
0 | hf_public_repos/candle | hf_public_repos/candle/candle-flash-attn/Cargo.toml | [package]
name = "candle-flash-attn"
version = "0.8.0"
edition = "2021"
description = "Flash attention layer for the candle ML framework."
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"
readme = "README.md"
[dependencies]
candle = { path = "../candle-core", features = ["cuda"], package = "candle-core", version = "0.8.0" }
half = { version = "2.3.1", features = ["num-traits"] }
[build-dependencies]
bindgen_cuda = "0.1.1"
anyhow = { version = "1", features = ["backtrace"] }
[dev-dependencies]
anyhow = { version = "1", features = ["backtrace"] }
candle-nn = { path = "../candle-nn", features = ["cuda"] }
| 2 |
0 | hf_public_repos/candle | hf_public_repos/candle/candle-flash-attn/README.md | # candle-flash-attn
| 3 |
0 | hf_public_repos/candle/candle-flash-attn | hf_public_repos/candle/candle-flash-attn/src/lib.rs | mod ffi;
use candle::backend::BackendStorage;
use candle::cuda_backend::cudarc::driver::DevicePtr;
use candle::cuda_backend::WrapErr;
use candle::{CpuStorage, DType, Layout, Result, Shape, Tensor};
use half::{bf16, f16};
pub struct FlashAttn {
pub softmax_scale: f32,
pub alibi_slopes: Option<Tensor>,
pub window_size_left: Option<usize>,
pub window_size_right: Option<usize>,
}
fn round_multiple(x: usize, m: usize) -> usize {
(x + m - 1) / m * m
}
impl FlashAttn {
fn cuda_fwd_t<
T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr,
>(
&self,
q: &candle::CudaStorage,
q_l: &Layout,
k: &candle::CudaStorage,
k_l: &Layout,
v: &candle::CudaStorage,
v_l: &Layout,
is_bf16: bool,
) -> Result<(candle::CudaStorage, Shape)> {
// https://github.com/Dao-AILab/flash-attention/blob/b252072409e69c25f2b9d473cc534e49b24decd2/csrc/flash_attn/flash_api.cpp#L187
let dev = q.device();
let out_shape = q_l.shape().clone();
let out_l = Layout::contiguous(&out_shape);
let q = q.as_cuda_slice::<T>()?;
let k = k.as_cuda_slice::<T>()?;
let v = v.as_cuda_slice::<T>()?;
let q = q.slice(q_l.start_offset()..);
let k = k.slice(k_l.start_offset()..);
let v = v.slice(v_l.start_offset()..);
let q_stride = q_l.stride();
let k_stride = k_l.stride();
let v_stride = v_l.stride();
let o_stride = out_l.stride();
let q_rank = q_stride.len();
let k_rank = k_stride.len();
let v_rank = v_stride.len();
let o_rank = o_stride.len();
if q_rank != 4 || k_rank != 4 || v_rank != 4 {
candle::bail!(
"flash-attn expects input tensors of rank 4 (q: {q_rank}, k: {k_rank}, v: {v_rank}"
)
}
if q_stride[q_rank - 1] != 1 {
candle::bail!("the last dim of q must be contiguous {q_stride:?}")
}
if k_stride[k_rank - 1] != 1 {
candle::bail!("the last dim of k must be contiguous {k_stride:?}")
}
if v_stride[v_rank - 1] != 1 {
candle::bail!("the last dim of v must be contiguous {v_stride:?}")
}
let (b_sz, seqlen_q, num_heads, head_size_og) = q_l.shape().dims4()?;
let (_b_sz, seqlen_k, num_heads_k, _head_size_og) = k_l.shape().dims4()?;
let expected_kv = (b_sz, seqlen_k, num_heads_k, head_size_og);
if expected_kv != k_l.shape().dims4()? {
candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape())
}
if expected_kv != v_l.shape().dims4()? {
candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape())
}
if head_size_og > 256 {
candle::bail!("only supports head dimension at most 256 (got {head_size_og})")
}
if head_size_og % 8 != 0 {
// TODO: Handle head sizes that are not a multiple of 8 via some padding.
candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})")
}
if num_heads % num_heads_k != 0 {
candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}")
}
let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes {
if alibi_slopes.dtype() != DType::F32 {
candle::bail!(
"DType mismatch alibi_slopes {:?}, expected {:?}",
alibi_slopes.dtype(),
DType::F32
);
}
let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout();
if num_heads != alibi_slopes_layout.shape().dims1()? {
candle::bail!(
"shape mismatch alibi_slopes {:?}, expected {:?}",
alibi_slopes_layout.shape(),
(num_heads)
);
}
let alibi_slopes = match &*alibi_slopes {
candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?,
_ => candle::bail!("alibi_slopes must be a cuda tensor"),
};
let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..);
*alibi_slopes.device_ptr() as *const core::ffi::c_void
} else {
std::ptr::null()
};
// if window_size_left > self.max_seqlen_k or None => -1
let mut window_size_left = self
.window_size_left
.filter(|v| v <= &seqlen_k)
.map(|v| v as i32)
.unwrap_or(-1);
// if window_size_right > self.max_seqlen_k or None => -1
let mut window_size_right = self
.window_size_right
.filter(|v| v <= &seqlen_k)
.map(|v| v as i32)
.unwrap_or(-1);
let head_size = round_multiple(head_size_og, 8);
let head_size_rounded = round_multiple(head_size, 32);
let seqlen_q_rounded = round_multiple(seqlen_q, 128);
let seqlen_k_rounded = round_multiple(seqlen_k, 128);
let elem_count = out_shape.elem_count();
let dst = unsafe { dev.alloc::<T>(elem_count) }.w()?;
let softmax_lse = dev
.alloc_zeros::<f32>(b_sz * 128 * num_heads * seqlen_q)
.w()?;
let is_bf16 = if is_bf16 { 1 } else { 0 };
// Causal is the special case where window_size_right == 0 and window_size_left < 0.
// Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
let is_causal = if window_size_left < 0 && window_size_right == 0 {
1
} else {
0
};
if window_size_left < 0 && window_size_right >= 0 {
window_size_left = seqlen_k as i32;
}
if window_size_left >= 0 && window_size_right < 0 {
window_size_right = seqlen_k as i32;
}
unsafe {
let q_ptr = *q.device_ptr() as *const core::ffi::c_void;
let k_ptr = *k.device_ptr() as *const core::ffi::c_void;
let v_ptr = *v.device_ptr() as *const core::ffi::c_void;
let dst_ptr = *dst.device_ptr() as *const core::ffi::c_void;
let softmax_lse_ptr = *softmax_lse.device_ptr() as *const core::ffi::c_void;
ffi::run_mha(
q_ptr,
k_ptr,
v_ptr,
dst_ptr,
softmax_lse_ptr,
/* alibi_slopes_ptr */ alibi_slopes_ptr,
/* cu_seqlens_q_ptr */ std::ptr::null(),
/* cu_seqlens_k_ptr */ std::ptr::null(),
/* q_batch_stride */ q_stride[0] as u32,
/* k_batch_stride */ k_stride[0] as u32,
/* v_batch_stride */ v_stride[0] as u32,
/* o_batch_stride */ o_stride[0] as u32,
/* alibi_slopes_batch_stride */ 0,
/* q_row_stride */ q_stride[q_rank - 3] as u32,
/* k_row_stride */ k_stride[k_rank - 3] as u32,
/* v_row_stride */ v_stride[v_rank - 3] as u32,
/* o_row_stride */ o_stride[o_rank - 3] as u32,
/* q_head_stride */ q_stride[q_rank - 2] as u32,
/* k_head_stride */ k_stride[k_rank - 2] as u32,
/* v_head_stride */ v_stride[v_rank - 2] as u32,
/* o_head_stride */ o_stride[o_rank - 2] as u32,
/* b */ b_sz as u32,
/* h */ num_heads as u32,
/* h_k */ num_heads_k as u32,
/* d */ head_size as u32,
/* d_rounded */ head_size_rounded as u32,
/* softmax_scale*/ self.softmax_scale,
/* seqlen_q */ seqlen_q as u32,
/* seqlen_k */ seqlen_k as u32,
/* seqlen_q_rounded */ seqlen_q_rounded as u32,
/* seqlen_k_rounded */ seqlen_k_rounded as u32,
/* is_bf16 */ is_bf16,
/* is_causal */ is_causal,
/* window_size_left */ window_size_left,
/* window_size_right */ window_size_right,
)
}
let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone());
Ok((dst, out_shape))
}
}
impl candle::CustomOp3 for FlashAttn {
fn name(&self) -> &'static str {
"flash-attn"
}
fn cpu_fwd(
&self,
_: &CpuStorage,
_: &Layout,
_: &CpuStorage,
_: &Layout,
_: &CpuStorage,
_: &Layout,
) -> Result<(CpuStorage, Shape)> {
candle::bail!("no cpu support for flash-attn")
}
fn cuda_fwd(
&self,
q: &candle::CudaStorage,
q_l: &Layout,
k: &candle::CudaStorage,
k_l: &Layout,
v: &candle::CudaStorage,
v_l: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
match q.dtype() {
candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false),
candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true),
dt => candle::bail!("flash-attn is only supported for f16/bf16 ({dt:?})"),
}
}
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
let window_size_left = None;
let window_size_right = if causal { Some(0) } else { None };
let op = FlashAttn {
softmax_scale,
alibi_slopes: None,
window_size_left,
window_size_right,
};
q.apply_op3(k, v, op)
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `window_size_left` - Limit left attention to value tokens.
/// * `window_size_right` - Limit right attention to value tokens.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_windowed(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
window_size_left: Option<usize>,
window_size_right: Option<usize>,
) -> Result<Tensor> {
let op = FlashAttn {
softmax_scale,
alibi_slopes: None,
window_size_left,
window_size_right,
};
q.apply_op3(k, v, op)
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_alibi(
q: &Tensor,
k: &Tensor,
v: &Tensor,
alibi_slopes: &Tensor,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
let window_size_left = None;
let window_size_right = if causal { Some(0) } else { None };
let op = FlashAttn {
softmax_scale,
alibi_slopes: Some(alibi_slopes.clone()),
window_size_left,
window_size_right,
};
q.apply_op3(k, v, op)
}
/// Flash-attention v2 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
/// * `window_size_left` - Limit left attention to value tokens.
/// * `window_size_right` - Limit right attention to value tokens.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_alibi_windowed(
q: &Tensor,
k: &Tensor,
v: &Tensor,
alibi_slopes: &Tensor,
softmax_scale: f32,
window_size_left: Option<usize>,
window_size_right: Option<usize>,
) -> Result<Tensor> {
let op = FlashAttn {
softmax_scale,
alibi_slopes: Some(alibi_slopes.clone()),
window_size_left,
window_size_right,
};
q.apply_op3(k, v, op)
}
struct FlashAttnVarLen {
pub softmax_scale: f32,
pub max_seqlen_q: usize,
pub max_seqlen_k: usize,
pub seqlens_q: Tensor,
pub seqlens_k: Tensor,
pub alibi_slopes: Option<Tensor>,
pub window_size_left: Option<usize>,
pub window_size_right: Option<usize>,
}
impl FlashAttnVarLen {
fn cuda_fwd_t<
T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr,
>(
&self,
q: &candle::CudaStorage,
q_l: &Layout,
k: &candle::CudaStorage,
k_l: &Layout,
v: &candle::CudaStorage,
v_l: &Layout,
is_bf16: bool,
) -> Result<(candle::CudaStorage, Shape)> {
// https://github.com/Dao-AILab/flash-attention/blob/184b992dcb2a0890adaa19eb9b541c3e4f9d2a08/csrc/flash_attn/flash_api.cpp#L327
let dev = q.device();
let out_shape = q_l.shape().clone();
let out_l = Layout::contiguous(&out_shape);
let (seqlens_q, seqlens_q_layout) = self.seqlens_q.storage_and_layout();
let seqlens_q = match &*seqlens_q {
candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32!
_ => candle::bail!("seqlens_q must be a cuda tensor"),
};
let seqlens_q = match seqlens_q_layout.contiguous_offsets() {
Some((o1, o2)) => seqlens_q.slice(o1..o2),
None => candle::bail!("seqlens_q has to be contiguous"),
};
let (seqlens_k, seqlens_k_layout) = self.seqlens_k.storage_and_layout();
let seqlens_k = match &*seqlens_k {
candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32!
_ => candle::bail!("seqlens_k must be a cuda tensor"),
};
let seqlens_k = match seqlens_k_layout.contiguous_offsets() {
Some((o1, o2)) => seqlens_k.slice(o1..o2),
None => candle::bail!("seqlens_k has to be contiguous"),
};
        let q = q.as_cuda_slice::<T>()?;
        let k = k.as_cuda_slice::<T>()?;
        let v = v.as_cuda_slice::<T>()?;
let q = q.slice(q_l.start_offset()..);
let k = k.slice(k_l.start_offset()..);
let v = v.slice(v_l.start_offset()..);
let q_stride = q_l.stride();
let k_stride = k_l.stride();
let v_stride = v_l.stride();
let o_stride = out_l.stride();
let q_rank = q_stride.len();
let k_rank = k_stride.len();
let v_rank = v_stride.len();
let o_rank = o_stride.len();
if q_rank != 3 || k_rank != 3 || v_rank != 3 {
candle::bail!(
"flash-attn-varlen expects input tensors of rank 3 (q: {q_rank}, k: {k_rank}, v: {v_rank}"
)
}
if q_stride[q_rank - 1] != 1 {
candle::bail!("the last dim of q must be contiguous {q_stride:?}")
}
if k_stride[k_rank - 1] != 1 {
candle::bail!("the last dim of k must be contiguous {k_stride:?}")
}
if v_stride[v_rank - 1] != 1 {
candle::bail!("the last dim of v must be contiguous {v_stride:?}")
}
let (_total_q, num_heads, head_size_og) = q_l.shape().dims3()?;
let (total_k, num_heads_k, _head_size_og) = k_l.shape().dims3()?;
let expected_kv = (total_k, num_heads_k, head_size_og);
if expected_kv != k_l.shape().dims3()? {
candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape())
}
if expected_kv != v_l.shape().dims3()? {
candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape())
}
if head_size_og > 256 {
candle::bail!("only supports head dimension at most 256 (got {head_size_og})")
}
if head_size_og % 8 != 0 {
// TODO: Handle head sizes that are not a multiple of 8 via some padding.
candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})")
}
if num_heads % num_heads_k != 0 {
candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}")
}
let nseqlens_q = seqlens_q_layout.shape().dims1()?;
if nseqlens_q < 2 {
candle::bail!("seqlens_q should have a len >= 2 {nseqlens_q}")
}
let nseqlens_k = seqlens_k_layout.shape().dims1()?;
if nseqlens_k != nseqlens_q {
candle::bail!("seqlens_q and seqlens_k should have the same number of elements {nseqlens_q} <> {nseqlens_k}")
}
let batch_size = nseqlens_q - 1;
let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes {
if alibi_slopes.dtype() != DType::F32 {
candle::bail!(
"DType mismatch alibi_slopes {:?}, expected {:?}",
alibi_slopes.dtype(),
DType::F32
);
}
let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout();
if num_heads != alibi_slopes_layout.shape().dims1()? {
candle::bail!(
"shape mismatch alibi_slopes {:?}, expected {:?}",
alibi_slopes_layout.shape(),
(num_heads)
);
}
let alibi_slopes = match &*alibi_slopes {
candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?,
_ => candle::bail!("alibi_slopes must be a cuda tensor"),
};
let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..);
*alibi_slopes.device_ptr() as *const core::ffi::c_void
} else {
std::ptr::null()
};
// if window_size_left > self.max_seqlen_k or None => -1
let mut window_size_left = self
.window_size_left
.filter(|v| v <= &self.max_seqlen_k)
.map(|v| v as i32)
.unwrap_or(-1);
// if window_size_right > self.max_seqlen_k or None => -1
let mut window_size_right = self
.window_size_right
.filter(|v| v <= &self.max_seqlen_k)
.map(|v| v as i32)
.unwrap_or(-1);
let head_size = round_multiple(head_size_og, 8);
let head_size_rounded = round_multiple(head_size, 32);
let seqlen_q_rounded = round_multiple(self.max_seqlen_q, 128);
let seqlen_k_rounded = round_multiple(self.max_seqlen_k, 128);
let elem_count = out_shape.elem_count();
let dst = unsafe { dev.alloc::<f16>(elem_count) }.w()?;
let softmax_lse = dev
.alloc_zeros::<f32>(batch_size * num_heads * self.max_seqlen_q)
.w()?;
let is_bf16 = if is_bf16 { 1 } else { 0 };
// Causal is the special case where window_size_right == 0 and window_size_left < 0.
// Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
let is_causal = if window_size_left < 0 && window_size_right == 0 {
1
} else {
0
};
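        // When only one side is windowed, materialize the unbounded side as the full key length
        // so the kernel always sees an explicit window.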
if window_size_left < 0 && window_size_right >= 0 {
window_size_left = self.max_seqlen_k as i32;
}
if window_size_left >= 0 && window_size_right < 0 {
window_size_right = self.max_seqlen_k as i32;
}
unsafe {
let q_ptr = *q.device_ptr() as *const core::ffi::c_void;
let k_ptr = *k.device_ptr() as *const core::ffi::c_void;
let v_ptr = *v.device_ptr() as *const core::ffi::c_void;
let dst_ptr = *dst.device_ptr() as *const core::ffi::c_void;
let softmax_lse_ptr = *softmax_lse.device_ptr() as *const core::ffi::c_void;
let seqlens_q_ptr = *seqlens_q.device_ptr() as *const core::ffi::c_int;
let seqlens_k_ptr = *seqlens_k.device_ptr() as *const core::ffi::c_int;
ffi::run_mha(
q_ptr,
k_ptr,
v_ptr,
dst_ptr,
softmax_lse_ptr,
/* alibi_slopes_ptr */ alibi_slopes_ptr,
/* cu_seqlens_q_ptr */ seqlens_q_ptr,
/* cu_seqlens_k_ptr */ seqlens_k_ptr,
/* q_batch_stride */ 0,
/* k_batch_stride */ 0,
/* v_batch_stride */ 0,
/* o_batch_stride */ 0,
/* alibi_slopes_batch_stride */ 0,
/* q_row_stride */ q_stride[q_rank - 3] as u32,
/* k_row_stride */ k_stride[k_rank - 3] as u32,
/* v_row_stride */ v_stride[v_rank - 3] as u32,
/* o_row_stride */ o_stride[o_rank - 3] as u32,
/* q_head_stride */ q_stride[q_rank - 2] as u32,
/* k_head_stride */ k_stride[k_rank - 2] as u32,
/* v_head_stride */ v_stride[v_rank - 2] as u32,
/* o_head_stride */ o_stride[o_rank - 2] as u32,
/* b */ batch_size as u32,
/* h */ num_heads as u32,
/* h_k */ num_heads_k as u32,
/* d */ head_size as u32,
/* d_rounded */ head_size_rounded as u32,
/* softmax_scale*/ self.softmax_scale,
/* seqlen_q */ self.max_seqlen_q as u32,
/* seqlen_k */ self.max_seqlen_k as u32,
/* seqlen_q_rounded */ seqlen_q_rounded as u32,
/* seqlen_k_rounded */ seqlen_k_rounded as u32,
/* is_bf16 */ is_bf16,
/* is_causal */ is_causal,
/* window_size_left */ window_size_left,
/* window_size_right */ window_size_right,
)
}
let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone());
Ok((dst, out_shape))
}
}
impl candle::CustomOp3 for FlashAttnVarLen {
fn name(&self) -> &'static str {
"flash-attn-varlen"
}
fn cpu_fwd(
&self,
_: &CpuStorage,
_: &Layout,
_: &CpuStorage,
_: &Layout,
_: &CpuStorage,
_: &Layout,
) -> Result<(CpuStorage, Shape)> {
candle::bail!("no cpu support for flash-attn")
}
fn cuda_fwd(
&self,
q: &candle::CudaStorage,
q_l: &Layout,
k: &candle::CudaStorage,
k_l: &Layout,
v: &candle::CudaStorage,
v_l: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
match q.dtype() {
candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false),
candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true),
dt => candle::bail!("flash-attn is only supported for f16/bf16 ({dt:?})"),
}
}
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v2 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T * softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q; the number of heads in q has to be divisible by the number of heads in k and v.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum key sequence length for k and v in the batch.
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
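///
/// # Example
///
/// A minimal sketch of packing two sequences (illustrative lengths 3 and 5) into one call. The
/// shapes and scale below are made-up values, and the snippet is marked `ignore` since it needs a
/// CUDA device.
///
/// ```ignore
/// use candle::{DType, Device, Tensor};
///
/// let dev = Device::new_cuda(0)?;
/// let (num_heads, head_dim) = (4usize, 64usize);
/// // Two sequences of lengths 3 and 5 packed along the first dimension: total_q = 8.
/// let q = Tensor::randn(0f32, 1.0, (8usize, num_heads, head_dim), &dev)?.to_dtype(DType::F16)?;
/// let k = q.clone();
/// let v = q.clone();
/// // Cumulative sequence lengths: batch_size + 1 entries, starting at 0.
/// let seqlens = Tensor::new(&[0u32, 3, 8], &dev)?;
/// let out = candle_flash_attn::flash_attn_varlen(
///     &q, &k, &v, &seqlens, &seqlens,
///     /* max_seqlen_q */ 5, /* max_seqlen_k */ 5,
///     1.0 / (head_dim as f32).sqrt(),
///     /* causal */ true,
/// )?;
/// assert_eq!(out.dims(), &[8, num_heads, head_dim]);
/// ```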
pub fn flash_attn_varlen(
q: &Tensor,
k: &Tensor,
v: &Tensor,
seqlens_q: &Tensor,
seqlens_k: &Tensor,
max_seqlen_q: usize,
max_seqlen_k: usize,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
let window_size_left = None;
let window_size_right = if causal { Some(0) } else { None };
let op = FlashAttnVarLen {
softmax_scale,
max_seqlen_q,
max_seqlen_k,
seqlens_q: seqlens_q.clone(),
seqlens_k: seqlens_k.clone(),
alibi_slopes: None,
window_size_left,
window_size_right,
};
q.apply_op3(k, v, op)
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v2 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T * softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q; the number of heads in q has to be divisible by the number of heads in k and v.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum key sequence length for k and v in the batch.
/// * `window_size_left` - Limit attention to at most this many key/value tokens to the left of the current position (`None` for no limit).
/// * `window_size_right` - Limit attention to at most this many key/value tokens to the right of the current position (`None` for no limit).
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`.
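///
/// # Example
///
/// A short sketch of a local (sliding-window) causal call; the 64-token window is an arbitrary
/// illustrative value and `q`, `k`, `v` and `seqlens` are assumed to be packed as in the
/// [`flash_attn_varlen`] example.
///
/// ```ignore
/// let out = candle_flash_attn::flash_attn_varlen_windowed(
///     &q, &k, &v, &seqlens, &seqlens,
///     /* max_seqlen_q */ 5, /* max_seqlen_k */ 5,
///     1.0 / (head_dim as f32).sqrt(),
///     /* window_size_left */ Some(64), // each query attends to at most 64 earlier tokens
///     /* window_size_right */ Some(0), // and to no later tokens, i.e. causal within the window
/// )?;
/// ```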
pub fn flash_attn_varlen_windowed(
q: &Tensor,
k: &Tensor,
v: &Tensor,
seqlens_q: &Tensor,
seqlens_k: &Tensor,
max_seqlen_q: usize,
max_seqlen_k: usize,
softmax_scale: f32,
window_size_left: Option<usize>,
window_size_right: Option<usize>,
) -> Result<Tensor> {
let op = FlashAttnVarLen {
softmax_scale,
max_seqlen_q,
max_seqlen_k,
seqlens_q: seqlens_q.clone(),
seqlens_k: seqlens_k.clone(),
alibi_slopes: None,
window_size_left,
window_size_right,
};
q.apply_op3(k, v, op)
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v2 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T * softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q; the number of heads in q has to be divisible by the number of heads in k and v.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum key sequence length for k and v in the batch.
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
pub fn flash_attn_varlen_alibi(
q: &Tensor,
k: &Tensor,
v: &Tensor,
alibi_slopes: &Tensor,
seqlens_q: &Tensor,
seqlens_k: &Tensor,
max_seqlen_q: usize,
max_seqlen_k: usize,
softmax_scale: f32,
causal: bool,
) -> Result<Tensor> {
let window_size_left = None;
let window_size_right = if causal { Some(0) } else { None };
let op = FlashAttnVarLen {
softmax_scale,
max_seqlen_q,
max_seqlen_k,
seqlens_q: seqlens_q.clone(),
seqlens_k: seqlens_k.clone(),
alibi_slopes: Some(alibi_slopes.clone()),
window_size_left,
window_size_right,
};
q.apply_op3(k, v, op)
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v2 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T * softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q; the number of heads in q has to be divisible by the number of heads in k and v.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum key sequence length for k and v in the batch.
/// * `window_size_left` - Limit attention to at most this many key/value tokens to the left of the current position (`None` for no limit).
/// * `window_size_right` - Limit attention to at most this many key/value tokens to the right of the current position (`None` for no limit).
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`.
pub fn flash_attn_varlen_alibi_windowed(
q: &Tensor,
k: &Tensor,
v: &Tensor,
alibi_slopes: &Tensor,
seqlens_q: &Tensor,
seqlens_k: &Tensor,
max_seqlen_q: usize,
max_seqlen_k: usize,
softmax_scale: f32,
window_size_left: Option<usize>,
window_size_right: Option<usize>,
) -> Result<Tensor> {
let op = FlashAttnVarLen {
softmax_scale,
max_seqlen_q,
max_seqlen_k,
seqlens_q: seqlens_q.clone(),
seqlens_k: seqlens_k.clone(),
alibi_slopes: Some(alibi_slopes.clone()),
window_size_left,
window_size_right,
};
q.apply_op3(k, v, op)
}
| 4 |
0 | hf_public_repos/candle/candle-flash-attn | hf_public_repos/candle/candle-flash-attn/src/ffi.rs | use core::ffi::{c_int, c_void};
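// Raw binding to the flash-attention v2 forward entry point built from the CUDA/C++ sources in
// this crate. The strides are assumed to be element strides as computed from the candle layouts
// in lib.rs, and the cu_seqlens_* pointers refer to cumulative sequence-length buffers.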
extern "C" {
pub(crate) fn run_mha(
q_ptr: *const c_void,
k_ptr: *const c_void,
v_ptr: *const c_void,
o_ptr: *const c_void,
softmax_lse_ptr: *const c_void,
alibi_slopes_ptr: *const c_void,
cu_seqlens_q_ptr: *const i32,
cu_seqlens_k_ptr: *const i32,
q_batch_stride: u32,
k_batch_stride: u32,
v_batch_stride: u32,
o_batch_stride: u32,
alibi_slopes_batch_stride: u32,
q_row_stride: u32,
k_row_stride: u32,
v_row_stride: u32,
o_row_stride: u32,
q_head_stride: u32,
k_head_stride: u32,
v_head_stride: u32,
o_head_stride: u32,
b: u32,
h: u32,
h_k: u32,
d: u32,
d_rounded: u32,
softmax_scale: f32,
seqlen_q: u32,
seqlen_k: u32,
seqlen_q_rounded: u32,
seqlen_k_rounded: u32,
is_bf16: c_int,
is_causal: c_int,
window_size_left: c_int,
window_size_right: c_int,
);
}
| 5 |
0 | hf_public_repos/candle/candle-flash-attn | hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim192_bf16_causal_sm80.cu | // Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
template<>
void run_mha_fwd_<cutlass::bfloat16_t, 192, true>(Flash_fwd_params &params, cudaStream_t stream) {
run_mha_fwd_hdim192<cutlass::bfloat16_t, true>(params, stream);
}
| 6 |
0 | hf_public_repos/candle/candle-flash-attn | hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim192_bf16_sm80.cu | // Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
template<>
void run_mha_fwd_<cutlass::bfloat16_t, 192, false>(Flash_fwd_params &params, cudaStream_t stream) {
run_mha_fwd_hdim192<cutlass::bfloat16_t, false>(params, stream);
}
| 7 |
0 | hf_public_repos/candle/candle-flash-attn | hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim192_fp16_causal_sm80.cu | // Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
template<>
void run_mha_fwd_<cutlass::half_t, 192, true>(Flash_fwd_params &params, cudaStream_t stream) {
run_mha_fwd_hdim192<cutlass::half_t, true>(params, stream);
}
| 8 |
0 | hf_public_repos/candle/candle-flash-attn | hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim224_fp16_causal_sm80.cu | // Copyright (c) 2023, Tri Dao.
// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"
#include "flash_fwd_launch_template.h"
template<>
void run_mha_fwd_<cutlass::half_t, 224, true>(Flash_fwd_params &params, cudaStream_t stream) {
run_mha_fwd_hdim224<cutlass::half_t, true>(params, stream);
}
| 9 |