Dataset columns:
text: string, lengths 7 to 328k
id: string, lengths 14 to 166
metadata: dict
__index_level_0__: int64, 0 to 459
# Export the PyTorch YOLOv3 weights as a NumPy .npz archive (written to yolo-v3.ot).
# `model` is assumed to be the PyTorch YOLOv3 module loaded earlier in the session.
import numpy as np

def remove_prefix(text, prefix):
    return text[text.startswith(prefix) and len(prefix):]

nps = {}
for k, v in model.state_dict().items():
    k = remove_prefix(k, 'module_list.')
    nps[k] = v.detach().numpy()
np.savez('yolo-v3.ot', **nps)
candle/candle-examples/examples/yolo-v3/extract-weights.py/0
{ "file_path": "candle/candle-examples/examples/yolo-v3/extract-weights.py", "repo_id": "candle", "token_count": 98 }
23
use std::io::prelude::*;

pub trait Sample {
    fn to_i16(&self) -> i16;
}

impl Sample for f32 {
    fn to_i16(&self) -> i16 {
        (self.clamp(-1.0, 1.0) * 32767.0) as i16
    }
}

impl Sample for f64 {
    fn to_i16(&self) -> i16 {
        (self.clamp(-1.0, 1.0) * 32767.0) as i16
    }
}

impl Sample for i16 {
    fn to_i16(&self) -> i16 {
        *self
    }
}

pub fn write_pcm_as_wav<W: Write, S: Sample>(
    w: &mut W,
    samples: &[S],
    sample_rate: u32,
) -> std::io::Result<()> {
    let len = 12u32; // header
    let len = len + 24u32; // fmt
    let len = len + samples.len() as u32 * 2 + 8; // data
    let n_channels = 1u16;
    let bytes_per_second = sample_rate * 2 * n_channels as u32;
    w.write_all(b"RIFF")?;
    w.write_all(&(len - 8).to_le_bytes())?; // total length minus 8 bytes
    w.write_all(b"WAVE")?;
    // Format block
    w.write_all(b"fmt ")?;
    w.write_all(&16u32.to_le_bytes())?; // block len minus 8 bytes
    w.write_all(&1u16.to_le_bytes())?; // PCM
    w.write_all(&n_channels.to_le_bytes())?; // one channel
    w.write_all(&sample_rate.to_le_bytes())?;
    w.write_all(&bytes_per_second.to_le_bytes())?;
    w.write_all(&2u16.to_le_bytes())?; // 2 bytes of data per sample
    w.write_all(&16u16.to_le_bytes())?; // bits per sample
    // Data block
    w.write_all(b"data")?;
    w.write_all(&(samples.len() as u32 * 2).to_le_bytes())?;
    for sample in samples.iter() {
        w.write_all(&sample.to_i16().to_le_bytes())?
    }
    Ok(())
}
candle/candle-examples/src/wav.rs/0
{ "file_path": "candle/candle-examples/src/wav.rs", "repo_id": "candle", "token_count": 729 }
24
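Below is a minimal usage sketch for the `write_pcm_as_wav` helper from wav.rs above, assuming the function is in scope; the output file name and sine-wave parameters are arbitrary choices for illustration only.

// Hypothetical caller for write_pcm_as_wav (uses only std besides that function).
use std::fs::File;

fn main() -> std::io::Result<()> {
    let sample_rate = 16_000u32;
    // One second of a 440 Hz sine wave, f32 samples in [-1, 1].
    let samples: Vec<f32> = (0..sample_rate)
        .map(|i| (2.0 * std::f32::consts::PI * 440.0 * i as f32 / sample_rate as f32).sin())
        .collect();
    let mut file = File::create("sine.wav")?;
    write_pcm_as_wav(&mut file, &samples, sample_rate)?;
    Ok(())
}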
use core::ffi::{c_int, c_void};

extern "C" {
    pub(crate) fn run_mha(
        q_ptr: *const c_void,
        k_ptr: *const c_void,
        v_ptr: *const c_void,
        o_ptr: *const c_void,
        softmax_lse_ptr: *const c_void,
        alibi_slopes_ptr: *const c_void,
        cu_seqlens_q_ptr: *const i32,
        cu_seqlens_k_ptr: *const i32,
        q_batch_stride: u32,
        k_batch_stride: u32,
        v_batch_stride: u32,
        o_batch_stride: u32,
        alibi_slopes_batch_stride: u32,
        q_row_stride: u32,
        k_row_stride: u32,
        v_row_stride: u32,
        o_row_stride: u32,
        q_head_stride: u32,
        k_head_stride: u32,
        v_head_stride: u32,
        o_head_stride: u32,
        b: u32,
        h: u32,
        h_k: u32,
        d: u32,
        d_rounded: u32,
        softmax_scale: f32,
        seqlen_q: u32,
        seqlen_k: u32,
        seqlen_q_rounded: u32,
        seqlen_k_rounded: u32,
        is_bf16: c_int,
        is_causal: c_int,
        window_size_left: c_int,
        window_size_right: c_int,
    );
}
candle/candle-flash-attn/src/ffi.rs/0
{ "file_path": "candle/candle-flash-attn/src/ffi.rs", "repo_id": "candle", "token_count": 670 }
25
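The many `*_stride` arguments to `run_mha` describe element strides of the attention tensors. As a rough sketch (an assumption made for illustration, not taken from the file above), for a contiguous query tensor laid out as (batch, seqlen, heads, head_dim) the batch/row/head strides could be computed like this:

// Sketch: element strides for a contiguous [batch, seqlen, heads, head_dim] tensor.
// How these map onto run_mha's q_batch_stride / q_row_stride / q_head_stride is an
// assumption for illustration; the real caller derives them from the tensor layout.
fn contiguous_strides(seqlen: usize, heads: usize, head_dim: usize) -> (usize, usize, usize) {
    let head_stride = head_dim;                   // step between consecutive heads
    let row_stride = heads * head_dim;            // step between sequence positions
    let batch_stride = seqlen * heads * head_dim; // step between batch entries
    (batch_stride, row_stride, head_stride)
}

fn main() {
    let (b, r, h) = contiguous_strides(128, 8, 64);
    println!("batch_stride={b} row_stride={r} head_stride={h}");
}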
// Kernels adapted from llama.cpp ggml-cuda.cu // https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda.cu #include "cuda_fp16.h" #include "cuda_bf16.h" #include<stdint.h> #ifdef GGML_QKK_64 #define QK_K 64 #define K_SCALE_SIZE 4 #else #define QK_K 256 #define K_SCALE_SIZE 12 #endif #undef GGML_CUDA_F16 #define GGML_CUDA_DMMV_X 32 #define CUDA_QUANTIZE_BLOCK_SIZE 256 #define CUDA_DEQUANTIZE_BLOCK_SIZE 256 #define K_QUANTS_PER_ITERATION 2 typedef uint16_t ggml_fp16_t; typedef float dfloat; // dequantize float typedef float2 dfloat2; typedef void (*dequantize_kernel_t)(const void * vx, const int ib, const int iqs, dfloat2 & v); static __device__ __forceinline__ int get_int_from_int8(const int8_t * x8, const int & i32) { const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment int x32 = 0; x32 |= x16[0] << 0; x32 |= x16[1] << 16; return x32; } static __device__ __forceinline__ int get_int_from_uint8(const uint8_t * x8, const int & i32) { const uint16_t * x16 = (const uint16_t *) (x8 + sizeof(int) * i32); // assume at least 2 byte alignment int x32 = 0; x32 |= x16[0] << 0; x32 |= x16[1] << 16; return x32; } static __device__ __forceinline__ int get_int_from_int8_aligned(const int8_t * x8, const int & i32) { return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment } static __device__ __forceinline__ int get_int_from_uint8_aligned(const uint8_t * x8, const int & i32) { return *((const int *) (x8 + sizeof(int) * i32)); // assume at least 4 byte alignment } #define CUDA_USE_TENSOR_CORES #define WARP_SIZE 32 #define CUDART_HMAX 11070 // CUDA 11.7, min. ver. for which __hmax and __hmax2 are known to work (may be higher than needed) #define CC_PASCAL 600 #define MIN_CC_DP4A 610 // minimum compute capability for __dp4a, an intrinsic for byte-wise dot products #define CC_VOLTA 700 #define CC_OFFSET_AMD 1000000 #define CC_RDNA1 (CC_OFFSET_AMD + 1010) #define CC_RDNA2 (CC_OFFSET_AMD + 1030) #define CC_RDNA3 (CC_OFFSET_AMD + 1100) #define MMQ_X_Q4_0_RDNA2 64 #define MMQ_Y_Q4_0_RDNA2 128 #define NWARPS_Q4_0_RDNA2 8 #define MMQ_X_Q4_0_RDNA1 64 #define MMQ_Y_Q4_0_RDNA1 64 #define NWARPS_Q4_0_RDNA1 8 #if defined(CUDA_USE_TENSOR_CORES) #define MMQ_X_Q4_0_AMPERE 4 #define MMQ_Y_Q4_0_AMPERE 32 #define NWARPS_Q4_0_AMPERE 4 #else #define MMQ_X_Q4_0_AMPERE 64 #define MMQ_Y_Q4_0_AMPERE 128 #define NWARPS_Q4_0_AMPERE 4 #endif #define MMQ_X_Q4_0_PASCAL 64 #define MMQ_Y_Q4_0_PASCAL 64 #define NWARPS_Q4_0_PASCAL 8 // QK = number of values after dequantization // QR = QK / number of values before dequantization // QI = number of 32 bit integers before dequantization #define QK4_0 32 #define QR4_0 2 #define QI4_0 (QK4_0 / (4 * QR4_0)) typedef struct { half d; // delta uint8_t qs[QK4_0 / 2]; // nibbles / quants } block_q4_0; static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0 block size/padding"); #define QK4_1 32 #define QR4_1 2 #define QI4_1 (QK4_1 / (4 * QR4_1)) typedef struct { half2 dm; // dm.x = delta, dm.y = min uint8_t qs[QK4_1 / 2]; // nibbles / quants } block_q4_1; static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong q4_1 block size/padding"); #define QK5_0 32 #define QR5_0 2 #define QI5_0 (QK5_0 / (4 * QR5_0)) typedef struct { half d; // delta uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_0 / 2]; // nibbles / quants } block_q5_0; static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding"); #define QK5_1 32 
#define QR5_1 2 #define QI5_1 (QK5_1 / (4 * QR5_1)) typedef struct { half2 dm; // dm.x = delta, dm.y = min uint8_t qh[4]; // 5-th bit of quants uint8_t qs[QK5_1 / 2]; // nibbles / quants } block_q5_1; static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5_1 / 2, "wrong q5_1 block size/padding"); #define QK8_0 32 #define QR8_0 1 #define QI8_0 (QK8_0 / (4 * QR8_0)) typedef struct { half d; // delta int8_t qs[QK8_0]; // quants } block_q8_0; static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding"); #define QK8_1 32 #define QR8_1 1 #define QI8_1 (QK8_1 / (4 * QR8_1)) typedef struct { half2 ds; // ds.x = delta, ds.y = sum int8_t qs[QK8_0]; // quants } block_q8_1; static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding"); typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & iqs); typedef void (*allocate_tiles_cuda_t)(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc); typedef void (*load_tiles_cuda_t)( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row); typedef float (*vec_dot_q_mul_mat_cuda_t)( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ms, const int & i, const int & j, const int & k); #define QR2_K 4 #define QI2_K (QK_K / (4*QR2_K)) typedef struct { uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits uint8_t qs[QK_K/4]; // quants half2 dm; // super-block scale for quantized scales/mins } block_q2_K; static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "wrong q2_K block size/padding"); #define QR3_K 4 #define QI3_K (QK_K / (4*QR3_K)) typedef struct { uint8_t hmask[QK_K/8]; // quants - high bit uint8_t qs[QK_K/4]; // quants - low 2 bits #ifdef GGML_QKK_64 uint8_t scales[2]; // scales, quantized with 8 bits #else uint8_t scales[K_SCALE_SIZE]; // scales, quantized with 6 bits #endif half d; // super-block scale } block_q3_K; //static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 + K_SCALE_SIZE, "wrong q3_K block size/padding"); #define QR4_K 2 #define QI4_K (QK_K / (4*QR4_K)) #ifdef GGML_QKK_64 typedef struct { half dm[2]; // super-block scales/mins uint8_t scales[2]; // 4-bit block scales/mins uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; static_assert(sizeof(block_q4_K) == sizeof(half2) + QK_K/2 + 2, "wrong q4_K block size/padding"); #else typedef struct { half2 dm; // super-block scale for quantized scales/mins uint8_t scales[3*QK_K/64]; // scales, quantized with 6 bits uint8_t qs[QK_K/2]; // 4--bit quants } block_q4_K; static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + 3*QK_K/64 + QK_K/2, "wrong q4_K block size/padding"); #endif #define QR5_K 2 #define QI5_K (QK_K / (4*QR5_K)) #ifdef GGML_QKK_64 typedef struct { half d; // super-block scale int8_t scales[QK_K/16]; // block scales uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; static_assert(sizeof(block_q5_K) == sizeof(ggml_fp16_t) + QK_K/2 + QK_K/8 + QK_K/16, "wrong q5_K block size/padding"); #else typedef struct { half2 dm; // super-block scale for quantized scales/mins uint8_t scales[K_SCALE_SIZE]; // scales and mins, quantized with 6 bits 
uint8_t qh[QK_K/8]; // quants, high bit uint8_t qs[QK_K/2]; // quants, low 4 bits } block_q5_K; static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/2 + QK_K/8, "wrong q5_K block size/padding"); #endif #define QR6_K 2 #define QI6_K (QK_K / (4*QR6_K)) typedef struct { uint8_t ql[QK_K/2]; // quants, lower 4 bits uint8_t qh[QK_K/4]; // quants, upper 2 bits int8_t scales[QK_K/16]; // scales half d; // delta } block_q6_K; static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_K block size/padding"); // In llama.cpp this is only used for intermediate quantization and dot products typedef struct { float d; // delta int8_t qs[QK_K]; // quants int16_t bsums[QK_K/16]; // sum of quants in groups of 16 } block_q8_K; static_assert(sizeof(block_q8_K) == sizeof(float) + QK_K + QK_K/16*sizeof(int16_t), "wrong q8_K block size/padding"); // VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called // MMVQ = mul_mat_vec_q, MMQ = mul_mat_q #define VDR_Q4_0_Q8_1_MMVQ 2 #define VDR_Q4_0_Q8_1_MMQ 4 template <int vdr> static __device__ __forceinline__ float vec_dot_q4_0_q8_1_impl( const int * v, const int * u, const float & d4, const half2 & ds8) { int sumi = 0; #pragma unroll for (int i = 0; i < vdr; ++i) { const int vi0 = (v[i] >> 0) & 0x0F0F0F0F; const int vi1 = (v[i] >> 4) & 0x0F0F0F0F; // SIMD dot product of quantized values sumi = __dp4a(vi0, u[2*i+0], sumi); sumi = __dp4a(vi1, u[2*i+1], sumi); } const float2 ds8f = __half22float2(ds8); // second part effectively subtracts 8 from each quant value const float res = d4 * (sumi * ds8f.x - (8*vdr/QI4_0) * ds8f.y); printf("%f %f %f %f %f %f\n", res, d4, sumi, ds8f.x, vdr/QI4_0, ds8f.y); return res; } static __device__ __forceinline__ float vec_dot_q4_0_q8_1_mul_mat( const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc, const int * __restrict__ y_qs, const half2 * __restrict__ y_ds, const int & i, const int & j, const int & k) { (void)x_qh; (void)x_sc; const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2)); const float * x_dmf = (const float *) x_dm; int u[2*VDR_Q4_0_Q8_1_MMQ]; #pragma unroll for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) { u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE]; u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE]; } return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ> (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]); } template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x, int mmq_y, int nwarps, allocate_tiles_cuda_t allocate_tiles, load_tiles_cuda_t load_tiles, int vdr, vec_dot_q_mul_mat_cuda_t vec_dot> static __device__ __forceinline__ void mul_mat_q( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const block_q_t * x = (const block_q_t *) vx; const block_q8_1 * y = (const block_q8_1 *) vy; const int blocks_per_row_x = ncols_x / qk; const int blocks_per_col_y = nrows_y / QK8_1; const int blocks_per_warp = WARP_SIZE / qi; const int & ncols_dst = ncols_y; const int row_dst_0 = blockIdx.x*mmq_y; const int & row_x_0 = row_dst_0; const int col_dst_0 = blockIdx.y*mmq_x; const int & col_y_0 = col_dst_0; int * tile_x_ql = nullptr; half2 * tile_x_dm = nullptr; int * tile_x_qh = nullptr; int * tile_x_sc 
= nullptr; allocate_tiles(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc); __shared__ int tile_y_qs[mmq_x * WARP_SIZE]; __shared__ half2 tile_y_ds[mmq_x * WARP_SIZE/QI8_1]; float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}}; for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) { load_tiles(x + row_x_0*blocks_per_row_x + ib0, tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, threadIdx.y, nrows_x-row_x_0-1, threadIdx.x, blocks_per_row_x); #pragma unroll for (int ir = 0; ir < qr; ++ir) { const int kqs = ir*WARP_SIZE + threadIdx.x; const int kbxd = kqs / QI8_1; #pragma unroll for (int i = 0; i < mmq_x; i += nwarps) { const int col_y_eff = min(col_y_0 + threadIdx.y + i, ncols_y-1); // to prevent out-of-bounds memory accesses const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd]; const int index_y = (threadIdx.y + i) * WARP_SIZE + kqs % WARP_SIZE; tile_y_qs[index_y] = get_int_from_int8_aligned(by0->qs, threadIdx.x % QI8_1); } #pragma unroll for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) { const int ids = (ids0 + threadIdx.y * QI8_1 + threadIdx.x / (WARP_SIZE/QI8_1)) % mmq_x; const int kby = threadIdx.x % (WARP_SIZE/QI8_1); const int col_y_eff = min(col_y_0 + ids, ncols_y-1); // if the sum is not needed it's faster to transform the scale to f32 ahead of time const half2 * dsi_src = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + ir*(WARP_SIZE/QI8_1) + kby].ds; half2 * dsi_dst = &tile_y_ds[ids * (WARP_SIZE/QI8_1) + kby]; if (need_sum) { *dsi_dst = *dsi_src; } else { float * dfi_dst = (float *) dsi_dst; *dfi_dst = __low2half(*dsi_src); } } __syncthreads(); // #pragma unroll // unrolling this loop causes too much register pressure for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) { #pragma unroll for (int j = 0; j < mmq_x; j += nwarps) { #pragma unroll for (int i = 0; i < mmq_y; i += WARP_SIZE) { sum[i/WARP_SIZE][j/nwarps] += vec_dot( tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc, tile_y_qs, tile_y_ds, threadIdx.x + i, threadIdx.y + j, k); } } } __syncthreads(); } } #pragma unroll for (int j = 0; j < mmq_x; j += nwarps) { const int col_dst = col_dst_0 + j + threadIdx.y; if (col_dst >= ncols_dst) { return; } #pragma unroll for (int i = 0; i < mmq_y; i += WARP_SIZE) { const int row_dst = row_dst_0 + threadIdx.x + i; if (row_dst >= nrows_dst) { continue; } dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps]; } } } template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0( const void * __restrict__ vx, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh, int * __restrict__ x_sc, const int & i_offset, const int & i_max, const int & k, const int & blocks_per_row) { (void)x_qh; (void)x_sc; const int kbx = k / QI4_0; const int kqsx = k % QI4_0; const block_q4_0 * bx0 = (const block_q4_0 *) vx; float * x_dmf = (float *) x_dm; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps) { int i = i0 + i_offset; if (need_check) { i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx; x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx); // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d; } const int blocks_per_tile_x_row = WARP_SIZE / QI4_0; const int kbxd = k % blocks_per_tile_x_row; #pragma unroll for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) { int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row; if (need_check) { i = min(i, i_max); } const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd; x_dmf[i * (WARP_SIZE/QI4_0) + 
i / QI4_0 + kbxd] = bxi->d; } } template <int mmq_y> static __device__ __forceinline__ void allocate_tiles_q4_0(int ** x_ql, half2 ** x_dm, int ** x_qh, int ** x_sc) { (void)x_qh; (void)x_sc; __shared__ int tile_x_qs[mmq_y * (WARP_SIZE) + mmq_y]; __shared__ float tile_x_d[mmq_y * (WARP_SIZE/QI4_0) + mmq_y/QI4_0]; *x_ql = tile_x_qs; *x_dm = (half2 *) tile_x_d; } extern "C" __global__ void mul_mat_q4_0_check( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q4_0_AMPERE; const int mmq_y = MMQ_Y_Q4_0_AMPERE; const int nwarps = NWARPS_Q4_0_AMPERE; mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>, load_tiles_q4_0<mmq_y, nwarps, true>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } extern "C" __global__ void mul_mat_q4_0_no_check( const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst, const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst) { const int mmq_x = MMQ_X_Q4_0_AMPERE; const int mmq_y = MMQ_Y_Q4_0_AMPERE; const int nwarps = NWARPS_Q4_0_AMPERE; mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps, allocate_tiles_q4_0<mmq_y>, load_tiles_q4_0<mmq_y, nwarps, false>, VDR_Q4_0_Q8_1_MMQ, vec_dot_q4_0_q8_1_mul_mat> (vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst); } static __device__ __forceinline__ void dequantize_q4_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q4_0 * x = (const block_q4_0 *) vx; const dfloat d = x[ib].d; const int vui = x[ib].qs[iqs]; v.x = vui & 0xF; v.y = vui >> 4; #ifdef GGML_CUDA_F16 v = __hsub2(v, {8.0f, 8.0f}); v = __hmul2(v, {d, d}); #else v.x = (v.x - 8.0f) * d; v.y = (v.y - 8.0f) * d; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q4_1(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q4_1 * x = (const block_q4_1 *) vx; const dfloat d = __low2half(x[ib].dm); const dfloat m = __high2half(x[ib].dm); const int vui = x[ib].qs[iqs]; v.x = vui & 0xF; v.y = vui >> 4; #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); v = __hadd2(v, {m, m}); #else v.x = (v.x * d) + m; v.y = (v.y * d) + m; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q5_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q5_0 * x = (const block_q5_0 *) vx; const dfloat d = x[ib].d; uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y = ((x[ib].qs[iqs] >> 4) | xh_1); #ifdef GGML_CUDA_F16 v = __hsub2(v, {16.0f, 16.0f}); v = __hmul2(v, {d, d}); #else v.x = (v.x - 16.0f) * d; v.y = (v.y - 16.0f) * d; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q5_1(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q5_1 * x = (const block_q5_1 *) vx; const dfloat d = __low2half(x[ib].dm); const dfloat m = __high2half(x[ib].dm); uint32_t qh; memcpy(&qh, x[ib].qh, sizeof(qh)); const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10; const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10; v.x = ((x[ib].qs[iqs] & 0xf) | xh_0); v.y = ((x[ib].qs[iqs] >> 4) | xh_1); #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); v = __hadd2(v, {m, m}); #else v.x = (v.x * d) + m; v.y = (v.y * d) + 
m; #endif // GGML_CUDA_F16 } static __device__ __forceinline__ void dequantize_q8_0(const void * vx, const int ib, const int iqs, dfloat2 & v){ const block_q8_0 * x = (const block_q8_0 *) vx; const dfloat d = x[ib].d; v.x = x[ib].qs[iqs + 0]; v.y = x[ib].qs[iqs + 1]; #ifdef GGML_CUDA_F16 v = __hmul2(v, {d, d}); #else v.x *= d; v.y *= d; #endif // GGML_CUDA_F16 } template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t> static __device__ void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int k) { const int i = 2*(blockDim.x*blockIdx.x + threadIdx.x); if (i >= k) { return; } const int ib = i/qk; // block index const int iqs = (i%qk)/qr; // quant index const int iybs = i - i%qk; // y block start index const int y_offset = qr == 1 ? 1 : qk/2; // dequantize dfloat2 v; dequantize_kernel(vx, ib, iqs, v); y[iybs + iqs + 0] = v.x; y[iybs + iqs + y_offset] = v.y; } extern "C" __global__ void dequantize_block_q4_0(const void * __restrict__ vx, float * __restrict__ yy, int nb32) { const int i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int ib = 8*i + ir; if (ib >= nb32) { return; } float * y = yy + 256*i + 32*ir + 4*il; const block_q4_0 * x = (const block_q4_0 *)vx + ib; const float d = __half2float(x->d); const float dm = -8*d; const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d * (q[l] & 0xF) + dm; y[l+16] = d * (q[l] >> 4) + dm; } } extern "C" __global__ void dequantize_block_q4_1(const void * __restrict__ vx, float * __restrict__ yy, int nb32) { const int i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int ib = 8*i + ir; if (ib >= nb32) { return; } float * y = yy + 256*i + 32*ir + 4*il; const block_q4_1 * x = (const block_q4_1 *)vx + ib; const float2 d = __half22float2(x->dm); const uint8_t * q = x->qs + 4*il; for (int l = 0; l < 4; ++l) { y[l+ 0] = d.x * (q[l] & 0xF) + d.y; y[l+16] = d.x * (q[l] >> 4) + d.y; } } //================================== k-quants extern "C" __global__ void dequantize_block_q2_K(const void * __restrict__ vx, float * __restrict__ yy) { const int i = blockIdx.x; const block_q2_K * x = (const block_q2_K *) vx; const int tid = threadIdx.x; #if QK_K == 256 const int n = tid/32; const int l = tid - 32*n; const int is = 8*n + l/16; const uint8_t q = x[i].qs[32*n + l]; float * y = yy + i*QK_K + 128*n; float dall = __low2half(x[i].dm); float dmin = __high2half(x[i].dm); y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4); y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4); y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4); #else const int is = tid/16; // 0 or 1 const int il = tid%16; // 0...15 const uint8_t q = x[i].qs[il] >> (2*is); float * y = yy + i*QK_K + 16*is + il; float dall = __low2half(x[i].dm); float dmin = __high2half(x[i].dm); y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4); y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4); #endif } extern "C" __global__ void dequantize_block_q3_K(const void * __restrict__ vx, float * __restrict__ yy) { const int i = blockIdx.x; const block_q3_K * x = (const block_q3_K *) vx; #if QK_K == 256 const int r = 
threadIdx.x/4; const int tid = r/2; const int is0 = r%2; const int l0 = 16*is0 + 4*(threadIdx.x%4); const int n = tid / 4; const int j = tid - 4*n; uint8_t m = 1 << (4*n + j); int is = 8*n + 2*j + is0; int shift = 2*j; int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) : is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) : is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) : (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4); float d_all = x[i].d; float dl = d_all * (us - 32); float * y = yy + i*QK_K + 128*n + 32*j; const uint8_t * q = x[i].qs + 32*n; const uint8_t * hm = x[i].hmask; for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4)); #else const int tid = threadIdx.x; const int is = tid/16; // 0 or 1 const int il = tid%16; // 0...15 const int im = il/8; // 0...1 const int in = il%8; // 0...7 float * y = yy + i*QK_K + 16*is + il; const uint8_t q = x[i].qs[il] >> (2*is); const uint8_t h = x[i].hmask[in] >> (2*is + im); const float d = (float)x[i].d; if (is == 0) { y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); } else { y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4)); y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4)); } #endif } #if QK_K == 256 static inline __device__ void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) { if (j < 4) { d = q[j] & 63; m = q[j + 4] & 63; } else { d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4); m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4); } } #endif extern "C" __global__ void dequantize_block_q4_K(const void * __restrict__ vx, float * __restrict__ yy) { const block_q4_K * x = (const block_q4_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int is = 2*il; const int n = 4; float * y = yy + i*QK_K + 64*il + n*ir; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint8_t * q = x[i].qs + 32*il + n*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; for (int l = 0; l < n; ++l) { y[l + 0] = d1 * (q[l] & 0xF) - m1; y[l +32] = d2 * (q[l] >> 4) - m2; } #else const int tid = threadIdx.x; const uint8_t * q = x[i].qs; float * y = yy + i*QK_K; const float d = (float)x[i].dm[0]; const float m = (float)x[i].dm[1]; y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4); y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4); #endif } extern "C" __global__ void dequantize_block_q5_K(const void * __restrict__ vx, float * __restrict__ yy) { const block_q5_K * x = (const block_q5_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 64 threads - this is very slightly better than the one below const int tid = threadIdx.x; const int il = tid/16; // il is in 0...3 const int ir = tid%16; // ir is in 0...15 const int is = 2*il; // is is in 0...6 float * y = yy + i*QK_K + 64*il + 2*ir; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint8_t * ql = x[i].qs + 32*il + 2*ir; const uint8_t * qh = x[i].qh + 
2*ir; uint8_t sc, m; get_scale_min_k4(is + 0, x[i].scales, sc, m); const float d1 = dall * sc; const float m1 = dmin * m; get_scale_min_k4(is + 1, x[i].scales, sc, m); const float d2 = dall * sc; const float m2 = dmin * m; uint8_t hm = 1 << (2*il); y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1; y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1; hm <<= 1; y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2; y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2; #else const int tid = threadIdx.x; const uint8_t q = x[i].qs[tid]; const int im = tid/8; // 0...3 const int in = tid%8; // 0...7 const int is = tid/16; // 0 or 1 const uint8_t h = x[i].qh[in] >> im; const float d = x[i].d; float * y = yy + i*QK_K + tid; y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16)); y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16)); #endif } extern "C" __global__ void dequantize_block_q6_K(const void * __restrict__ vx, float * __restrict__ yy) { const block_q6_K * x = (const block_q6_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 64 threads - this is very slightly better than the one below const int tid = threadIdx.x; const int ip = tid/32; // ip is 0 or 1 const int il = tid - 32*ip; // 0...32 const int is = 8*ip + il/16; float * y = yy + i*QK_K + 128*ip + il; const float d = x[i].d; const uint8_t * ql = x[i].ql + 64*ip + il; const uint8_t qh = x[i].qh[32*ip + il]; const int8_t * sc = x[i].scales + is; y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32); y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32); y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32); #else // assume 32 threads const int tid = threadIdx.x; const int ip = tid/16; // 0 or 1 const int il = tid - 16*ip; // 0...15 float * y = yy + i*QK_K + 16*ip + il; const float d = x[i].d; const uint8_t ql = x[i].ql[16*ip + il]; const uint8_t qh = x[i].qh[il] >> (2*ip); const int8_t * sc = x[i].scales; y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32); y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32); #endif } extern "C" __global__ void dequantize_block_q8_0(const void * __restrict__ vx, float * __restrict__ yy, int nb32) { const int i = blockIdx.x; // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int ib = 8*i + ir; if (ib >= nb32) { return; } float * y = yy + 256*i + 32*ir + 8*il; const block_q8_0 * x = (const block_q8_0 *)vx + ib; const float d = __half2float(x->d); const int8_t * q = x->qs + 8*il; for (int l = 0; l < 8; ++l) { y[l] = d * q[l]; } } extern "C" __global__ void dequantize_block_q8_K(const void * __restrict__ vx, float * __restrict__ yy) { const block_q8_K * x = (const block_q8_K *) vx; const int i = blockIdx.x; #if QK_K == 256 // assume 32 threads const int tid = threadIdx.x; const int il = tid/8; const int ir = tid%8; const int n = 8; float * y = yy + i*QK_K + 64*il + n*ir; const int8_t * q = x[i].qs + 64*il + n*ir; for (int l = 0; l < n; ++l) { y[l] = q[l] * x[i].d; } #else const int tid = threadIdx.x; const uint8_t * q = x[i].qs; float * y = yy + i*QK_K; y[tid] = x[i].d * x[i].scales[0]; #endif } extern "C" __global__ void dequantize_block_q5_0(const void * __restrict__ vx, float * __restrict__ yy, int nb32) { return dequantize_block<QK5_0, QR5_0, dequantize_q5_0>(vx, yy, nb32); } extern "C" __global__ 
void dequantize_block_q5_1(const void * __restrict__ vx, float * __restrict__ yy, int nb32) { return dequantize_block<QK5_1, QR5_1, dequantize_q5_1>(vx, yy, nb32); } template <int qk, int qr, dequantize_kernel_t dequantize_kernel> static __device__ void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows) { // qk = quantized weights per x block // qr = number of quantized weights per data value in x block const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row >= nrows) { return; } const int tid = threadIdx.x; const int iter_stride = 2*GGML_CUDA_DMMV_X; const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter const int y_offset = qr == 1 ? 1 : qk/2; // partial sum for each thread #ifdef GGML_CUDA_F16 half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics #else float tmp = 0.0f; #endif // GGML_CUDA_F16 for (int i = 0; i < ncols; i += iter_stride) { const int col = i + vals_per_iter*tid; const int ib = (row*ncols + col)/qk; // x block index const int iqs = (col%qk)/qr; // x quant index const int iybs = col - col%qk; // y block start index // processing >2 values per i iter is faster for fast GPUs #pragma unroll for (int j = 0; j < vals_per_iter; j += 2) { // process 2 vals per j iter // dequantize // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val dfloat2 v; dequantize_kernel(vx, ib, iqs + j/qr, v); // matrix multiplication // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2 #ifdef GGML_CUDA_F16 tmp += __hmul2(v, { y[iybs + iqs + j/qr + 0], y[iybs + iqs + j/qr + y_offset] }); #else tmp += v.x * y[iybs + iqs + j/qr + 0]; tmp += v.y * y[iybs + iqs + j/qr + y_offset]; #endif // GGML_CUDA_F16 } } // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { #ifdef GGML_CUDA_F16 dst[row] = tmp.x + tmp.y; #else dst[row] = tmp; #endif // GGML_CUDA_F16 } } extern "C" __global__ void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) { dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>(vx, y, dst, ncols, nrows); } extern "C" __global__ void dequantize_mul_mat_vec_q2_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); const int row = 
blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q2_K * x = (const block_q2_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...15 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int step = 16/K_QUANTS_PER_ITERATION; const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0...15 or 0...7 const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2 const int q_offset = 32*im + l0; const int s_offset = 8*im; const int y_offset = 128*im + l0; uint32_t aux[4]; const uint8_t * d = (const uint8_t *)aux; const uint8_t * m = (const uint8_t *)(aux + 2); for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * q = x[i].qs + q_offset; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset); aux[0] = a[0] & 0x0f0f0f0f; aux[1] = a[1] & 0x0f0f0f0f; aux[2] = (a[0] >> 4) & 0x0f0f0f0f; aux[3] = (a[1] >> 4) & 0x0f0f0f0f; float sum1 = 0, sum2 = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3) + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3) + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3) + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3) + y[l+16] * d[1] * ((q[l+16] >> 0) & 3) + y[l+48] * d[3] * ((q[l+16] >> 2) & 3) + y[l+80] * d[5] * ((q[l+16] >> 4) & 3) +y[l+112] * d[7] * ((q[l+16] >> 6) & 3); sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6] + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7]; } tmp += dall * sum1 - dmin * sum2; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 const int offset = tid * K_QUANTS_PER_ITERATION; uint32_t uaux[2]; const uint8_t * d = (const uint8_t *)uaux; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + offset; const uint8_t * q = x[i].qs + offset; const uint32_t * s = (const uint32_t *)x[i].scales; uaux[0] = s[0] & 0x0f0f0f0f; uaux[1] = (s[0] >> 4) & 0x0f0f0f0f; const float2 dall = __half22float2(x[i].dm); float sum1 = 0, sum2 = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { const uint8_t ql = q[l]; sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3) + y[l+16] * d[1] * ((ql >> 2) & 3) + y[l+32] * d[2] * ((ql >> 4) & 3) + y[l+48] * d[3] * ((ql >> 6) & 3); sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7]; } tmp += dall.x * sum1 - dall.y * sum2; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q3_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q3_K * x = (const block_q3_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const uint16_t kmask1 = 0x0303; const uint16_t kmask2 = 0x0f0f; const int tid = 
threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop const int step = 16/K_QUANTS_PER_ITERATION; const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... const int in = tid - step*im; // 0....15 or 0...7 const uint8_t m = 1 << (4*im); const int l0 = n*in; // 0...15 or 0...14 in steps of 2 const int q_offset = 32*im + l0; const int y_offset = 128*im + l0; uint16_t utmp[4]; const int8_t * s = (const int8_t *)utmp; const uint16_t s_shift = 4*im; for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * q = x[i].qs + q_offset; const uint8_t * h = x[i].hmask + l0; const uint16_t * a = (const uint16_t *)x[i].scales; utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4); utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4); utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4); utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4); const float d = x[i].d; float sum = 0; for (int l = 0; l < n; ++l) { sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4)) + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4)) + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4)) + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4)); sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4)) + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4)) + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4)) + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4)); } tmp += d * sum; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3 const int offset = tid * K_QUANTS_PER_ITERATION; // 0...15 or 0...14 const int in = offset/8; // 0 or 1 const int im = offset%8; // 0...7 for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + offset; const uint8_t * q = x[i].qs + offset; const uint8_t * s = x[i].scales; const float dall = (float)x[i].d; float sum = 0; for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) { const uint8_t hl = x[i].hmask[im+l] >> in; const uint8_t ql = q[l]; sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4)) + y[l+16] * dall * ((s[0] >> 4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4)) + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4)) + y[l+48] * dall * ((s[1] >> 4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 
0 : 4)); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q4_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q4_K * x = (const block_q4_K *)vx + ib0; #if QK_K == 256 const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0,1 const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4 const int il = tid/step; // 0...3 const int ir = tid - step*il; // 0...7 or 0...3 const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4 const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const int in = il%2; const int l0 = n*(2*ir + in); const int q_offset = 32*im + l0; const int y_offset = 64*im + l0; uint16_t aux[4]; const uint8_t * sc = (const uint8_t *)aux; #if K_QUANTS_PER_ITERATION == 2 uint32_t q32[4]; const uint8_t * q4 = (const uint8_t *)q32; #else uint16_t q16[4]; const uint8_t * q4 = (const uint8_t *)q16; #endif float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y1 = yy + i*QK_K + y_offset; const float * y2 = y1 + 128; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint16_t * a = (const uint16_t *)x[i].scales; aux[0] = a[im+0] & kmask1; aux[1] = a[im+2] & kmask1; aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); #if K_QUANTS_PER_ITERATION == 2 const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset); const uint32_t * q2 = q1 + 16; q32[0] = q1[0] & 0x0f0f0f0f; q32[1] = q1[0] & 0xf0f0f0f0; q32[2] = q2[0] & 0x0f0f0f0f; q32[3] = q2[0] & 0xf0f0f0f0; float4 s = {0.f, 0.f, 0.f, 0.f}; float smin = 0; for (int l = 0; l < 4; ++l) { s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+ 4]; s.z += y2[l] * q4[l+8]; s.w += y2[l+32] * q4[l+12]; smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; } tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; #else const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset); const uint16_t * q2 = q1 + 32; q16[0] = q1[0] & 0x0f0f; q16[1] = q1[0] & 0xf0f0; q16[2] = q2[0] & 0x0f0f; q16[3] = q2[0] & 0xf0f0; float4 s = {0.f, 0.f, 0.f, 0.f}; float smin = 0; for (int l = 0; l < 2; ++l) { s.x += y1[l] * q4[l+0]; s.y += y1[l+32] * q4[l+2]; s.z += y2[l] * q4[l+4]; s.w += y2[l+32] * q4[l+6]; smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7]; } tmp += dall * (s.x * sc[0] + s.y * sc[1] * 1.f/16.f + s.z * sc[4] + s.w * sc[5] * 1.f/16.f) - dmin * smin; #endif } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); const int step = tid * K_QUANTS_PER_ITERATION; uint16_t aux16[2]; const uint8_t * s = (const uint8_t *)aux16; float tmp = 0; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const uint8_t * q = x[i].qs + step; const float * y 
= yy + i*QK_K + step; const uint16_t * a = (const uint16_t *)x[i].scales; aux16[0] = a[0] & 0x0f0f; aux16[1] = (a[0] >> 4) & 0x0f0f; const float d = (float)x[i].dm[0]; const float m = (float)x[i].dm[1]; float sum = 0.f; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2]) + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2]) + y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3]) + y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q5_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols) { const int row = blockIdx.x; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q5_K * x = (const block_q5_K *)vx + ib0; float tmp = 0; // partial sum for thread in warp #if QK_K == 256 const uint16_t kmask1 = 0x3f3f; const uint16_t kmask2 = 0x0f0f; const uint16_t kmask3 = 0xc0c0; const int tid = threadIdx.x/2; // 0...15 const int ix = threadIdx.x%2; const int il = tid/4; // 0...3 const int ir = tid - 4*il;// 0...3 const int n = 2; const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224 const int in = il%2; const int l0 = n*(2*ir + in); const int q_offset = 32*im + l0; const int y_offset = 64*im + l0; const uint8_t hm1 = 1 << (2*im); const uint8_t hm2 = hm1 << 4; uint16_t aux[4]; const uint8_t * sc = (const uint8_t *)aux; uint16_t q16[8]; const uint8_t * q4 = (const uint8_t *)q16; for (int i = ix; i < num_blocks_per_row; i += 2) { const uint8_t * ql1 = x[i].qs + q_offset; const uint8_t * qh = x[i].qh + l0; const float * y1 = yy + i*QK_K + y_offset; const float * y2 = y1 + 128; const float dall = __low2half(x[i].dm); const float dmin = __high2half(x[i].dm); const uint16_t * a = (const uint16_t *)x[i].scales; aux[0] = a[im+0] & kmask1; aux[1] = a[im+2] & kmask1; aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2); aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2); float4 sum = {0.f, 0.f, 0.f, 0.f}; float smin = 0; const uint16_t * q1 = (const uint16_t *)ql1; const uint16_t * q2 = q1 + 32; q16[0] = q1[0] & 0x0f0f; q16[1] = q1[8] & 0x0f0f; q16[2] = (q1[0] >> 4) & 0x0f0f; q16[3] = (q1[8] >> 4) & 0x0f0f; q16[4] = q2[0] & 0x0f0f; q16[5] = q2[8] & 0x0f0f; q16[6] = (q2[0] >> 4) & 0x0f0f; q16[7] = (q2[8] >> 4) & 0x0f0f; for (int l = 0; l < n; ++l) { sum.x += y1[l+ 0] * (q4[l +0] + (qh[l+ 0] & (hm1 << 0) ? 16 : 0)) + y1[l+16] * (q4[l +2] + (qh[l+16] & (hm1 << 0) ? 16 : 0)); sum.y += y1[l+32] * (q4[l +4] + (qh[l+ 0] & (hm1 << 1) ? 16 : 0)) + y1[l+48] * (q4[l +6] + (qh[l+16] & (hm1 << 1) ? 16 : 0)); sum.z += y2[l+ 0] * (q4[l +8] + (qh[l+ 0] & (hm2 << 0) ? 16 : 0)) + y2[l+16] * (q4[l+10] + (qh[l+16] & (hm2 << 0) ? 16 : 0)); sum.w += y2[l+32] * (q4[l+12] + (qh[l+ 0] & (hm2 << 1) ? 16 : 0)) + y2[l+48] * (q4[l+14] + (qh[l+16] & (hm2 << 1) ? 
16 : 0)); smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3] + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7]; } tmp += dall * (sum.x * sc[0] + sum.y * sc[1] + sum.z * sc[4] + sum.w * sc[5]) - dmin * smin; } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...15 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); const int step = tid * K_QUANTS_PER_ITERATION; const int im = step/8; const int in = step%8; for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const uint8_t * q = x[i].qs + step; const int8_t * s = x[i].scales; const float * y = yy + i*QK_K + step; const float d = x[i].d; float sum = 0.f; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { const uint8_t h = x[i].qh[in+j] >> im; sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16)) + y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16)) + y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16)) + y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16)); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (threadIdx.x == 0) { dst[row] = tmp; } } extern "C" __global__ void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows) { static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION"); const int row = blockIdx.x*blockDim.y + threadIdx.y; if (row > nrows) return; const int num_blocks_per_row = ncols / QK_K; const int ib0 = row*num_blocks_per_row; const block_q6_K * x = (const block_q6_K *)vx + ib0; #if QK_K == 256 const int tid = threadIdx.x/K_QUANTS_PER_ITERATION; // 0...31 or 0...16 const int ix = threadIdx.x%K_QUANTS_PER_ITERATION; // 0 or 0, 1 const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8 const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128... 
const int in = tid - step*im; // 0...15 or 0...7 #if K_QUANTS_PER_ITERATION == 1 const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 const int is = 0; #else const int l0 = 4 * in; // 0, 4, 8, ..., 28 const int is = in / 4; #endif const int ql_offset = 64*im + l0; const int qh_offset = 32*im + l0; const int s_offset = 8*im + is; const int y_offset = 128*im + l0; float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + y_offset; const uint8_t * ql = x[i].ql + ql_offset; const uint8_t * qh = x[i].qh + qh_offset; const int8_t * s = x[i].scales + s_offset; const float d = x[i].d; #if K_QUANTS_PER_ITERATION == 1 float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32) + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32) + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32) + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32) + y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32) + y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32) + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32) +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32); tmp += sum; #else float sum = 0; for (int l = 0; l < 4; ++l) { sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32) + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32) + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32) + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32); } tmp += sum; #endif } #else const int tid = threadIdx.x/(2*K_QUANTS_PER_ITERATION); // 0...7 const int ix = threadIdx.x%(2*K_QUANTS_PER_ITERATION); // 0...3 const int step = tid * K_QUANTS_PER_ITERATION; float tmp = 0; // partial sum for thread in warp for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) { const float * y = yy + i * QK_K + step; const uint8_t * ql = x[i].ql + step; const uint8_t * qh = x[i].qh + step; const int8_t * s = x[i].scales; const float d = x[i+0].d; float sum = 0; for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) { sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32) + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32) + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >> 4) | ((qh[j] & 0x30) >> 0)) - 32) + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >> 4) | ((qh[j] & 0xc0) >> 2)) - 32); } tmp += sum; } #endif // sum up partial sums and write back result #pragma unroll for (int mask = 16; mask > 0; mask >>= 1) { tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32); } if (tid == 0) { dst[row] = tmp; } }
candle/candle-kernels/src/quantized.cu/0
{ "file_path": "candle/candle-kernels/src/quantized.cu", "repo_id": "candle", "token_count": 30648 }
26
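The `dequantize_q4_0` device function in the kernel above expands each byte into two values as (nibble - 8) * d. Here is a CPU-side sketch of the same Q4_0 layout (32 weights per block: one scale plus 16 bytes of packed nibbles); the f16 scale of the real block_q4_0 is replaced by f32 purely to keep the sketch dependency-free.

// CPU sketch mirroring dequantize_q4_0 from quantized.cu above.
// Low nibbles fill the first half of the block, high nibbles the second half,
// matching the y_offset = qk/2 indexing used by dequantize_block.
struct BlockQ40 {
    d: f32,       // delta / scale (stored as f16 in the real block_q4_0)
    qs: [u8; 16], // 32 quantized weights, two 4-bit values per byte
}

fn dequantize_q4_0(block: &BlockQ40) -> [f32; 32] {
    let mut out = [0f32; 32];
    for (i, &byte) in block.qs.iter().enumerate() {
        let lo = (byte & 0x0F) as i32 - 8; // low nibble, re-centred on zero
        let hi = (byte >> 4) as i32 - 8;   // high nibble, re-centred on zero
        out[i] = lo as f32 * block.d;
        out[i + 16] = hi as f32 * block.d;
    }
    out
}

fn main() {
    let block = BlockQ40 { d: 0.5, qs: [0x80; 16] }; // nibbles 0 and 8 -> -4.0 and 0.0
    println!("{:?}", &dequantize_q4_0(&block)[..4]);
}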
#include <metal_stdlib>
#
using namespace metal;

METAL_FUNC uint get_strided_index(
    uint idx,
    constant size_t &num_dims,
    constant size_t *dims,
    constant size_t *strides
) {
    uint strided_i = 0;
    for (uint d = 0; d < num_dims; d++) {
        uint dim_idx = num_dims - 1 - d;
        strided_i += (idx % dims[dim_idx]) * strides[dim_idx];
        idx /= dims[dim_idx];
    }
    return strided_i;
}

template<typename T, typename ID>
METAL_FUNC void where_cond(
    constant size_t &numel,
    constant size_t &num_dims,
    constant size_t *dims,
    constant size_t *strides,
    constant size_t *strides_t,
    constant size_t *strides_f,
    device const ID *ids,
    device const T *t,
    device const T *f,
    device T *out,
    uint i [[ thread_position_in_grid ]]
) {
    if (i >= numel){
        return;
    }
    uint strided_i = get_strided_index(i, num_dims, dims, strides);
    uint strided_i_t = get_strided_index(i, num_dims, dims, strides_t);
    uint strided_i_f = get_strided_index(i, num_dims, dims, strides_f);
    out[i] = ids[strided_i] ? t[strided_i_t] : f[strided_i_f];
}

#define WHERE_OP(T, ID, FN_NAME) \
kernel void FN_NAME( \
    constant size_t &numel, \
    constant size_t &num_dims, \
    constant size_t *dims, \
    constant size_t *strides, \
    constant size_t *strides_t, \
    constant size_t *strides_f, \
    device const ID *ids, \
    device const T *t, \
    device const T *f, \
    device T *out, \
    uint i [[ thread_position_in_grid ]] \
) { \
    where_cond<T, ID>(numel, num_dims, dims, strides, strides_t, strides_f, ids, t, f, out, i); \
} \

// WHERE_OP(float, int64_t, where_i64_f32)
// WHERE_OP(double, int64_t, where_i64_f64)
// WHERE_OP(uint8_t, int64_t, where_i64_u8)
// WHERE_OP(uint32_t, int64_t, where_i64_u32)
// WHERE_OP(int64_t, int64_t, where_i64_i64)
//
// WHERE_OP(float, uint32_t, where_u32_f32)
// WHERE_OP(double, uint32_t, where_u32_f64)
// WHERE_OP(uint8_t, uint32_t, where_u32_u8)
// WHERE_OP(uint32_t, uint32_t, where_u32_u32)
// WHERE_OP(int64_t, uint32_t, where_u32_i64)

WHERE_OP(float, uint8_t, where_u8_f32)
WHERE_OP(half, uint8_t, where_u8_f16)
WHERE_OP(uint8_t, uint8_t, where_u8_u8)
WHERE_OP(uint32_t, uint8_t, where_u8_u32)

#if __METAL_VERSION__ >= 220
WHERE_OP(int64_t, uint8_t, where_u8_i64)
#endif
#if defined(__HAVE_BFLOAT__)
WHERE_OP(bfloat, uint8_t, where_u8_bf16)
#endif
candle/candle-metal-kernels/src/ternary.metal/0
{ "file_path": "candle/candle-metal-kernels/src/ternary.metal", "repo_id": "candle", "token_count": 2209 }
27
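The `get_strided_index` helper above turns a flat element index into a strided offset, which is what lets `where_cond` gather from non-contiguous inputs. A small CPU re-implementation of the same arithmetic, written here only to illustrate the mapping:

// CPU re-implementation of get_strided_index from ternary.metal, for illustration.
// Walks dimensions from innermost to outermost, peeling coordinates off the flat
// index and accumulating the strided offset.
fn get_strided_index(mut idx: usize, dims: &[usize], strides: &[usize]) -> usize {
    let mut strided_i = 0;
    for d in (0..dims.len()).rev() {
        strided_i += (idx % dims[d]) * strides[d];
        idx /= dims[d];
    }
    strided_i
}

fn main() {
    // A 2x3 logical shape over a buffer with strides (1, 2), i.e. a transposed view.
    let dims = [2, 3];
    let strides = [1, 2];
    for i in 0..6 {
        println!("flat {i} -> strided {}", get_strided_index(i, &dims, &strides));
    }
}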
//! Layers defined by closures.
use candle::{Result, Tensor};
use std::sync::Arc;

/// A layer defined by a simple closure.
#[derive(Clone)]
pub struct Func<'a> {
    #[allow(clippy::type_complexity)]
    f: Arc<dyn 'a + Fn(&Tensor) -> Result<Tensor> + Send + Sync>,
}

impl<'a> std::fmt::Debug for Func<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "func")
    }
}

pub fn func<'a, F>(f: F) -> Func<'a>
where
    F: 'a + Fn(&Tensor) -> Result<Tensor> + Send + Sync,
{
    Func { f: Arc::new(f) }
}

impl<'a> super::Module for Func<'a> {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        (*self.f)(xs)
    }
}

impl<'a> Func<'a> {
    pub fn new<F>(f: F) -> Self
    where
        F: 'a + Fn(&Tensor) -> Result<Tensor> + Send + Sync,
    {
        Self { f: Arc::new(f) }
    }
}

/// A layer defined by a simple closure.
#[derive(Clone)]
pub struct FuncT<'a> {
    #[allow(clippy::type_complexity)]
    f: Arc<dyn 'a + Fn(&Tensor, bool) -> Result<Tensor> + Send + Sync>,
}

impl<'a> std::fmt::Debug for FuncT<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        write!(f, "func")
    }
}

pub fn func_t<'a, F>(f: F) -> FuncT<'a>
where
    F: 'a + Fn(&Tensor, bool) -> Result<Tensor> + Send + Sync,
{
    FuncT { f: Arc::new(f) }
}

impl<'a> super::ModuleT for FuncT<'a> {
    fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> {
        (*self.f)(xs, train)
    }
}

impl<'a> FuncT<'a> {
    pub fn new<F>(f: F) -> Self
    where
        F: 'a + Fn(&Tensor, bool) -> Result<Tensor> + Send + Sync,
    {
        Self { f: Arc::new(f) }
    }
}
candle/candle-nn/src/func.rs/0
{ "file_path": "candle/candle-nn/src/func.rs", "repo_id": "candle", "token_count": 804 }
28
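A small usage sketch of the closure layers above (assuming the usual crate-root re-exports `candle_nn::func` and `candle_nn::Module`); it wraps an arbitrary tensor-to-tensor closure so it composes like any other layer:

use candle::{Device, Result, Tensor};
use candle_nn::Module;

fn func_demo() -> Result<Tensor> {
    let dev = Device::Cpu;
    // A custom "layer" built from a closure: scales its input then applies ReLU.
    let layer = candle_nn::func(|xs: &Tensor| (xs * 2.0)?.relu());
    let xs = Tensor::new(&[-1f32, 0.5, 2.0], &dev)?;
    layer.forward(&xs)
}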
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::test_utils::to_vec0_round; use candle::{Device, Result, Tensor}; /* Equivalent python code: import torch import torch.nn.functional as F input = torch.tensor([ [ 1.1050, 0.3013, -1.5394, -2.1528, -0.8634], [ 1.0730, -0.9419, -0.1670, -0.6582, 0.5061], [ 0.8318, 1.1154, -0.3610, 0.5351, 1.0830]]) target = torch.tensor([1, 0, 4]) print(F.nll_loss(F.log_softmax(input, dim=1), target)) print(F.cross_entropy(input, target)) */ #[test] fn nll_and_cross_entropy() -> Result<()> { let cpu = Device::Cpu; let input = Tensor::new( &[ [1.1050f32, 0.3013, -1.5394, -2.1528, -0.8634], [1.0730, -0.9419, -0.1670, -0.6582, 0.5061], [0.8318, 1.1154, -0.3610, 0.5351, 1.0830], ], &cpu, )?; let target = Tensor::new(&[1u32, 0, 4], &cpu)?; let log_softmax = candle_nn::ops::log_softmax(&input, 1)?; let loss = candle_nn::loss::nll(&log_softmax, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 1.1312); let loss = candle_nn::loss::cross_entropy(&input, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 1.1312); Ok(()) } /* Equivalent python code: import torch import torch.nn.functional as F inp = torch.Tensor([[ 2.3611, -0.8813, -0.5006, -0.2178], [ 0.0419, 0.0763, -1.0457, -1.6692], [-1.0494, 0.8111, 1.5723, 1.2315], [ 1.3081, 0.6641, 1.1802, -0.2547], [ 0.5292, 0.7636, 0.3692, -0.8318]]) target = torch.Tensor([[0., 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [1., 0., 0., 0.], [0., 0., 1., 0.]]) print(F.binary_cross_entropy_with_logits(inp, target)) */ #[test] fn binary_cross_entropy_with_logit() -> Result<()> { let cpu = Device::Cpu; let inp = [ [2.3611f32, -0.8813, -0.5006, -0.2178], [0.0419, 0.0763, -1.0457, -1.6692], [-1.0494, 0.8111, 1.5723, 1.2315], [1.3081, 0.6641, 1.1802, -0.2547], [0.5292, 0.7636, 0.3692, -0.8318], ]; let target = [ [0.0f32, 1., 0., 0.], [0., 1., 0., 0.], [0., 0., 0., 1.], [1., 0., 0., 0.], [0., 0., 1., 0.], ]; let inp = Tensor::new(&inp, &cpu)?; let target = Tensor::new(&target, &cpu)?; let loss = candle_nn::loss::binary_cross_entropy_with_logit(&inp, &target)?; assert_eq!(to_vec0_round(&loss, 4)?, 0.8224); Ok(()) }
candle/candle-nn/tests/loss.rs/0
{ "file_path": "candle/candle-nn/tests/loss.rs", "repo_id": "candle", "token_count": 1344 }
29
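Outside of tests, these losses usually feed an optimizer step. A minimal sketch under assumed toy shapes (4 features, 2 classes; targets are u32 class indices), using candle-nn's VarMap/SGD as the optimizer:

use candle::{DType, Device, Result, Tensor};
use candle_nn::{loss, Module, Optimizer, VarBuilder, VarMap, SGD};

fn train_step(xs: &Tensor, targets: &Tensor) -> Result<f32> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    let model = candle_nn::linear(4, 2, vb.pp("lin"))?;
    let mut sgd = SGD::new(varmap.all_vars(), 0.1)?;
    let logits = model.forward(xs)?;
    // cross_entropy == nll(log_softmax(logits, 1), targets), as the tests above check.
    let loss = loss::cross_entropy(&logits, targets)?;
    sgd.backward_step(&loss)?;
    loss.to_scalar::<f32>()
}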
from typing import Union, Sequence

class Tensor:
    """
    This contains the type hints for the magic methods of the `candle.Tensor` class.
    """

    def __add__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        pass

    def __radd__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        pass

    def __sub__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Subtract a scalar from a tensor or one tensor from another.
        """
        pass

    def __truediv__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Divide a tensor by a scalar or one tensor by another.
        """
        pass

    def __mul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Multiply a tensor by a scalar or one tensor by another.
        """
        pass

    def __rmul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Multiply a tensor by a scalar or one tensor by another.
        """
        pass

    def __richcmp__(self, rhs: Union["Tensor", "Scalar"], op) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __getitem__(self, index: Union["Index", "Tensor", Sequence["Index"]]) -> "Tensor":
        """
        Return a slice of a tensor.
        """
        pass

    def __eq__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __ne__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __lt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __le__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __gt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __ge__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass
candle/candle-pyo3/_additional_typing/__init__.py/0
{ "file_path": "candle/candle-pyo3/_additional_typing/__init__.py", "repo_id": "candle", "token_count": 1174 }
30
# Generated content DO NOT EDIT from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence from os import PathLike from candle.typing import _ArrayLike, Device, Scalar, Index, Shape from candle import Tensor, DType, QTensor class ONNXModel: """ A wrapper around an ONNX model. """ def __init__(self, path: str): pass @property def doc_string(self) -> str: """ The doc string of the model. """ pass @property def domain(self) -> str: """ The domain of the operator set of the model. """ pass def initializers(self) -> Dict[str, Tensor]: """ Get the weights of the model. """ pass @property def inputs(self) -> Optional[Dict[str, ONNXTensorDescription]]: """ The inputs of the model. """ pass @property def ir_version(self) -> int: """ The version of the IR this model targets. """ pass @property def model_version(self) -> int: """ The version of the model. """ pass @property def outputs(self) -> Optional[Dict[str, ONNXTensorDescription]]: """ The outputs of the model. """ pass @property def producer_name(self) -> str: """ The producer of the model. """ pass @property def producer_version(self) -> str: """ The version of the producer of the model. """ pass def run(self, inputs: Dict[str, Tensor]) -> Dict[str, Tensor]: """ Run the model on the given inputs. """ pass class ONNXTensorDescription: """ A wrapper around an ONNX tensor description. """ @property def dtype(self) -> DType: """ The data type of the tensor. """ pass @property def shape(self) -> Tuple[Union[int, str, Any]]: """ The shape of the tensor. """ pass
candle/candle-pyo3/py_src/candle/onnx/__init__.pyi/0
{ "file_path": "candle/candle-pyo3/py_src/candle/onnx/__init__.pyi", "repo_id": "candle", "token_count": 939 }
31
import candle from candle import Tensor, QTensor from candle.nn import Module, Linear from candle.utils import cuda_is_available import pytest def test_module_can_be_constructed(): class A(Module): pass a = A() assert a is not None assert len(list(a.buffers())) == 0 def test_module_registers_tensors(): class A(Module): def __init__(self): super().__init__() self.t = Tensor(42.0) a = A() named_buffers = dict(a.named_buffers()) assert len(named_buffers) == 1 assert "t" in named_buffers def test_module_registers_submodules(): class A(Module): def __init__(self): super().__init__() self.linear = Linear(10, 20) a = A() named_modules = dict(a.named_modules()) named_buffers = dict(a.named_buffers()) assert len(named_buffers) == 2 assert "linear" in named_modules assert "linear.weight" in named_buffers assert "linear.bias" in named_buffers def test_module_can_dump_statedict(): class A(Module): def __init__(self): super().__init__() self.linear = Linear(10, 20) self.t = Tensor(42.0) a = A() state_dict = a.state_dict() assert hasattr(state_dict, "_metadata") assert "t" in state_dict assert "linear.weight" in state_dict assert "linear.bias" in state_dict assert len(state_dict) == 3 def test_module_can_load_statedict(): class A(Module): def __init__(self): super().__init__() self.linear = Linear(10, 20) self.t = Tensor(42.0) statedict = { "linear.weight": candle.ones((20, 10)), "linear.bias": candle.zeros((20,)), "t": Tensor(42.0), } a = A() a.load_state_dict(statedict) def test_module_throws_on_shape_mismatch(): class A(Module): def __init__(self): super().__init__() self.t = Tensor(42.0) statedict = { "t": candle.ones((20,)), } a = A() with pytest.raises(RuntimeError) as excinfo: a.load_state_dict(statedict) assert "size mismatch" in str(excinfo.value) def test_module_throws_on_missing_key(): class A(Module): def __init__(self): super().__init__() self.t = Tensor(42.0) statedict = { "not_t": Tensor(42.0), } a = A() with pytest.raises(RuntimeError) as excinfo: a.load_state_dict(statedict) assert 'Missing key(s) in state_dict: "t".' in str(excinfo.value) def test_module_can_load_quantized_tensors(): class A(Module): def __init__(self): super().__init__() self.t = candle.randn((16, 256)) self._quantizable_buffers.add("t") statedict = { "t": candle.ones((16, 256)).quantize("q4_0"), } a = A() a.load_state_dict(statedict) assert isinstance(a.t, QTensor) assert a.t.ggml_dtype == "Q4_0" def test_module_dequantizes_tensors_automatically(): class A(Module): def __init__(self): super().__init__() self.t = candle.randn((16, 256)) statedict = { "t": candle.ones((16, 256)).quantize("q4_0"), } a = A() a.load_state_dict(statedict) assert isinstance(a.t, Tensor) @pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available") def test_module_can_be_moved_to_cuda(): class A(Module): def __init__(self): super().__init__() self.t = candle.randn((16, 256)) a = A() a.cuda() assert a.t.device == "cuda" @pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available") def test_module_can_be_moved_from_cuda_to_cpu(): class A(Module): def __init__(self): super().__init__() self.t = candle.randn((16, 256)) a = A() a.cuda() assert a.t.device == "cuda" a.cpu() assert a.t.device == "cpu"
candle/candle-pyo3/tests/bindings/test_module.py/0
{ "file_path": "candle/candle-pyo3/tests/bindings/test_module.py", "repo_id": "candle", "token_count": 1853 }
32
use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; const IMG_SIZE: usize = 518; const PATCH_SIZE: usize = 14; const NUM_CLASSES: usize = 1000; fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { candle_nn::linear(in_dim, out_dim, vb) } else { candle_nn::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] struct Attention { qkv: Linear, proj: Linear, num_heads: usize, scale: f64, } impl Attention { fn new( vb: VarBuilder, dim: usize, num_heads: usize, qkv_bias: bool, proj_bias: bool, ) -> Result<Self> { let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?; let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?; let scale = 1. / ((dim / num_heads) as f64).sqrt(); Ok(Self { qkv, proj, num_heads, scale, }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, n, c) = xs.dims3()?; let qkv = self .qkv .forward(xs)? .reshape((b, n, 3, self.num_heads, c / self.num_heads))? .transpose(1, 2)? // 02134 .transpose(0, 1)? // 20134 .transpose(2, 3)?; // 20314 let q = (qkv.i(0)? * self.scale)?; let k = qkv.i(1)?; let v = qkv.i(2)?; let attn = candle_nn::ops::softmax(&q.matmul(&k.t()?)?, D::Minus1)?; let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?; self.proj.forward(&attn) } } #[derive(Debug)] struct LayerScale { gamma: Tensor, } impl LayerScale { fn new(vb: VarBuilder, dim: usize) -> Result<Self> { let gamma = vb.get(dim, "gamma")?; Ok(Self { gamma }) } } impl Module for LayerScale { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.broadcast_mul(&self.gamma) } } #[derive(Debug)] struct Mlp { fc1: Linear, fc2: Linear, } impl Mlp { fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> { let out_features = in_features; let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?; let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?; Ok(Self { fc1, fc2 }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?.gelu()?; self.fc2.forward(&xs) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, ls1: LayerScale, norm2: LayerNorm, mlp: Mlp, ls2: LayerScale, } impl Block { fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> { let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?; let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?; let ls1 = LayerScale::new(vb.pp("ls1"), dim)?; let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?; let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?; let ls2 = LayerScale::new(vb.pp("ls2"), dim)?; Ok(Self { norm1, attn, ls1, norm2, mlp, ls2, }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self .ls1 .forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?; let xs = (xs + residual)?; let residual = &xs; let xs = self .ls2 .forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?; xs + residual } } #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, patch_size: (usize, usize), num_patches: usize, } impl PatchEmbed { fn new( vb: VarBuilder, img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, ) -> Result<Self> { let config = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?; let num_patches = (img_size / patch_size) * (img_size / patch_size); Ok(Self { proj, 
patch_size: (patch_size, patch_size), num_patches, }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _c, h, w) = xs.dims4()?; let (patch_h, patch_w) = self.patch_size; if (h % patch_h) != 0 { candle::bail!("image height {h} is not a multiple of patch height {patch_h}") } if (w % patch_w) != 0 { candle::bail!("image width {w} is not a multiple of patch width {patch_w}") } let xs = self.proj.forward(xs)?; let (b, c, h, w) = xs.dims4()?; // flatten embeddings. xs.reshape((b, c, h * w))?.transpose(1, 2) } } #[derive(Debug)] pub struct DinoVisionTransformer { patch_embed: PatchEmbed, cls_token: Tensor, pos_embed: Tensor, blocks: Vec<Block>, norm: LayerNorm, head: Linear, } impl DinoVisionTransformer { pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> { let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?; let cls_token = vb.get((1, 1, embed_dim), "cls_token")?; let num_tokens = 1; let pos_embed = vb.get( (1, patch_embed.num_patches + num_tokens, embed_dim), "pos_embed", )?; let head = linear(vb.pp("head"), 2 * embed_dim, NUM_CLASSES, true)?; let norm = layer_norm(embed_dim, 1e-5, vb.pp("norm"))?; let vb_b = vb.pp("blocks"); let blocks = (0..depth) .map(|i| Block::new(vb_b.pp(&i.to_string()), embed_dim, num_heads)) .collect::<Result<Vec<_>>>()?; Ok(Self { patch_embed, cls_token, pos_embed, blocks, norm, head, }) } fn interpolate_pos_encoding(&self, xs: &Tensor, w: usize, h: usize) -> Result<Tensor> { let npatch = xs.dim(1)? - 1; let n = self.pos_embed.dim(1)? - 1; let sqrt_n = (n as f64).sqrt(); if npatch == n && w == h { return Ok(xs.clone()); } let class_pos_embed = self.pos_embed.i((.., ..1))?; let patch_pos_embed = self.pos_embed.i((.., 1..))?; let dim = xs.dim(D::Minus1)?; let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1); let patch_pos_embed = patch_pos_embed .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))? .transpose(2, 3)? .transpose(1, 2)?; // This uses bicubic interpolation in the original implementation. let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?; let el_count = patch_pos_embed.shape().elem_count(); let patch_pos_embed = patch_pos_embed .transpose(1, 2)? .transpose(2, 3)? .reshape((1, el_count / dim, dim))?; Tensor::cat(&[&class_pos_embed, &patch_pos_embed], 1) } fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _nc, w, h) = xs.dims4()?; let xs = self.patch_embed.forward(xs)?; let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?; &xs + &self.interpolate_pos_encoding(&xs, w, h)? } } impl Module for DinoVisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.prepare_tokens_with_mask(xs)?; for blk in self.blocks.iter() { xs = blk.forward(&xs)? } let xs = self.norm.forward(&xs)?; let xs_norm_clstoken = xs.i((.., 0))?; let xs_norm_patchtokens = xs.i((.., 1..))?.mean(1)?; let xs = Tensor::cat(&[xs_norm_clstoken, xs_norm_patchtokens], D::Minus1)?; self.head.forward(&xs) } } pub fn vit_small(vb: VarBuilder) -> Result<DinoVisionTransformer> { DinoVisionTransformer::new(vb, 12, 384, 6) }
candle/candle-transformers/src/models/dinov2.rs/0
{ "file_path": "candle/candle-transformers/src/models/dinov2.rs", "repo_id": "candle", "token_count": 4376 }
33
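A short sketch of how the DINOv2 classifier above is typically instantiated and run; the weights path is a placeholder and the zero image stands in for a normalized input:

use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::dinov2;

fn logits(weights_path: &str) -> Result<Tensor> {
    let dev = Device::Cpu;
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights_path], DType::F32, &dev)? };
    let model = dinov2::vit_small(vb)?;
    // IMG_SIZE above is 518, so inputs are (batch, 3, 518, 518).
    let img = Tensor::zeros((1, 3, 518, 518), DType::F32, &dev)?;
    model.forward(&img) // -> (1, 1000) class logits
}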
use crate::models::with_tracing::{linear_no_bias, Linear, RmsNorm}; /// Mixtral Model /// https://github.com/huggingface/transformers/blob/main/src/transformers/models/mixtral/modeling_mixtral.py /// https://mistral.ai/news/mixtral-of-experts/ use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; use std::sync::Arc; /// https://github.com/huggingface/transformers/blob/1a585c1222a56bcaecc070966d558d4a9d862e83/src/transformers/models/mixtral/configuration_mixtral.py#L113 #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) hidden_size: usize, pub(crate) intermediate_size: usize, pub(crate) num_hidden_layers: usize, pub(crate) num_attention_heads: usize, pub(crate) num_key_value_heads: usize, pub(crate) hidden_act: Activation, pub(crate) max_position_embeddings: usize, pub(crate) rms_norm_eps: f64, pub(crate) rope_theta: f64, pub(crate) sliding_window: usize, pub(crate) num_experts_per_tok: usize, pub(crate) num_local_experts: usize, pub(crate) use_flash_attn: bool, } impl Config { /// https://huggingface.co/mistralai/Mixtral-8x7B-v0.1/blob/main/config.json pub fn v0_1_8x7b(use_flash_attn: bool) -> Self { Self { vocab_size: 32000, hidden_size: 4096, intermediate_size: 14336, num_hidden_layers: 32, num_attention_heads: 32, num_key_value_heads: 8, hidden_act: Activation::Silu, max_position_embeddings: 32768, rms_norm_eps: 1e-5, rope_theta: 1e6, sliding_window: 4096, num_experts_per_tok: 2, num_local_experts: 8, use_flash_attn, } } } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / (cfg.rope_theta as f32).powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?; let k_embed = (k.broadcast_mul(&cos)? 
+ rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, use_flash_attn: bool, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear_no_bias(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, use_flash_attn: cfg.use_flash_attn, }) } fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> { let n_rep = self.num_kv_groups; if n_rep == 1 { Ok(xs) } else { let (b_sz, num_kv_heads, seq_len, head_dim) = xs.dims4()?; xs.unsqueeze(2)? .expand((b_sz, num_kv_heads, n_rep, seq_len, head_dim))? .reshape((b_sz, num_kv_heads * n_rep, seq_len, head_dim)) } } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = self.repeat_kv(key_states)?; let value_states = self.repeat_kv(value_states)?; let attn_output = if self.use_flash_attn { // flash-attn expects (b_sz, seq_len, nheads, head_dim) let q = query_states.transpose(1, 2)?; let k = key_states.transpose(1, 2)?; let v = value_states.transpose(1, 2)?; let softmax_scale = 1f32 / (self.head_dim as f32).sqrt(); flash_attn(&q, &k, &v, softmax_scale, q_len > 1)?.transpose(1, 2)? } else { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? 
* scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } } #[derive(Debug, Clone)] struct BlockSparseTop2MLP { w1: Linear, w2: Linear, w3: Linear, act_fn: Activation, } impl BlockSparseTop2MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let w1 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w1"))?; let w2 = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("w2"))?; let w3 = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("w3"))?; Ok(Self { w1, w2, w3, act_fn: cfg.hidden_act, }) } } impl Module for BlockSparseTop2MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.w1)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.w3)?; (lhs * rhs)?.apply(&self.w2) } } #[derive(Debug, Clone)] struct SparseMoeBlock { gate: Linear, experts: Vec<BlockSparseTop2MLP>, num_experts_per_tok: usize, } impl SparseMoeBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let gate = linear_no_bias(cfg.hidden_size, cfg.num_local_experts, vb.pp("gate"))?; let mut experts = Vec::with_capacity(cfg.num_local_experts); let vb = vb.pp("experts"); for idx in 0..cfg.num_local_experts { let expert = BlockSparseTop2MLP::new(cfg, vb.pp(idx))?; experts.push(expert) } Ok(SparseMoeBlock { gate, experts, num_experts_per_tok: cfg.num_experts_per_tok, }) } } impl Module for SparseMoeBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b_size, seq_len, hidden_dim) = xs.dims3()?; let xs = xs.reshape(((), hidden_dim))?; let router_logits = xs.apply(&self.gate)?; let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?; // In order to extract topk, we extract the data from the tensor and manipulate it // directly. Maybe we will want to use some custom ops instead at some point. let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?; // routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) // top_x contains the row indexes to evaluate for each expert. 
let mut top_x = vec![vec![]; self.experts.len()]; let mut selected_rws = vec![vec![]; self.experts.len()]; for (row_idx, rw) in routing_weights.iter().enumerate() { let mut dst = (0..rw.len() as u32).collect::<Vec<u32>>(); dst.sort_by(|&i, &j| rw[j as usize].total_cmp(&rw[i as usize])); let mut sum_routing_weights = 0f32; for &expert_idx in dst.iter().take(self.num_experts_per_tok) { let expert_idx = expert_idx as usize; let routing_weight = rw[expert_idx]; sum_routing_weights += routing_weight; top_x[expert_idx].push(row_idx as u32); } for &expert_idx in dst.iter().take(self.num_experts_per_tok) { let expert_idx = expert_idx as usize; let routing_weight = rw[expert_idx]; selected_rws[expert_idx].push(routing_weight / sum_routing_weights) } } // routing_weights /= routing_weights.sum(dim=-1, keepdim=True) // expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) let mut ys = xs.zeros_like()?; for (expert_idx, expert_layer) in self.experts.iter().enumerate() { let top_x = &top_x[expert_idx]; if top_x.is_empty() { continue; } let top_x = Tensor::new(top_x.as_slice(), xs.device())?; let selected_rws = Tensor::new(selected_rws[expert_idx].as_slice(), xs.device())?.reshape(((), 1))?; // Index the correct hidden states and compute the expert hidden state for // the current expert. We need to make sure to multiply the output hidden // states by `routing_weights` on the corresponding tokens (top-1 and top-2) let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?; // current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None]) let current_hidden_states = expert_layer.forward(&current_state)?; let current_hidden_states = current_hidden_states.broadcast_mul(&selected_rws)?; ys = ys.index_add(&top_x, &current_hidden_states, 0)?; } let ys = ys.reshape((b_size, seq_len, hidden_dim))?; Ok(ys) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, block_sparse_moe: SparseMoeBlock, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let block_sparse_moe = SparseMoeBlock::new(cfg, vb.pp("block_sparse_moe"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, block_sparse_moe, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs .apply(&self.post_attention_layernorm)? 
.apply(&self.block_sparse_moe)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, lm_head: Linear, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
candle/candle-transformers/src/models/mixtral.rs/0
{ "file_path": "candle/candle-transformers/src/models/mixtral.rs", "repo_id": "candle", "token_count": 9108 }
34
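A sketch of a prompt pass with the Mixtral model above; checkpoint loading is elided and `vb` is assumed to be a VarBuilder over the weights. The kv cache is filled as a side effect, so subsequent decoding calls would pass a growing seqlen_offset:

use candle::{Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::mixtral::{Config, Model};

fn prompt_pass(vb: VarBuilder, prompt_ids: &[u32]) -> Result<Tensor> {
    let dev = vb.device().clone();
    let cfg = Config::v0_1_8x7b(false); // use_flash_attn = false
    let mut model = Model::new(&cfg, vb)?;
    let input = Tensor::new(prompt_ids, &dev)?.unsqueeze(0)?; // (1, seq_len)
    // Returns logits for the last position only: (1, 1, vocab_size).
    model.forward(&input, 0)
}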
use crate::quantized_nn::{layer_norm, linear, linear_no_bias, Embedding, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, Device, Module, Result, Tensor, D}; use candle_nn::{Activation, LayerNorm}; use std::sync::Arc; pub use crate::models::stable_lm::Config; use crate::models::stable_lm::RotaryEmbedding; #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, span: tracing::Span, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, span: tracing::span!(tracing::Level::TRACE, "mlp"), }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, use_cache: bool, rotary_ndims: usize, span: tracing::Span, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let head_dim = cfg.head_dim(); let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let linear_layer = if cfg.use_qkv_bias { linear } else { linear_no_bias }; let q_proj = linear_layer(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear_layer(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups: cfg.num_kv_groups(), head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, use_cache: cfg.use_cache, rotary_ndims: cfg.rotary_ndims(), span: tracing::span!(tracing::Level::TRACE, "attn"), }) } fn repeat_kv(&self, xs: Tensor) -> Result<Tensor> { let n_rep = self.num_kv_groups; if n_rep == 1 { Ok(xs) } else { let (b_sz, num_kv_heads, seq_len, head_dim) = xs.dims4()?; xs.unsqueeze(2)? .expand((b_sz, num_kv_heads, n_rep, seq_len, head_dim))? .reshape((b_sz, num_kv_heads * n_rep, seq_len, head_dim)) } } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let _enter = self.span.enter(); let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? 
.transpose(1, 2)?; let (rot_ndims, pass_ndims) = (self.rotary_ndims, self.head_dim - self.rotary_ndims); let query_rot = query_states.narrow(D::Minus1, 0, rot_ndims)?; let query_pass = query_states.narrow(D::Minus1, rot_ndims, pass_ndims)?; let key_rot = key_states.narrow(D::Minus1, 0, rot_ndims)?; let key_pass = key_states.narrow(D::Minus1, rot_ndims, pass_ndims)?; let (query_rot, key_rot) = self.rotary_emb .apply_rotary_emb_qkv(&query_rot, &key_rot, seqlen_offset)?; let query_states = Tensor::cat(&[query_rot, query_pass], D::Minus1)?.contiguous()?; let key_states = Tensor::cat(&[key_rot, key_pass], D::Minus1)?.contiguous()?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; if self.use_cache { self.kv_cache = Some((key_states.clone(), value_states.clone())); } let key_states = self.repeat_kv(key_states)?.contiguous()?; let value_states = self.repeat_kv(value_states)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: LayerNorm, post_attention_layernorm: LayerNorm, span: tracing::Span, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb.pp("input_layernorm"), )?; let post_attention_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let _enter = self.span.enter(); let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: Embedding, layers: Vec<DecoderLayer>, norm: LayerNorm, lm_head: Linear, device: Device, span: tracing::Span, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = Embedding::new(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(DType::F32, cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_m.pp("norm"))?; let lm_head = linear_no_bias(cfg.hidden_size, 
cfg.vocab_size, vb.pp("lm_head"))?; Ok(Self { embed_tokens, layers, norm, lm_head, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(DType::F32) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len) = input_ids.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, seqlen_offset)?; Some(mask) }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm)? .apply(&self.lm_head) } }
candle/candle-transformers/src/models/quantized_stable_lm.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_stable_lm.rs", "repo_id": "candle", "token_count": 5441 }
35
//! Contrastive Language-Image Pre-Training //! //! Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! https://github.com/openai/CLIP use candle::{DType, Device, Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug, Clone, Copy)] pub enum Activation { QuickGelu, Gelu, GeluErf, } impl Module for Activation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Activation::QuickGelu => xs * nn::ops::sigmoid(&(xs * 1.702f64)?)?, Activation::Gelu => xs.gelu(), Activation::GeluErf => xs.gelu_erf(), } } } #[derive(Debug, Clone)] pub struct Config { vocab_size: usize, embed_dim: usize, // aka config.hidden_size activation: Activation, // aka config.hidden_act intermediate_size: usize, pub max_position_embeddings: usize, // The character to use for padding, use EOS when not set. pub pad_with: Option<String>, num_hidden_layers: usize, num_attention_heads: usize, #[allow(dead_code)] projection_dim: usize, } impl Config { // The config details can be found in the "text_config" section of this json file: // https://huggingface.co/openai/clip-vit-large-patch14/blob/main/config.json pub fn v1_5() -> Self { Self { vocab_size: 49408, embed_dim: 768, intermediate_size: 3072, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 12, num_attention_heads: 12, projection_dim: 768, activation: Activation::QuickGelu, } } // https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/text_encoder/config.json pub fn v2_1() -> Self { Self { vocab_size: 49408, embed_dim: 1024, intermediate_size: 4096, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 23, num_attention_heads: 16, projection_dim: 512, activation: Activation::Gelu, } } // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/text_encoder/config.json pub fn sdxl() -> Self { Self { vocab_size: 49408, embed_dim: 768, intermediate_size: 3072, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 12, num_attention_heads: 12, projection_dim: 768, activation: Activation::QuickGelu, } } // https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/text_encoder_2/config.json pub fn sdxl2() -> Self { Self { vocab_size: 49408, embed_dim: 1280, intermediate_size: 5120, max_position_embeddings: 77, pad_with: Some("!".to_string()), num_hidden_layers: 32, num_attention_heads: 20, projection_dim: 1280, activation: Activation::Gelu, } } pub fn ssd1b() -> Self { Self::sdxl() } pub fn ssd1b2() -> Self { Self::sdxl2() } // https://huggingface.co/warp-ai/wuerstchen/blob/main/text_encoder/config.json pub fn wuerstchen() -> Self { Self { vocab_size: 49408, embed_dim: 1024, intermediate_size: 4096, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 24, num_attention_heads: 16, projection_dim: 1024, activation: Activation::GeluErf, } } // https://huggingface.co/warp-ai/wuerstchen-prior/blob/main/text_encoder/config.json pub fn wuerstchen_prior() -> Self { Self { vocab_size: 49408, embed_dim: 1280, intermediate_size: 5120, max_position_embeddings: 77, pad_with: None, num_hidden_layers: 32, num_attention_heads: 20, projection_dim: 512, activation: Activation::GeluErf, } } } // CLIP Text Model // https://github.com/huggingface/transformers/blob/674f750a57431222fa2832503a108df3badf1564/src/transformers/models/clip/modeling_clip.py #[derive(Debug)] struct ClipTextEmbeddings { token_embedding: candle_nn::Embedding, position_embedding: 
candle_nn::Embedding, position_ids: Tensor, } impl ClipTextEmbeddings { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let token_embedding = candle_nn::embedding(c.vocab_size, c.embed_dim, vs.pp("token_embedding"))?; let position_embedding = candle_nn::embedding( c.max_position_embeddings, c.embed_dim, vs.pp("position_embedding"), )?; let position_ids = Tensor::arange(0u32, c.max_position_embeddings as u32, vs.device())?.unsqueeze(0)?; Ok(ClipTextEmbeddings { token_embedding, position_embedding, position_ids, }) } } impl Module for ClipTextEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let token_embedding = self.token_embedding.forward(xs)?; let position_embedding = self.position_embedding.forward(&self.position_ids)?; token_embedding.broadcast_add(&position_embedding) } } #[derive(Debug)] struct ClipAttention { k_proj: candle_nn::Linear, v_proj: candle_nn::Linear, q_proj: candle_nn::Linear, out_proj: candle_nn::Linear, head_dim: usize, scale: f64, num_attention_heads: usize, } impl ClipAttention { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let embed_dim = c.embed_dim; let num_attention_heads = c.num_attention_heads; let k_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("k_proj"))?; let v_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("v_proj"))?; let q_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("q_proj"))?; let out_proj = candle_nn::linear(embed_dim, embed_dim, vs.pp("out_proj"))?; let head_dim = embed_dim / num_attention_heads; let scale = (head_dim as f64).powf(-0.5); Ok(ClipAttention { k_proj, v_proj, q_proj, out_proj, head_dim, scale, num_attention_heads, }) } fn shape(&self, xs: &Tensor, seq_len: usize, bsz: usize) -> Result<Tensor> { xs.reshape((bsz, seq_len, self.num_attention_heads, self.head_dim))? .transpose(1, 2)? .contiguous() } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let in_dtype = xs.dtype(); let (bsz, seq_len, embed_dim) = xs.dims3()?; let query_states = (self.q_proj.forward(xs)? * self.scale)?; let proj_shape = (bsz * self.num_attention_heads, seq_len, self.head_dim); let query_states = self .shape(&query_states, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let key_states = self .shape(&self.k_proj.forward(xs)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let value_states = self .shape(&self.v_proj.forward(xs)?, seq_len, bsz)? .reshape(proj_shape)? .to_dtype(DType::F32)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let src_len = key_states.dim(1)?; let attn_weights = attn_weights .reshape((bsz, self.num_attention_heads, seq_len, src_len))? .broadcast_add(causal_attention_mask)?; let attn_weights = attn_weights.reshape((bsz * self.num_attention_heads, seq_len, src_len))?; let attn_weights = candle_nn::ops::softmax(&attn_weights, D::Minus1)?; let attn_output = attn_weights.matmul(&value_states)?.to_dtype(in_dtype)?; let attn_output = attn_output .reshape((bsz, self.num_attention_heads, seq_len, self.head_dim))? .transpose(1, 2)? 
.reshape((bsz, seq_len, embed_dim))?; self.out_proj.forward(&attn_output) } } #[derive(Debug)] struct ClipMlp { fc1: candle_nn::Linear, fc2: candle_nn::Linear, activation: Activation, } impl ClipMlp { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let fc1 = candle_nn::linear(c.embed_dim, c.intermediate_size, vs.pp("fc1"))?; let fc2 = candle_nn::linear(c.intermediate_size, c.embed_dim, vs.pp("fc2"))?; Ok(ClipMlp { fc1, fc2, activation: c.activation, }) } } impl ClipMlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?; self.fc2.forward(&self.activation.forward(&xs)?) } } #[derive(Debug)] struct ClipEncoderLayer { self_attn: ClipAttention, layer_norm1: candle_nn::LayerNorm, mlp: ClipMlp, layer_norm2: candle_nn::LayerNorm, } impl ClipEncoderLayer { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let self_attn = ClipAttention::new(vs.pp("self_attn"), c)?; let layer_norm1 = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("layer_norm1"))?; let mlp = ClipMlp::new(vs.pp("mlp"), c)?; let layer_norm2 = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("layer_norm2"))?; Ok(ClipEncoderLayer { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self.layer_norm1.forward(xs)?; let xs = self.self_attn.forward(&xs, causal_attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = self.layer_norm2.forward(&xs)?; let xs = self.mlp.forward(&xs)?; xs + residual } } #[derive(Debug)] struct ClipEncoder { layers: Vec<ClipEncoderLayer>, } impl ClipEncoder { fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let vs = vs.pp("layers"); let mut layers: Vec<ClipEncoderLayer> = Vec::new(); for index in 0..c.num_hidden_layers { let layer = ClipEncoderLayer::new(vs.pp(&index.to_string()), c)?; layers.push(layer) } Ok(ClipEncoder { layers }) } fn forward(&self, xs: &Tensor, causal_attention_mask: &Tensor) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, causal_attention_mask)?; } Ok(xs) } } /// A CLIP transformer based model. #[derive(Debug)] pub struct ClipTextTransformer { embeddings: ClipTextEmbeddings, encoder: ClipEncoder, final_layer_norm: candle_nn::LayerNorm, } impl ClipTextTransformer { pub fn new(vs: candle_nn::VarBuilder, c: &Config) -> Result<Self> { let vs = vs.pp("text_model"); let embeddings = ClipTextEmbeddings::new(vs.pp("embeddings"), c)?; let encoder = ClipEncoder::new(vs.pp("encoder"), c)?; let final_layer_norm = candle_nn::layer_norm(c.embed_dim, 1e-5, vs.pp("final_layer_norm"))?; Ok(ClipTextTransformer { embeddings, encoder, final_layer_norm, }) } // https://github.com/huggingface/transformers/blob/674f750a57431222fa2832503a108df3badf1564/src/transformers/models/clip/modeling_clip.py#L678 fn build_causal_attention_mask( bsz: usize, seq_len: usize, mask_after: usize, device: &Device, ) -> Result<Tensor> { let mask: Vec<_> = (0..seq_len) .flat_map(|i| { (0..seq_len).map(move |j| { if j > i || j > mask_after { f32::MIN } else { 0. 
} }) }) .collect(); let mask = Tensor::from_slice(&mask, (seq_len, seq_len), device)?; mask.broadcast_as((bsz, seq_len, seq_len)) } pub fn forward_with_mask(&self, xs: &Tensor, mask_after: usize) -> Result<Tensor> { let (bsz, seq_len) = xs.dims2()?; let xs = self.embeddings.forward(xs)?; let causal_attention_mask = Self::build_causal_attention_mask(bsz, seq_len, mask_after, xs.device())?; let xs = self.encoder.forward(&xs, &causal_attention_mask)?; self.final_layer_norm.forward(&xs) } } impl Module for ClipTextTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.forward_with_mask(xs, usize::MAX) } }
candle/candle-transformers/src/models/stable_diffusion/clip.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/clip.rs", "repo_id": "candle", "token_count": 6474 }
36
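A sketch of running the CLIP text encoder above as used for Stable Diffusion v1.5 conditioning; tokenization is elided and the token ids are assumed to already be padded to the 77-position context:

use candle::{Device, Module, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::stable_diffusion::clip;

fn encode_prompt(vb: VarBuilder, token_ids: &[u32], dev: &Device) -> Result<Tensor> {
    let cfg = clip::Config::v1_5();
    let text_model = clip::ClipTextTransformer::new(vb, &cfg)?;
    let tokens = Tensor::new(token_ids, dev)?.unsqueeze(0)?; // (1, 77)
    // Hidden states used as conditioning: (1, 77, 768) for the v1.5 config.
    text_model.forward(&tokens)
}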
//! VGG-16 model implementation. //! //! See Very Deep Convolutional Networks for Large-Scale Image Recognition //! <https://arxiv.org/abs/1409.1556> use candle::{ModuleT, Result, Tensor}; use candle_nn::{FuncT, VarBuilder}; // Enum representing the different VGG models pub enum Models { Vgg13, Vgg16, Vgg19, } // Struct representing a VGG model #[derive(Debug)] pub struct Vgg<'a> { blocks: Vec<FuncT<'a>>, } // Struct representing the configuration for the pre-logit layer struct PreLogitConfig { in_dim: (usize, usize, usize, usize), target_in: usize, target_out: usize, } // Implementation of the VGG model impl<'a> Vgg<'a> { // Function to create a new VGG model pub fn new(vb: VarBuilder<'a>, model: Models) -> Result<Self> { let blocks = match model { Models::Vgg13 => vgg13_blocks(vb)?, Models::Vgg16 => vgg16_blocks(vb)?, Models::Vgg19 => vgg19_blocks(vb)?, }; Ok(Self { blocks }) } } // Implementation of the forward pass for the VGG model impl ModuleT for Vgg<'_> { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor> { let mut xs = xs.unsqueeze(0)?; for block in self.blocks.iter() { xs = xs.apply_t(block, train)?; } Ok(xs) } } // Function to create a conv2d block // The block is composed of two conv2d layers followed by a max pool layer fn conv2d_block(convs: &[(usize, usize, &str)], vb: &VarBuilder) -> Result<FuncT<'static>> { let layers = convs .iter() .enumerate() .map(|(_, &(in_c, out_c, name))| { candle_nn::conv2d( in_c, out_c, 3, candle_nn::Conv2dConfig { stride: 1, padding: 1, ..Default::default() }, vb.pp(name), ) }) .collect::<Result<Vec<_>>>()?; Ok(FuncT::new(move |xs, _train| { let mut xs = xs.clone(); for layer in layers.iter() { xs = xs.apply(layer)?.relu()? } xs = xs.max_pool2d_with_stride(2, 2)?; Ok(xs) })) } // Function to create a fully connected layer // The layer is composed of two linear layers followed by a dropout layer fn fully_connected( num_classes: usize, pre_logit_1: PreLogitConfig, pre_logit_2: PreLogitConfig, vb: VarBuilder, ) -> Result<FuncT> { let lin = get_weights_and_biases( &vb.pp("pre_logits.fc1"), pre_logit_1.in_dim, pre_logit_1.target_in, pre_logit_1.target_out, )?; let lin2 = get_weights_and_biases( &vb.pp("pre_logits.fc2"), pre_logit_2.in_dim, pre_logit_2.target_in, pre_logit_2.target_out, )?; let dropout1 = candle_nn::Dropout::new(0.5); let dropout2 = candle_nn::Dropout::new(0.5); let dropout3 = candle_nn::Dropout::new(0.5); Ok(FuncT::new(move |xs, train| { let xs = xs.reshape((1, pre_logit_1.target_out))?; let xs = xs.apply_t(&dropout1, train)?.apply(&lin)?.relu()?; let xs = xs.apply_t(&dropout2, train)?.apply(&lin2)?.relu()?; let lin3 = candle_nn::linear(4096, num_classes, vb.pp("head.fc"))?; let xs = xs.apply_t(&dropout3, train)?.apply(&lin3)?.relu()?; Ok(xs) })) } // Function to get the weights and biases for a layer // This is required because the weights and biases are stored in different format than our linear layer expects fn get_weights_and_biases( vs: &VarBuilder, in_dim: (usize, usize, usize, usize), target_in: usize, target_out: usize, ) -> Result<candle_nn::Linear> { let init_ws = candle_nn::init::DEFAULT_KAIMING_NORMAL; let ws = vs.get_with_hints(in_dim, "weight", init_ws)?; let ws = ws.reshape((target_in, target_out))?; let bound = 1. 
/ (target_out as f64).sqrt(); let init_bs = candle_nn::Init::Uniform { lo: -bound, up: bound, }; let bs = vs.get_with_hints(target_in, "bias", init_bs)?; Ok(candle_nn::Linear::new(ws, Some(bs))) } fn vgg13_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block(&[(128, 256, "features.10"), (256, 256, "features.12")], &vb)?, conv2d_block(&[(256, 512, "features.15"), (512, 512, "features.17")], &vb)?, conv2d_block(&[(512, 512, "features.20"), (512, 512, "features.22")], &vb)?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) } fn vgg16_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block( &[ (128, 256, "features.10"), (256, 256, "features.12"), (256, 256, "features.14"), ], &vb, )?, conv2d_block( &[ (256, 512, "features.17"), (512, 512, "features.19"), (512, 512, "features.21"), ], &vb, )?, conv2d_block( &[ (512, 512, "features.24"), (512, 512, "features.26"), (512, 512, "features.28"), ], &vb, )?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) } fn vgg19_blocks(vb: VarBuilder) -> Result<Vec<FuncT>> { let num_classes = 1000; let blocks = vec![ conv2d_block(&[(3, 64, "features.0"), (64, 64, "features.2")], &vb)?, conv2d_block(&[(64, 128, "features.5"), (128, 128, "features.7")], &vb)?, conv2d_block( &[ (128, 256, "features.10"), (256, 256, "features.12"), (256, 256, "features.14"), (256, 256, "features.16"), ], &vb, )?, conv2d_block( &[ (256, 512, "features.19"), (512, 512, "features.21"), (512, 512, "features.23"), (512, 512, "features.25"), ], &vb, )?, conv2d_block( &[ (512, 512, "features.28"), (512, 512, "features.30"), (512, 512, "features.32"), (512, 512, "features.34"), ], &vb, )?, fully_connected( num_classes, PreLogitConfig { in_dim: (4096, 512, 7, 7), target_in: 4096, target_out: 512 * 7 * 7, }, PreLogitConfig { in_dim: (4096, 4096, 1, 1), target_in: 4096, target_out: 4096, }, vb.clone(), )?, ]; Ok(blocks) }
candle/candle-transformers/src/models/vgg.rs/0
{ "file_path": "candle/candle-transformers/src/models/vgg.rs", "repo_id": "candle", "token_count": 4303 }
37
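A sketch of instantiating the VGG-16 variant above and classifying one image; weight loading is elided and the zero tensor is a placeholder for a normalized 3x224x224 input:

use candle::{DType, Device, ModuleT, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::vgg::{Models, Vgg};

fn classify(vb: VarBuilder, dev: &Device) -> Result<Tensor> {
    let model = Vgg::new(vb, Models::Vgg16)?;
    // forward_t unsqueezes internally, so a single (3, 224, 224) image is expected.
    let img = Tensor::zeros((3, 224, 224), DType::F32, dev)?;
    model.forward_t(&img, false) // train = false disables dropout; (1, 1000) logits
}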
pub mod text_generation;
candle/candle-transformers/src/pipelines/mod.rs/0
{ "file_path": "candle/candle-transformers/src/pipelines/mod.rs", "repo_id": "candle", "token_count": 7 }
38
import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url, cacheFile = true) { if (!cacheFile) return new Uint8Array(await (await fetch(url)).arrayBuffer()); const cacheName = "blip-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Blip { static instance = {}; static async getInstance( weightsURL, tokenizerURL, configURL, modelID, quantized ) { if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new Model( weightsArrayU8, tokenizerArrayU8, configArrayU8, quantized ); } else { self.postMessage({ status: "ready", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized } = event.data; try { self.postMessage({ status: "status", message: "Loading Blip Model..." }); const model = await Blip.getInstance( weightsURL, tokenizerURL, configURL, modelID, quantized ); self.postMessage({ status: "status", message: "Running Blip Inference...", }); const imageArrayU8 = await fetchArrayBuffer(imageURL, false); const output = model.generate_caption_from_image(imageArrayU8); self.postMessage({ status: "complete", message: "complete", output: output, }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/blip/blipWorker.js/0
{ "file_path": "candle/candle-wasm-examples/blip/blipWorker.js", "repo_id": "candle", "token_count": 815 }
39
mod app; pub mod model; pub mod worker; pub use app::App; pub use worker::Worker;
candle/candle-wasm-examples/llama2-c/src/lib.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/lib.rs", "repo_id": "candle", "token_count": 29 }
40
import init, { run_app } from './pkg/candle_wasm_example_whisper.js'; async function main() { await init('/pkg/candle_wasm_example_whisper_bg.wasm'); run_app(); } main()
candle/candle-wasm-examples/whisper/main.js/0
{ "file_path": "candle/candle-wasm-examples/whisper/main.js", "repo_id": "candle", "token_count": 73 }
41
fn main() { wasm_logger::init(wasm_logger::Config::new(log::Level::Trace)); console_error_panic_hook::set_once(); yew::Renderer::<candle_wasm_example_yolo::App>::new().render(); }
candle/candle-wasm-examples/yolo/src/bin/app.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/bin/app.rs", "repo_id": "candle", "token_count": 82 }
42
MONGODB_URL=mongodb://localhost:27017/
chat-ui/.env.ci/0
{ "file_path": "chat-ui/.env.ci", "repo_id": "chat-ui", "token_count": 16 }
43
export default { plugins: { tailwindcss: {}, autoprefixer: {}, }, };
chat-ui/postcss.config.js/0
{ "file_path": "chat-ui/postcss.config.js", "repo_id": "chat-ui", "token_count": 34 }
44
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/stores"; import { PUBLIC_APP_DESCRIPTION, PUBLIC_APP_NAME, PUBLIC_APP_DISCLAIMER_MESSAGE, } from "$env/static/public"; import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte"; import Modal from "$lib/components/Modal.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import { cookiesAreEnabled } from "$lib/utils/cookiesAreEnabled"; import Logo from "./icons/Logo.svelte"; const settings = useSettingsStore(); </script> <Modal> <div class="flex w-full flex-col items-center gap-6 bg-gradient-to-b from-primary-500/40 via-primary-500/10 to-primary-500/0 px-5 pb-8 pt-9 text-center sm:px-6" > <h2 class="flex items-center text-2xl font-semibold text-gray-800"> <Logo classNames="mr-1" /> {PUBLIC_APP_NAME} </h2> <p class="text-lg font-semibold leading-snug text-gray-800" style="text-wrap: balance;"> {PUBLIC_APP_DESCRIPTION} </p> <p class="text-sm text-gray-500"> {PUBLIC_APP_DISCLAIMER_MESSAGE} </p> <div class="flex w-full flex-col items-center gap-2"> {#if $page.data.guestMode || !$page.data.loginEnabled} <button class="w-full justify-center rounded-full border-2 border-gray-300 bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" class:bg-white={$page.data.loginEnabled} class:text-gray-800={$page.data.loginEnabled} class:hover:bg-slate-100={$page.data.loginEnabled} on:click|preventDefault|stopPropagation={() => { if (!cookiesAreEnabled()) { window.open(window.location.href, "_blank"); } $settings.ethicsModalAccepted = true; }} > {#if $page.data.loginEnabled} Try as guest {:else} Start chatting {/if} </button> {/if} {#if $page.data.loginEnabled} <form action="{base}/login" target="_parent" method="POST" class="w-full"> <button type="submit" class="flex w-full items-center justify-center whitespace-nowrap rounded-full border-2 border-black bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" > Sign in {#if PUBLIC_APP_NAME === "HuggingChat"} with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5 flex-none" /> Hugging Face {/if} </button> </form> {/if} </div> </div> </Modal>
chat-ui/src/lib/components/DisclaimerModal.svelte/0
{ "file_path": "chat-ui/src/lib/components/DisclaimerModal.svelte", "repo_id": "chat-ui", "token_count": 1051 }
45
<script lang="ts"> import Modal from "./Modal.svelte"; import CarbonClose from "~icons/carbon/close"; import CarbonBlockchain from "~icons/carbon/blockchain"; export let preprompt: string; let isOpen = false; </script> <button type="button" class="mx-auto flex items-center gap-1.5 rounded-full border border-gray-100 bg-gray-50 px-3 py-1 text-xs text-gray-500 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700" on:click={() => (isOpen = !isOpen)} on:keydown={(e) => e.key === "Enter" && (isOpen = !isOpen)} > <CarbonBlockchain class="text-xxs" /> Using Custom System Prompt </button> {#if isOpen} <Modal on:close={() => (isOpen = false)} width="w-full max-w-2xl"> <div class="flex w-full flex-col gap-5 p-6"> <div class="flex items-start justify-between text-xl font-semibold text-gray-800"> <h2>System Prompt</h2> <button type="button" class="group" on:click={() => (isOpen = false)}> <CarbonClose class="mt-auto text-gray-900 group-hover:text-gray-500" /> </button> </div> <textarea disabled value={preprompt} class="min-h-[420px] w-full resize-none rounded-lg border bg-gray-50 p-2.5 text-gray-600 max-sm:text-sm" /> </div> </Modal> {/if}
chat-ui/src/lib/components/SystemPromptModal.svelte/0
{ "file_path": "chat-ui/src/lib/components/SystemPromptModal.svelte", "repo_id": "chat-ui", "token_count": 508 }
46
<script lang="ts"> export let classNames = ""; </script> <svg xmlns="http://www.w3.org/2000/svg" class={classNames} width="1em" height="1em" fill="none" viewBox="0 0 32 32" ><path fill="currentColor" fill-rule="evenodd" d="M3.143 20.286h4.286v2.142H3.143A2.143 2.143 0 0 1 1 20.287V3.143A2.143 2.143 0 0 1 3.143 1h17.143a2.143 2.143 0 0 1 2.142 2.143v4.286h-2.142V3.143H3.143v17.143Zm9.643-12.857v3.214H16v2.143h-3.214V16h-2.143v-3.214H7.429v-2.143h3.214V7.429h2.143Zm14.185 2.639 3.533 3.532a1.7 1.7 0 0 1 0 2.4L15.5 31H9.57v-5.928l15-15.004a1.7 1.7 0 0 1 2.4 0Zm-15.257 18.79h2.897l10.116-10.116-2.899-2.897L11.714 25.96v2.897ZM23.346 14.33l2.897 2.897 2.429-2.43-2.897-2.896-2.43 2.429Z" clip-rule="evenodd" /></svg >
chat-ui/src/lib/components/icons/IconNew.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconNew.svelte", "repo_id": "chat-ui", "token_count": 426 }
47
import { TEXT_EMBEDDING_MODELS } from "$env/static/private"; import { z } from "zod"; import { sum } from "$lib/utils/sum"; import { embeddingEndpoints, embeddingEndpointSchema, type EmbeddingEndpoint, } from "$lib/server/embeddingEndpoints/embeddingEndpoints"; import { embeddingEndpointTransformersJS } from "$lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints"; import JSON5 from "json5"; const modelConfig = z.object({ /** Used as an identifier in DB */ id: z.string().optional(), /** Used to link to the model page, and for inference */ name: z.string().min(1), displayName: z.string().min(1).optional(), description: z.string().min(1).optional(), websiteUrl: z.string().url().optional(), modelUrl: z.string().url().optional(), endpoints: z.array(embeddingEndpointSchema).nonempty(), chunkCharLength: z.number().positive(), maxBatchSize: z.number().positive().optional(), preQuery: z.string().default(""), prePassage: z.string().default(""), }); // Default embedding model for backward compatibility const rawEmbeddingModelJSON = TEXT_EMBEDDING_MODELS || `[ { "name": "Xenova/gte-small", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]`; const embeddingModelsRaw = z.array(modelConfig).parse(JSON5.parse(rawEmbeddingModelJSON)); const processEmbeddingModel = async (m: z.infer<typeof modelConfig>) => ({ ...m, id: m.id || m.name, }); const addEndpoint = (m: Awaited<ReturnType<typeof processEmbeddingModel>>) => ({ ...m, getEndpoint: async (): Promise<EmbeddingEndpoint> => { if (!m.endpoints) { return embeddingEndpointTransformersJS({ type: "transformersjs", weight: 1, model: m, }); } const totalWeight = sum(m.endpoints.map((e) => e.weight)); let random = Math.random() * totalWeight; for (const endpoint of m.endpoints) { if (random < endpoint.weight) { const args = { ...endpoint, model: m }; switch (args.type) { case "tei": return embeddingEndpoints.tei(args); case "transformersjs": return embeddingEndpoints.transformersjs(args); case "openai": return embeddingEndpoints.openai(args); } } random -= endpoint.weight; } throw new Error(`Failed to select embedding endpoint`); }, }); export const embeddingModels = await Promise.all( embeddingModelsRaw.map((e) => processEmbeddingModel(e).then(addEndpoint)) ); export const defaultEmbeddingModel = embeddingModels[0]; const validateEmbeddingModel = (_models: EmbeddingBackendModel[], key: "id" | "name") => { return z.enum([_models[0][key], ..._models.slice(1).map((m) => m[key])]); }; export const validateEmbeddingModelById = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "id"); }; export const validateEmbeddingModelByName = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "name"); }; export type EmbeddingBackendModel = typeof defaultEmbeddingModel;
chat-ui/src/lib/server/embeddingModels.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingModels.ts", "repo_id": "chat-ui", "token_count": 1061 }
48
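The endpoint selection in the `embeddingModels.ts` row above draws an endpoint with probability proportional to its `weight`. A minimal Python sketch of the same weighted-random idea, for illustration only (the dict layout and names here are assumptions, not the chat-ui types):

```python
import random

def pick_weighted(endpoints):
    """Pick one endpoint with probability proportional to its weight.

    `endpoints` is a list of dicts such as {"type": "tei", "weight": 2} —
    an illustrative stand-in for the chat-ui endpoint config objects.
    """
    total = sum(e["weight"] for e in endpoints)
    r = random.random() * total
    for endpoint in endpoints:
        if r < endpoint["weight"]:
            return endpoint
        r -= endpoint["weight"]
    raise RuntimeError("Failed to select an endpoint")

if __name__ == "__main__":
    eps = [{"type": "tei", "weight": 1}, {"type": "openai", "weight": 3}]
    counts = {"tei": 0, "openai": 0}
    for _ in range(10_000):
        counts[pick_weighted(eps)["type"]] += 1
    print(counts)  # roughly a 1:3 split between the two endpoint types
```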
import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import { format } from "date-fns"; import { downloadFile } from "./files/downloadFile"; export async function preprocessMessages( messages: Message[], webSearch: Message["webSearch"], multimodal: boolean, id: Conversation["_id"] ): Promise<Message[]> { return await Promise.all( structuredClone(messages).map(async (message, idx) => { // start by adding websearch to the last message if (idx === messages.length - 1 && webSearch && webSearch.context) { const lastQuestion = messages.findLast((el) => el.from === "user")?.content ?? ""; const previousQuestions = messages .filter((el) => el.from === "user") .slice(0, -1) .map((el) => el.content); const currentDate = format(new Date(), "MMMM d, yyyy"); message.content = `I searched the web using the query: ${webSearch.searchQuery}. Today is ${currentDate} and here are the results: ===================== ${webSearch.context} ===================== ${previousQuestions.length > 0 ? `Previous questions: \n- ${previousQuestions.join("\n- ")}` : ""} Answer the question: ${lastQuestion}`; } // handle files if model is multimodal if (multimodal) { if (message.files && message.files.length > 0) { const markdowns = await Promise.all( message.files.map(async (hash) => { try { const { content: image, mime } = await downloadFile(hash, id); const b64 = image.toString("base64"); return `![](data:${mime};base64,${b64})})`; } catch (e) { console.error(e); } }) ); message.content += markdowns.join("\n "); } else { // if no image, append an empty white image message.content += "\n![](data:image/png;base64,/9j/4AAQSkZJRgABAQAAAQABAAD/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/wAARCAAQABADAREAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD+/igAoAKACgD/2Q==)"; } } return message; }) ); }
chat-ui/src/lib/server/preprocessMessages.ts/0
{ "file_path": "chat-ui/src/lib/server/preprocessMessages.ts", "repo_id": "chat-ui", "token_count": 1335 }
49
import { writable } from "svelte/store"; export interface TitleUpdate { convId: string; title: string; } export default writable<TitleUpdate | null>(null);
chat-ui/src/lib/stores/titleUpdate.ts/0
{ "file_path": "chat-ui/src/lib/stores/titleUpdate.ts", "repo_id": "chat-ui", "token_count": 50 }
50
import { defaultModel } from "$lib/server/models"; import type { Assistant } from "./Assistant"; import type { Timestamps } from "./Timestamps"; import type { User } from "./User"; export interface Settings extends Timestamps { userId?: User["_id"]; sessionId?: string; /** * Note: Only conversations with this settings explicitly set to true should be shared. * * This setting is explicitly set to true when users accept the ethics modal. * */ shareConversationsWithModelAuthors: boolean; ethicsModalAcceptedAt: Date | null; activeModel: string; hideEmojiOnSidebar?: boolean; // model name and system prompts customPrompts?: Record<string, string>; assistants?: Assistant["_id"][]; } // TODO: move this to a constant file along with other constants export const DEFAULT_SETTINGS = { shareConversationsWithModelAuthors: true, activeModel: defaultModel.id, hideEmojiOnSidebar: false, customPrompts: {}, assistants: [], };
chat-ui/src/lib/types/Settings.ts/0
{ "file_path": "chat-ui/src/lib/types/Settings.ts", "repo_id": "chat-ui", "token_count": 289 }
51
import { base } from "$app/paths"; import { PUBLIC_ORIGIN, PUBLIC_SHARE_PREFIX } from "$env/static/public"; export function getShareUrl(url: URL, shareId: string): string { return `${PUBLIC_SHARE_PREFIX || `${PUBLIC_ORIGIN || url.origin}${base}`}/r/${shareId}`; }
chat-ui/src/lib/utils/getShareUrl.ts/0
{ "file_path": "chat-ui/src/lib/utils/getShareUrl.ts", "repo_id": "chat-ui", "token_count": 99 }
52
import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import { v4 } from "uuid"; export function addChildren( conv: Pick<Conversation, "messages" | "rootMessageId">, message: Omit<Message, "id">, parentId?: Message["id"] ): Message["id"] { // if this is the first message we just push it if (conv.messages.length === 0) { const messageId = v4(); conv.rootMessageId = messageId; conv.messages.push({ ...message, ancestors: [], id: messageId, }); return messageId; } if (!parentId) { throw new Error("You need to specify a parentId if this is not the first message"); } const messageId = v4(); if (!conv.rootMessageId) { // if there is no parentId we just push the message if (!!parentId && parentId !== conv.messages[conv.messages.length - 1].id) { throw new Error("This is a legacy conversation, you can only append to the last message"); } conv.messages.push({ ...message, id: messageId }); return messageId; } const ancestors = [...(conv.messages.find((m) => m.id === parentId)?.ancestors ?? []), parentId]; conv.messages.push({ ...message, ancestors, id: messageId, children: [], }); const parent = conv.messages.find((m) => m.id === parentId); if (parent) { if (parent.children) { parent.children.push(messageId); } else parent.children = [messageId]; } return messageId; }
chat-ui/src/lib/utils/tree/addChildren.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/addChildren.ts", "repo_id": "chat-ui", "token_count": 501 }
53
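The `addChildren` helper above maintains the conversation as a message tree by recording each message's `ancestors` and appending its id to the parent's `children`. The sketch below mirrors that bookkeeping in Python to make the data structure explicit; it is an illustrative port with simplified error handling, not the chat-ui implementation (the legacy, non-rooted conversation path is omitted):

```python
import uuid

def add_child(conv, message, parent_id=None):
    """Append `message` to a conversation tree and return the new message id."""
    message_id = str(uuid.uuid4())
    if not conv["messages"]:
        conv["root_message_id"] = message_id
        conv["messages"].append({**message, "id": message_id, "ancestors": [], "children": []})
        return message_id
    if parent_id is None:
        raise ValueError("parent_id is required once the conversation has messages")
    parent = next((m for m in conv["messages"] if m["id"] == parent_id), None)
    if parent is None:
        raise ValueError("parent message not found")
    ancestors = parent.get("ancestors", []) + [parent_id]
    conv["messages"].append({**message, "id": message_id, "ancestors": ancestors, "children": []})
    parent.setdefault("children", []).append(message_id)
    return message_id

# Usage: build a two-message conversation.
conv = {"messages": [], "root_message_id": None}
root = add_child(conv, {"from": "user", "content": "hi"})
reply = add_child(conv, {"from": "assistant", "content": "hello"}, parent_id=root)
print(conv["messages"][1]["ancestors"] == [root])  # True
```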
import { collections } from "$lib/server/database"; import { authCondition } from "$lib/server/auth"; import { z } from "zod"; import { ObjectId } from "mongodb"; export async function GET({ locals, params }) { const id = z.string().parse(params.id); const convId = new ObjectId(id); if (locals.user?._id || locals.sessionId) { const conv = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (conv) { const res = { id: conv._id, title: conv.title, updatedAt: conv.updatedAt, modelId: conv.model, messages: conv.messages.map((message) => ({ content: message.content, from: message.from, id: message.id, createdAt: message.createdAt, updatedAt: message.updatedAt, webSearch: message.webSearch, })), }; return Response.json(res); } else { return Response.json({ message: "Conversation not found" }, { status: 404 }); } } else { return Response.json({ message: "Must have session cookie" }, { status: 401 }); } }
chat-ui/src/routes/api/conversation/[id]/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/conversation/[id]/+server.ts", "repo_id": "chat-ui", "token_count": 396 }
54
import { authCondition } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { error } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; import { z } from "zod"; import type { RequestHandler } from "./$types"; import { downloadFile } from "$lib/server/files/downloadFile"; export const GET: RequestHandler = async ({ locals, params }) => { const sha256 = z.string().parse(params.sha256); const userId = locals.user?._id ?? locals.sessionId; // check user if (!userId) { throw error(401, "Unauthorized"); } if (params.id.length !== 7) { const convId = new ObjectId(z.string().parse(params.id)); // check if the user has access to the conversation const conv = await collections.conversations.findOne({ _id: convId, ...authCondition(locals), }); if (!conv) { throw error(404, "Conversation not found"); } } else { // check if the user has access to the conversation const conv = await collections.sharedConversations.findOne({ _id: params.id, }); if (!conv) { throw error(404, "Conversation not found"); } } const { content, mime } = await downloadFile(sha256, params.id); return new Response(content, { headers: { "Content-Type": mime ?? "application/octet-stream", }, }); };
chat-ui/src/routes/conversation/[id]/output/[sha256]/+server.ts/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/output/[sha256]/+server.ts", "repo_id": "chat-ui", "token_count": 439 }
55
<script lang="ts"> import { onMount } from "svelte"; import { base } from "$app/paths"; import { afterNavigate, goto } from "$app/navigation"; import { page } from "$app/stores"; import { useSettingsStore } from "$lib/stores/settings"; import CarbonClose from "~icons/carbon/close"; import CarbonArrowUpRight from "~icons/carbon/ArrowUpRight"; import CarbonAdd from "~icons/carbon/add"; import UserIcon from "~icons/carbon/user"; import type { LayoutData } from "../$types"; export let data: LayoutData; let previousPage: string = base; let assistantsSection: HTMLHeadingElement; onMount(() => { if ($page.params?.assistantId) { assistantsSection.scrollIntoView(); } }); afterNavigate(({ from }) => { if (!from?.url.pathname.includes("settings")) { previousPage = from?.url.toString() || previousPage; } }); const settings = useSettingsStore(); </script> <div class="grid h-full w-full grid-cols-1 grid-rows-[auto,1fr] content-start gap-x-4 overflow-hidden p-4 md:grid-cols-3 md:grid-rows-[auto,1fr] md:p-8" > <div class="col-span-1 mb-4 flex items-center justify-between md:col-span-3"> <h2 class="text-xl font-bold">Settings</h2> <button class="btn rounded-lg" on:click={() => { goto(previousPage); }} > <CarbonClose class="text-xl text-gray-900 hover:text-black" /> </button> </div> <div class="col-span-1 flex flex-col overflow-y-auto whitespace-nowrap max-md:-mx-4 max-md:h-[245px] max-md:border max-md:border-b-2 md:pr-6" > <h3 class="pb-3 pl-3 pt-2 text-[.8rem] text-gray-800 sm:pl-1">Models</h3> {#each data.models.filter((el) => !el.unlisted) as model} <a href="{base}/settings/{model.id}" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {model.id === $page.params.model ? '!bg-gray-100 !text-gray-800' : ''}" > <div class="truncate">{model.displayName}</div> {#if model.id === $settings.activeModel} <div class="ml-auto rounded-lg bg-black px-2 py-1.5 text-xs font-semibold leading-none text-white" > Active </div> {/if} </a> {/each} <!-- if its huggingchat, the number of assistants owned by the user must be non-zero to show the UI --> {#if data.enableAssistants} <h3 bind:this={assistantsSection} class="pb-3 pl-3 pt-5 text-[.8rem] text-gray-800 sm:pl-1"> Assistants </h3> {#if !data.loginEnabled || (data.loginEnabled && !!data.user)} <a href="{base}/settings/assistants/new" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {$page.url.pathname === `${base}/settings/assistants/new` ? '!bg-gray-100 !text-gray-800' : ''}" > <CarbonAdd /> <div class="truncate">Create new assistant</div> </a> {/if} {#each data.assistants as assistant} <a href="{base}/settings/assistants/{assistant._id.toString()}" class="group flex h-10 flex-none items-center gap-2 pl-2 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl {assistant._id.toString() === $page.params.assistantId ? 
'!bg-gray-100 !text-gray-800' : ''}" > {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id.toString()}/avatar.jpg?hash={assistant.avatar}" alt="Avatar" class="h-6 w-6 rounded-full object-cover" /> {:else} <div class="flex size-6 items-center justify-center rounded-full bg-gray-300 font-bold uppercase text-gray-500" > {assistant.name[0]} </div> {/if} <div class="truncate">{assistant.name}</div> {#if assistant._id.toString() === $settings.activeModel} <div class="ml-auto rounded-lg bg-black px-2 py-1.5 text-xs font-semibold leading-none text-white" > Active </div> {/if} </a> {/each} <a href="{base}/assistants" class="group flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 md:rounded-xl" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " /> <div class="truncate">Browse Assistants</div> </a> {/if} <a href="{base}/settings" class="group mt-auto flex h-10 flex-none items-center gap-2 pl-3 pr-2 text-sm text-gray-500 hover:bg-gray-100 max-md:order-first md:rounded-xl {$page.url.pathname === `${base}/settings` ? '!bg-gray-100 !text-gray-800' : ''}" > <UserIcon class="text-sm" /> Application Settings </a> </div> <div class="col-span-1 w-full overflow-y-auto overflow-x-clip px-1 max-md:pt-4 md:col-span-2 md:row-span-2" > <slot /> </div> </div>
chat-ui/src/routes/settings/(nav)/+layout.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/+layout.svelte", "repo_id": "chat-ui", "token_count": 2052 }
56
@import "highlight.js/styles/atom-one-dark";
chat-ui/src/styles/highlight-js.css/0
{ "file_path": "chat-ui/src/styles/highlight-js.css", "repo_id": "chat-ui", "token_count": 17 }
57
const defaultTheme = require("tailwindcss/defaultTheme"); const colors = require("tailwindcss/colors"); import dotenv from "dotenv"; dotenv.config({ path: "./.env" }); /** @type {import('tailwindcss').Config} */ export default { darkMode: "class", content: ["./src/**/*.{html,js,svelte,ts}"], theme: { extend: { colors: { primary: colors[process.env.PUBLIC_APP_COLOR], }, // fontFamily: { // sans: ['"Inter"', ...defaultTheme.fontFamily.sans] // }, fontSize: { xxs: "0.625rem", smd: "0.94rem", }, }, }, plugins: [ require("tailwind-scrollbar")({ nocompatible: true }), require("@tailwindcss/typography"), ], };
chat-ui/tailwind.config.cjs/0
{ "file_path": "chat-ui/tailwind.config.cjs", "repo_id": "chat-ui", "token_count": 276 }
58
import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration SPEED_TEST_N_EXAMPLES = 50_000 SMALL_TEST = 5_000 RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def read(dataset: datasets.Dataset, length): for i in range(length): _ = dataset[i] @get_duration def read_batch(dataset: datasets.Dataset, length, batch_size): for i in range(0, len(dataset), batch_size): _ = dataset[i : i + batch_size] @get_duration def read_formatted(dataset: datasets.Dataset, length, type): with dataset.formatted_as(type=type): for i in range(length): _ = dataset[i] @get_duration def read_formatted_batch(dataset: datasets.Dataset, length, batch_size, type): with dataset.formatted_as(type=type): for i in range(0, length, batch_size): _ = dataset[i : i + batch_size] def benchmark_iterating(): times = {"num examples": SPEED_TEST_N_EXAMPLES} functions = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted, {"type": "pandas", "length": SMALL_TEST}), (read_formatted, {"type": "torch", "length": SMALL_TEST}), (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}), ] functions_shuffled = [ (read, {"length": SMALL_TEST}), (read, {"length": SPEED_TEST_N_EXAMPLES}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}), (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}), (read_formatted, {"type": "numpy", "length": SMALL_TEST}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}), (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("generating dataset") features = datasets.Features( {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")} ) dataset = generate_example_dataset( os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={"list": (100,)}, ) print("first set of iterations") for func, kwargs in functions: print(func.__name__, str(kwargs)) times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs) print("shuffling dataset") dataset = dataset.shuffle() print("Second set of iterations (after shuffling") for func, kwargs in functions_shuffled: print("shuffled ", func.__name__, str(kwargs)) times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func( dataset, **kwargs ) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
datasets/benchmarks/benchmark_iterating.py/0
{ "file_path": "datasets/benchmarks/benchmark_iterating.py", "repo_id": "datasets", "token_count": 1697 }
59
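The benchmark above imports `get_duration` from a local `utils` module that is not included in this listing; each decorated function returns its wall-clock runtime, which the script stores in the `times` dict. The following is only a plausible reconstruction of that decorator, shown for clarity — the actual helper may differ:

```python
import functools
import time

def get_duration(func):
    """Return the wall-clock seconds a call takes instead of its result (assumed behavior)."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)          # result is discarded; only timing matters
        return time.time() - start
    return wrapper

@get_duration
def busy_loop(n):
    for _ in range(n):
        pass

print(f"busy_loop took {busy_loop(1_000_000):.4f}s")
```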
# Dataset features [`Features`] defines the internal structure of a dataset. It is used to specify the underlying serialization format. What's more interesting to you though is that [`Features`] contains high-level information about everything from the column names and types, to the [`ClassLabel`]. You can think of [`Features`] as the backbone of a dataset. The [`Features`] format is simple: `dict[column_name, column_type]`. It is a dictionary of column name and column type pairs. The column type provides a wide range of options for describing the type of data you have. Let's have a look at the features of the MRPC dataset from the GLUE benchmark: ```py >>> from datasets import load_dataset >>> dataset = load_dataset('glue', 'mrpc', split='train') >>> dataset.features {'idx': Value(dtype='int32', id=None), 'label': ClassLabel(num_classes=2, names=['not_equivalent', 'equivalent'], names_file=None, id=None), 'sentence1': Value(dtype='string', id=None), 'sentence2': Value(dtype='string', id=None), } ``` The [`Value`] feature tells 🤗 Datasets: - The `idx` data type is `int32`. - The `sentence1` and `sentence2` data types are `string`. 🤗 Datasets supports many other data types such as `bool`, `float32` and `binary` to name just a few. <Tip> Refer to [`Value`] for a full list of supported data types. </Tip> The [`ClassLabel`] feature informs 🤗 Datasets the `label` column contains two classes. The classes are labeled `not_equivalent` and `equivalent`. Labels are stored as integers in the dataset. When you retrieve the labels, [`ClassLabel.int2str`] and [`ClassLabel.str2int`] carries out the conversion from integer value to label name, and vice versa. If your data type contains a list of objects, then you want to use the [`Sequence`] feature. Remember the SQuAD dataset? ```py >>> from datasets import load_dataset >>> dataset = load_dataset('squad', split='train') >>> dataset.features {'answers': Sequence(feature={'text': Value(dtype='string', id=None), 'answer_start': Value(dtype='int32', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)} ``` The `answers` field is constructed using the [`Sequence`] feature because it contains two subfields, `text` and `answer_start`, which are lists of `string` and `int32`, respectively. <Tip> See the [flatten](./process#flatten) section to learn how you can extract the nested subfields as their own independent columns. </Tip> The array feature type is useful for creating arrays of various sizes. You can create arrays with two dimensions using [`Array2D`], and even arrays with five dimensions using [`Array5D`]. ```py >>> features = Features({'a': Array2D(shape=(1, 3), dtype='int32')}) ``` The array type also allows the first dimension of the array to be dynamic. This is useful for handling sequences with variable lengths such as sentences, without having to pad or truncate the input to a uniform shape. ```py >>> features = Features({'a': Array3D(shape=(None, 5, 2), dtype='int32')}) ``` ## Audio feature Audio datasets have a column with type [`Audio`], which contains three important fields: * `array`: the decoded audio data represented as a 1-dimensional array. * `path`: the path to the downloaded audio file. * `sampling_rate`: the sampling rate of the audio data. 
When you load an audio dataset and call the audio column, the [`Audio`] feature automatically decodes and resamples the audio file: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") >>> dataset[0]["audio"] {'array': array([ 0. , 0.00024414, -0.00024414, ..., -0.00024414, 0. , 0. ], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 8000} ``` <Tip warning={true}> Index into an audio dataset using the row index first and then the `audio` column - `dataset[0]["audio"]` - to avoid decoding and resampling all the audio files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. </Tip> With `decode=False`, the [`Audio`] type simply gives you the path or the bytes of the audio file, without decoding it into an `array`, ```py >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train").cast_column("audio", Audio(decode=False)) >>> dataset[0] {'audio': {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav'}, 'english_transcription': 'I would like to set up a joint account with my partner', 'intent_class': 11, 'lang_id': 4, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'transcription': 'I would like to set up a joint account with my partner'} ``` ## Image feature Image datasets have a column with type [`Image`], which loads `PIL.Image` objects from images stored as bytes: When you load an image dataset and call the image column, the [`Image`] feature automatically decodes the image file: ```py >>> from datasets import load_dataset, Image >>> dataset = load_dataset("beans", split="train") >>> dataset[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x125506CF8> ``` <Tip warning={true}> Index into an image dataset using the row index first and then the `image` column - `dataset[0]["image"]` - to avoid decoding all the image files in the dataset. Otherwise, this can be a slow and time-consuming process if you have a large dataset. </Tip> With `decode=False`, the [`Image`] type simply gives you the path or the bytes of the image file, without decoding it into an `PIL.Image`, ```py >>> dataset = load_dataset("beans", split="train").cast_column("image", Image(decode=False)) >>> dataset[0]["image"] {'bytes': None, 'path': '/Users/username/.cache/huggingface/datasets/downloads/extracted/772e7c1fba622cff102b85dd74bcce46e8168634df4eaade7bedd3b8d91d3cd7/train/healthy/healthy_train.265.jpg'} ``` Depending on the dataset, you may get the path to the local downloaded image, or the content of the image as bytes if the dataset is not made of individual files. You can also define a dataset of images from numpy arrays: ```python >>> ds = Dataset.from_dict({"i": [np.zeros(shape=(16, 16, 3), dtype=np.uint8)]}, features=Features({"i": Image()})) ``` And in this case the numpy arrays are encoded into PNG (or TIFF if the pixels values precision is important). For multi-channels arrays like RGB or RGBA, only uint8 is supported. If you use a larger precision, you get a warning and the array is downcasted to uint8. 
For gray-scale images you can use the integer or float precision you want as long as it is compatible with `Pillow`. A warning is shown if your image integer or float precision is too high, and in this case the array is downcasted: an int64 array is downcasted to int32, and a float64 array is downcasted to float32.
datasets/docs/source/about_dataset_features.mdx/0
{ "file_path": "datasets/docs/source/about_dataset_features.mdx", "repo_id": "datasets", "token_count": 2334 }
60
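The feature guide above describes how `ClassLabel.int2str` and `ClassLabel.str2int` convert between integer ids and label names. A quick standalone check of that round trip, using only the public `datasets` API (the label names are the MRPC ones shown in the guide):

```python
from datasets import ClassLabel

mrpc_label = ClassLabel(names=["not_equivalent", "equivalent"])

assert mrpc_label.str2int("equivalent") == 1
assert mrpc_label.int2str(0) == "not_equivalent"
print(mrpc_label.names)  # ['not_equivalent', 'equivalent']
```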
# Cloud storage 🤗 Datasets supports access to cloud storage providers through a `fsspec` FileSystem implementations. You can save and load datasets from any cloud storage in a Pythonic way. Take a look at the following table for some example of supported cloud storage providers: | Storage provider | Filesystem implementation | |----------------------|---------------------------------------------------------------| | Amazon S3 | [s3fs](https://s3fs.readthedocs.io/en/latest/) | | Google Cloud Storage | [gcsfs](https://gcsfs.readthedocs.io/en/latest/) | | Azure Blob/DataLake | [adlfs](https://github.com/fsspec/adlfs) | | Dropbox | [dropboxdrivefs](https://github.com/MarineChap/dropboxdrivefs)| | Google Drive | [gdrivefs](https://github.com/intake/gdrivefs) | | Oracle Cloud Storage | [ocifs](https://ocifs.readthedocs.io/en/latest/) | This guide will show you how to save and load datasets with any cloud storage. Here are examples for S3, Google Cloud Storage, Azure Blob Storage, and Oracle Cloud Object Storage. ## Set up your cloud storage FileSystem ### Amazon S3 1. Install the S3 FileSystem implementation: ``` >>> pip install s3fs ``` 2. Define your credentials To use an anonymous connection, use `anon=True`. Otherwise, include your `aws_access_key_id` and `aws_secret_access_key` whenever you are interacting with a private S3 bucket. ```py >>> storage_options = {"anon": True} # for anonymous connection # or use your credentials >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} # for private buckets # or use a botocore session >>> import aiobotocore.session >>> s3_session = aiobotocore.session.AioSession(profile="my_profile_name") >>> storage_options = {"session": s3_session} ``` 3. Create your FileSystem instance ```py >>> import s3fs >>> fs = s3fs.S3FileSystem(**storage_options) ``` ### Google Cloud Storage 1. Install the Google Cloud Storage implementation: ``` >>> conda install -c conda-forge gcsfs # or install with pip >>> pip install gcsfs ``` 2. Define your credentials ```py >>> storage_options={"token": "anon"} # for anonymous connection # or use your credentials of your default gcloud credentials or from the google metadata service >>> storage_options={"project": "my-google-project"} # or use your credentials from elsewhere, see the documentation at https://gcsfs.readthedocs.io/ >>> storage_options={"project": "my-google-project", "token": TOKEN} ``` 3. Create your FileSystem instance ```py >>> import gcsfs >>> fs = gcsfs.GCSFileSystem(**storage_options) ``` ### Azure Blob Storage 1. Install the Azure Blob Storage implementation: ``` >>> conda install -c conda-forge adlfs # or install with pip >>> pip install adlfs ``` 2. Define your credentials ```py >>> storage_options = {"anon": True} # for anonymous connection # or use your credentials >>> storage_options = {"account_name": ACCOUNT_NAME, "account_key": ACCOUNT_KEY} # gen 2 filesystem # or use your credentials with the gen 1 filesystem >>> storage_options={"tenant_id": TENANT_ID, "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET} ``` 3. Create your FileSystem instance ```py >>> import adlfs >>> fs = adlfs.AzureBlobFileSystem(**storage_options) ``` ### Oracle Cloud Object Storage 1. Install the OCI FileSystem implementation: ``` >>> pip install ocifs ``` 2. Define your credentials ```py >>> storage_options = {"config": "~/.oci/config", "region": "us-ashburn-1"} ``` 3. 
Create your FileSystem instance ```py >>> import ocifs >>> fs = ocifs.OCIFileSystem(**storage_options) ``` ## Load and Save your datasets using your cloud storage FileSystem ### Download and prepare a dataset into a cloud storage You can download and prepare a dataset into your cloud storage by specifying a remote `output_dir` in `download_and_prepare`. Don't forget to use the previously defined `storage_options` containing your credentials to write into a private cloud storage. The `download_and_prepare` method works in two steps: 1. it first downloads the raw data files (if any) in your local cache. You can set your cache directory by passing `cache_dir` to [`load_dataset_builder`] 2. then it generates the dataset in Arrow or Parquet format in your cloud storage by iterating over the raw data files. Load a dataset builder from the Hugging Face Hub (see [how to load from the Hugging Face Hub](./loading#hugging-face-hub)): ```py >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("imdb") >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` Load a dataset builder using a loading script (see [how to load a local loading script](./loading#local-loading-script)): ```py >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("path/to/local/loading_script/loading_script.py") >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` Use your own data files (see [how to load local and remote files](./loading#local-and-remote-files)): ```py >>> data_files = {"train": ["path/to/train.csv"]} >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("csv", data_files=data_files) >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` It is highly recommended to save the files as compressed Parquet files to optimize I/O by specifying `file_format="parquet"`. Otherwise the dataset is saved as an uncompressed Arrow file. You can also specify the size of the shards using `max_shard_size` (default is 500MB): ```py >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet", max_shard_size="1GB") ``` #### Dask Dask is a parallel computing library and it has a pandas-like API for working with larger than memory Parquet datasets in parallel. Dask can use multiple threads or processes on a single machine, or a cluster of machines to process data in parallel. Dask supports local data but also data from a cloud storage. Therefore you can load a dataset saved as sharded Parquet files in Dask with ```py import dask.dataframe as dd df = dd.read_parquet(output_dir, storage_options=storage_options) # or if your dataset is split into train/valid/test df_train = dd.read_parquet(output_dir + f"/{builder.name}-train-*.parquet", storage_options=storage_options) df_valid = dd.read_parquet(output_dir + f"/{builder.name}-validation-*.parquet", storage_options=storage_options) df_test = dd.read_parquet(output_dir + f"/{builder.name}-test-*.parquet", storage_options=storage_options) ``` You can find more about dask dataframes in their [documentation](https://docs.dask.org/en/stable/dataframe.html). 
## Saving serialized datasets After you have processed your dataset, you can save it to your cloud storage with [`Dataset.save_to_disk`]: ```py # saves encoded_dataset to amazon s3 >>> encoded_dataset.save_to_disk("s3://my-private-datasets/imdb/train", storage_options=storage_options) # saves encoded_dataset to google cloud storage >>> encoded_dataset.save_to_disk("gcs://my-private-datasets/imdb/train", storage_options=storage_options) # saves encoded_dataset to microsoft azure blob/datalake >>> encoded_dataset.save_to_disk("adl://my-private-datasets/imdb/train", storage_options=storage_options) ``` <Tip> Remember to define your credentials in your [FileSystem instance](#set-up-your-cloud-storage-filesystem) `fs` whenever you are interacting with a private cloud storage. </Tip> ## Listing serialized datasets List files from a cloud storage with your FileSystem instance `fs`, using `fs.ls`: ```py >>> fs.ls("my-private-datasets/imdb/train", detail=False) ["dataset_info.json.json","dataset.arrow","state.json"] ``` ### Load serialized datasets When you are ready to use your dataset again, reload it with [`Dataset.load_from_disk`]: ```py >>> from datasets import load_from_disk # load encoded_dataset from cloud storage >>> dataset = load_from_disk("s3://a-public-datasets/imdb/train", storage_options=storage_options) >>> print(len(dataset)) 25000 ```
datasets/docs/source/filesystems.mdx/0
{ "file_path": "datasets/docs/source/filesystems.mdx", "repo_id": "datasets", "token_count": 2640 }
61
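Besides Dask, the Parquet shards written by `download_and_prepare` can be read straight back into a `Dataset`. A minimal sketch following the shard naming used in the guide's Dask example; passing `storage_options` to `load_dataset` and globbing `s3://` paths assumes a recent `datasets` plus `s3fs`:

```python
from datasets import load_dataset

storage_options = {"key": "aws_access_key_id", "secret": "aws_secret_access_key"}  # same credentials as above

ds = load_dataset(
    "parquet",
    data_files={"train": "s3://my-bucket/imdb/imdb-train-*.parquet"},
    split="train",
    storage_options=storage_options,
)
print(ds)
```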
# Object detection Object detection models identify something in an image, and object detection datasets are used for applications such as autonomous driving and detecting natural hazards like wildfire. This guide will show you how to apply transformations to an object detection dataset following the [tutorial](https://albumentations.ai/docs/examples/example_bboxes/) from [Albumentations](https://albumentations.ai/docs/). To run these examples, make sure you have up-to-date versions of `albumentations` and `cv2` installed: ``` pip install -U albumentations opencv-python ``` In this example, you'll use the [`cppe-5`](https://huggingface.co/datasets/cppe-5) dataset for identifying medical personal protective equipment (PPE) in the context of the COVID-19 pandemic. Load the dataset and take a look at an example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("cppe-5") >>> example = ds['train'][0] >>> example {'height': 663, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=943x663 at 0x7FC3DC756250>, 'image_id': 15, 'objects': {'area': [3796, 1596, 152768, 81002], 'bbox': [[302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], 'category': [4, 4, 0, 0], 'id': [114, 115, 116, 117]}, 'width': 943} ``` The dataset has the following fields: - `image`: PIL.Image.Image object containing the image. - `image_id`: The image ID. - `height`: The image height. - `width`: The image width. - `objects`: A dictionary containing bounding box metadata for the objects in the image: - `id`: The annotation id. - `area`: The area of the bounding box. - `bbox`: The object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format). - `category`: The object's category, with possible values including `Coverall (0)`, `Face_Shield (1)`, `Gloves (2)`, `Goggles (3)` and `Mask (4)`. You can visualize the `bboxes` on the image using some internal torch utilities. To do that, you will need to reference the [`~datasets.ClassLabel`] feature associated with the category IDs so you can look up the string labels: ```py >>> import torch >>> from torchvision.ops import box_convert >>> from torchvision.utils import draw_bounding_boxes >>> from torchvision.transforms.functional import pil_to_tensor, to_pil_image >>> categories = ds['train'].features['objects'].feature['category'] >>> boxes_xywh = torch.tensor(example['objects']['bbox']) >>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy') >>> labels = [categories.int2str(x) for x in example['objects']['category']] >>> to_pil_image( ... draw_bounding_boxes( ... pil_to_tensor(example['image']), ... boxes_xyxy, ... colors="red", ... labels=labels, ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example.png"> </div> With `albumentations`, you can apply transforms that will affect the image while also updating the `bboxes` accordingly. In this case, the image is resized to (480, 480), flipped horizontally, and brightened. ```py >>> import albumentations >>> import numpy as np >>> transform = albumentations.Compose([ ... albumentations.Resize(480, 480), ... albumentations.HorizontalFlip(p=1.0), ... albumentations.RandomBrightnessContrast(p=1.0), ... ], bbox_params=albumentations.BboxParams(format='coco', label_fields=['category'])) >>> image = np.array(example['image']) >>> out = transform( ... image=image, ... 
bboxes=example['objects']['bbox'], ... category=example['objects']['category'], ... ) ``` Now when you visualize the result, the image should be flipped, but the `bboxes` should still be in the right places. ```py >>> image = torch.tensor(out['image']).permute(2, 0, 1) >>> boxes_xywh = torch.stack([torch.tensor(x) for x in out['bboxes']]) >>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy') >>> labels = [categories.int2str(x) for x in out['category']] >>> to_pil_image( ... draw_bounding_boxes( ... image, ... boxes_xyxy, ... colors='red', ... labels=labels ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example_transformed.png"> </div> Create a function to apply the transform to a batch of examples: ```py >>> def transforms(examples): ... images, bboxes, categories = [], [], [] ... for image, objects in zip(examples['image'], examples['objects']): ... image = np.array(image.convert("RGB")) ... out = transform( ... image=image, ... bboxes=objects['bbox'], ... category=objects['category'] ... ) ... images.append(torch.tensor(out['image']).permute(2, 0, 1)) ... bboxes.append(torch.tensor(out['bboxes'])) ... categories.append(out['category']) ... return {'image': images, 'bbox': bboxes, 'category': categories} ``` Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly which consumes less disk space. The randomness of data augmentation may return a different image if you access the same example twice. It is especially useful when training a model for several epochs. ```py >>> ds['train'].set_transform(transforms) ``` You can verify the transform works by visualizing the 10th example: ```py >>> example = ds['train'][10] >>> to_pil_image( ... draw_bounding_boxes( ... example['image'], ... box_convert(example['bbox'], 'xywh', 'xyxy'), ... colors='red', ... labels=[categories.int2str(x) for x in example['category']] ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example_transformed_2.png"> </div> <Tip> Now that you know how to process a dataset for object detection, learn [how to train an object detection model](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/YOLOS/Fine_tuning_YOLOS_for_object_detection_on_custom_dataset_(balloon).ipynb) and use it for inference. </Tip>
datasets/docs/source/object_detection.mdx/0
{ "file_path": "datasets/docs/source/object_detection.mdx", "repo_id": "datasets", "token_count": 2299 }
62
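If the transformed dataset from the object-detection guide above is fed to a PyTorch `DataLoader`, the number of boxes varies per image, so default collation fails. A common workaround is a custom collate function; the sketch below is not part of the guide itself and assumes the `image`/`bbox`/`category` keys produced by the transform:

```python
import torch

def collate_fn(batch):
    """Stack fixed-size images; keep variable-length box/category fields as ragged lists."""
    return {
        "pixel_values": torch.stack([ex["image"] for ex in batch]),
        "bbox": [ex["bbox"] for ex in batch],
        "category": [ex["category"] for ex in batch],
    }

# Dummy batch with a different number of boxes per image:
batch = [
    {"image": torch.zeros(3, 480, 480), "bbox": torch.zeros(2, 4), "category": [0, 4]},
    {"image": torch.zeros(3, 480, 480), "bbox": torch.zeros(5, 4), "category": [1, 1, 2, 3, 4]},
]
out = collate_fn(batch)
print(out["pixel_values"].shape, [b.shape for b in out["bbox"]])
# e.g. torch.utils.data.DataLoader(ds["train"], batch_size=4, collate_fn=collate_fn)
```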
# Share a dataset to the Hub The [Hub](https://huggingface.co/datasets) is home to an extensive collection of community-curated and popular research datasets. We encourage you to share your dataset to the Hub to help grow the ML community and accelerate progress for everyone. All contributions are welcome; adding a dataset is just a drag and drop away! Start by [creating a Hugging Face Hub account](https://huggingface.co/join) if you don't have one yet. ## Upload with the Hub UI The Hub's web-based interface allows users without any developer experience to upload a dataset. ### Create a repository A repository hosts all your dataset files, including the revision history, making storing more than one dataset version possible. 1. Click on your profile and select **New Dataset** to create a new dataset repository. 2. Pick a name for your dataset, and choose whether it is a public or private dataset. A public dataset is visible to anyone, whereas a private dataset can only be viewed by you or members of your organization. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/create_repo.png"/> </div> ### Upload dataset 1. Once you've created a repository, navigate to the **Files and versions** tab to add a file. Select **Add file** to upload your dataset files. We support many text, audio, and image data extensions such as `.csv`, `.mp3`, and `.jpg` among many others. For text data extensions like `.csv`, `.json`, `.jsonl`, and `.txt`, we recommend compressing them before uploading to the Hub (to `.zip` or `.gz` file extension for example). Text file extensions are not tracked by Git LFS by default, and if they're greater than 10MB, they will not be committed and uploaded. Take a look at the `.gitattributes` file in your repository for a complete list of tracked file extensions. For this tutorial, you can use the following sample `.csv` files since they're small: <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/train.csv" download>train.csv</a>, <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/test.csv" download>test.csv</a>. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/upload_files.png"/> </div> 2. Drag and drop your dataset files and add a brief descriptive commit message. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/commit_files.png"/> </div> 3. After uploading your dataset files, they are stored in your dataset repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/files_stored.png"/> </div> ### Create a Dataset card Adding a Dataset card is super valuable for helping users find your dataset and understand how to use it responsibly. 1. Click on **Create Dataset Card** to create a Dataset card. This button creates a `README.md` file in your repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/dataset_card.png"/> </div> 2. At the top, you'll see the **Metadata UI** with several fields to select from like license, language, and task categories. These are the most important tags for helping users discover your dataset on the Hub. When you select an option from each field, they'll be automatically added to the top of the dataset card. 
You can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1), which has a complete set of (but not required) tag options like `annotations_creators`, to help you choose the appropriate tags. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/metadata_ui.png"/> </div> 3. Click on the **Import dataset card template** link at the top of the editor to automatically create a dataset card template. Filling out the template is a great way to introduce your dataset to the community and help users understand how to use it. For a detailed example of what a good Dataset card should look like, take a look at the [CNN DailyMail Dataset card](https://huggingface.co/datasets/cnn_dailymail). ### Load dataset Once your dataset is stored on the Hub, anyone can load it with the [`load_dataset`] function: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stevhliu/demo") ``` ## Upload with Python Users who prefer to upload a dataset programmatically can use the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library. This library allows users to interact with the Hub from Python. 1. Begin by installing the library: ```bash pip install huggingface_hub ``` 2. To upload a dataset on the Hub in Python, you need to log in to your Hugging Face account: ```bash huggingface-cli login ``` 3. Use the [`push_to_hub()`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict.push_to_hub) function to help you add, commit, and push a file to your repository: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stevhliu/demo") # dataset = dataset.map(...) # do all your processing here >>> dataset.push_to_hub("stevhliu/processed_demo") ``` To set your dataset as private, set the `private` parameter to `True`. This parameter will only work if you are creating a repository for the first time. ```py >>> dataset.push_to_hub("stevhliu/private_processed_demo", private=True) ``` To add a new configuration (or subset) to a dataset or to add a new split (train/validation/test), please refer to the [`Dataset.push_to_hub`] documentation. ### Privacy A private dataset is only accessible by you. Similarly, if you share a dataset within your organization, then members of the organization can also access the dataset. Load a private dataset by providing your authentication token to the `token` parameter: ```py >>> from datasets import load_dataset # Load a private individual dataset >>> dataset = load_dataset("stevhliu/demo", token=True) # Load a private organization dataset >>> dataset = load_dataset("organization/dataset_name", token=True) ``` ## What's next? Congratulations, you've completed the tutorials! 🥳 From here, you can go on to: - Learn more about how to use 🤗 Datasets other functions to [process your dataset](process). - [Stream large datasets](stream) without downloading it locally. - [Define your dataset splits and configurations](repository_structure) or [loading script](dataset_script) and share your dataset with the community. If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
datasets/docs/source/upload_dataset.mdx/0
{ "file_path": "datasets/docs/source/upload_dataset.mdx", "repo_id": "datasets", "token_count": 2010 }
63
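As a concrete illustration of the configuration/split note in the upload guide above, `Dataset.push_to_hub` accepts `config_name` and `split` arguments in recent `datasets` releases. A minimal sketch — the repo name is the placeholder used in the guide, and parameter availability depends on your `datasets` version:

```python
from datasets import Dataset

train = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
test = Dataset.from_dict({"text": ["hi"], "label": [0]})

# Each call adds or updates one split of the "tiny" configuration.
train.push_to_hub("stevhliu/processed_demo", config_name="tiny", split="train")
test.push_to_hub("stevhliu/processed_demo", config_name="tiny", split="test")
```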
# Copyright 2021 The HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from cer import CER cer = CER() class TestCER(unittest.TestCase): def test_cer_case_senstive(self): refs = ["White House"] preds = ["white house"] # S = 2, D = 0, I = 0, N = 11, CER = 2 / 11 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.1818181818) < 1e-6) def test_cer_whitespace(self): refs = ["were wolf"] preds = ["werewolf"] # S = 0, D = 0, I = 1, N = 9, CER = 1 / 9 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.1111111) < 1e-6) refs = ["werewolf"] preds = ["weae wolf"] # S = 1, D = 1, I = 0, N = 8, CER = 0.25 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.25) < 1e-6) # consecutive whitespaces case 1 refs = ["were wolf"] preds = ["were wolf"] # S = 0, D = 0, I = 0, N = 9, CER = 0 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.0) < 1e-6) # consecutive whitespaces case 2 refs = ["were wolf"] preds = ["were wolf"] # S = 0, D = 0, I = 0, N = 9, CER = 0 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.0) < 1e-6) def test_cer_sub(self): refs = ["werewolf"] preds = ["weaewolf"] # S = 1, D = 0, I = 0, N = 8, CER = 0.125 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.125) < 1e-6) def test_cer_del(self): refs = ["werewolf"] preds = ["wereawolf"] # S = 0, D = 1, I = 0, N = 8, CER = 0.125 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.125) < 1e-6) def test_cer_insert(self): refs = ["werewolf"] preds = ["wereolf"] # S = 0, D = 0, I = 1, N = 8, CER = 0.125 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.125) < 1e-6) def test_cer_equal(self): refs = ["werewolf"] char_error_rate = cer.compute(predictions=refs, references=refs) self.assertEqual(char_error_rate, 0.0) def test_cer_list_of_seqs(self): refs = ["werewolf", "I am your father"] char_error_rate = cer.compute(predictions=refs, references=refs) self.assertEqual(char_error_rate, 0.0) refs = ["werewolf", "I am your father", "doge"] preds = ["werxwolf", "I am your father", "doge"] # S = 1, D = 0, I = 0, N = 28, CER = 1 / 28 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.03571428) < 1e-6) def test_correlated_sentences(self): refs = ["My hovercraft", "is full of eels"] preds = ["My hovercraft is full", " of eels"] # S = 0, D = 0, I = 2, N = 28, CER = 2 / 28 # whitespace at the front of " of eels" will be strip during preporcessing # so need to insert 2 whitespaces char_error_rate = cer.compute(predictions=preds, references=refs, concatenate_texts=True) self.assertTrue(abs(char_error_rate - 0.071428) < 1e-6) def test_cer_unicode(self): 
refs = ["我能吞下玻璃而不伤身体"] preds = [" 能吞虾玻璃而 不霜身体啦"] # S = 3, D = 2, I = 0, N = 11, CER = 5 / 11 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.4545454545) < 1e-6) refs = ["我能吞下玻璃", "而不伤身体"] preds = ["我 能 吞 下 玻 璃", "而不伤身体"] # S = 0, D = 5, I = 0, N = 11, CER = 5 / 11 char_error_rate = cer.compute(predictions=preds, references=refs) self.assertTrue(abs(char_error_rate - 0.454545454545) < 1e-6) refs = ["我能吞下玻璃而不伤身体"] char_error_rate = cer.compute(predictions=refs, references=refs) self.assertFalse(char_error_rate, 0.0) def test_cer_empty(self): refs = [""] preds = ["Hypothesis"] with self.assertRaises(ValueError): cer.compute(predictions=preds, references=refs) if __name__ == "__main__": unittest.main()
datasets/metrics/cer/test_cer.py/0
{ "file_path": "datasets/metrics/cer/test_cer.py", "repo_id": "datasets", "token_count": 2407 }
64
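As a quick, non-authoritative illustration of the CER metric exercised by the test module above: the sample strings below are invented for illustration and are not part of the test suite; `load_metric` is the datasets loader used throughout these metric scripts.

```python
# Hedged usage sketch for the CER metric tested above.
from datasets import load_metric

cer_metric = load_metric("cer")
predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]

# CER = (S + D + I) / N, counted over the characters of the references.
cer_score = cer_metric.compute(predictions=predictions, references=references)
print(cer_score)  # a float >= 0.0; lower is better
```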
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Exact Match metric.""" import re import string import numpy as np import datasets _DESCRIPTION = """ Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. """ _KWARGS_DESCRIPTION = """ Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all numbers before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. 
Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 """ _CITATION = """ """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class ExactMatch(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("string", id="sequence"), "references": datasets.Value("string", id="sequence"), } ), reference_urls=[], ) def _compute( self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False, ): if regexes_to_ignore is not None: for s in regexes_to_ignore: predictions = np.array([re.sub(s, "", x) for x in predictions]) references = np.array([re.sub(s, "", x) for x in references]) else: predictions = np.asarray(predictions) references = np.asarray(references) if ignore_case: predictions = np.char.lower(predictions) references = np.char.lower(references) if ignore_punctuation: repl_table = string.punctuation.maketrans("", "", string.punctuation) predictions = np.char.translate(predictions, table=repl_table) references = np.char.translate(references, table=repl_table) if ignore_numbers: repl_table = string.digits.maketrans("", "", string.digits) predictions = np.char.translate(predictions, table=repl_table) references = np.char.translate(references, table=repl_table) score_list = predictions == references return {"exact_match": np.mean(score_list) * 100}
datasets/metrics/exact_match/exact_match.py/0
{ "file_path": "datasets/metrics/exact_match/exact_match.py", "repo_id": "datasets", "token_count": 2111 }
65
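A minimal usage sketch for the Exact Match metric defined above, reusing the inputs from its docstring examples; the 50.0 result for the relaxed call is what the normalization flags are expected to produce on these strings.

```python
from datasets import load_metric

exact_match = load_metric("exact_match")
refs = ["the cat", "theater", "YELLING", "agent007"]
preds = ["cat?", "theater", "yelling", "agent"]

# Strict comparison: only "theater" matches exactly.
print(round(exact_match.compute(references=refs, predictions=preds)["exact_match"], 1))  # 25.0

# Relaxed comparison: case and punctuation are stripped before comparing,
# so "yelling" now also counts as a match.
relaxed = exact_match.compute(
    references=refs, predictions=preds, ignore_case=True, ignore_punctuation=True
)
print(round(relaxed["exact_match"], 1))  # 50.0
```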
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Matthews Correlation metric.""" from sklearn.metrics import matthews_corrcoef import datasets _DESCRIPTION = """ Compute the Matthews correlation coefficient (MCC) The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary and multiclass classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] """ _KWARGS_DESCRIPTION = """ Args: predictions (list of int): Predicted labels, as returned by a model. references (list of int): Ground truth labels. sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`. Returns: matthews_correlation (dict containing float): Matthews correlation. Examples: Example 1, a basic example with only predictions and references as inputs: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3]) >>> print(round(results['matthews_correlation'], 2)) 0.54 Example 2, the same example as above, but also including sample weights: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 3, 1, 1, 1, 2]) >>> print(round(results['matthews_correlation'], 2)) 0.1 Example 3, the same example as above, but with sample weights that cause a negative correlation: >>> matthews_metric = datasets.load_metric("matthews_correlation") >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], ... predictions=[1, 2, 2, 0, 3, 3], ... sample_weight=[0.5, 1, 0, 0, 0, 1]) >>> print(round(results['matthews_correlation'], 2)) -0.25 """ _CITATION = """\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. 
and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class MatthewsCorrelation(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Value("int32"), "references": datasets.Value("int32"), } ), reference_urls=[ "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html" ], ) def _compute(self, predictions, references, sample_weight=None): return { "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)), }
datasets/metrics/matthews_correlation/matthews_correlation.py/0
{ "file_path": "datasets/metrics/matthews_correlation/matthews_correlation.py", "repo_id": "datasets", "token_count": 1736 }
66
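Since the class above is a thin wrapper around `sklearn.metrics.matthews_corrcoef`, a small cross-check sketch (inputs and the ~0.54 value taken from the docstring example) looks like this:

```python
from sklearn.metrics import matthews_corrcoef
from datasets import load_metric

references = [1, 3, 2, 0, 3, 2]
predictions = [1, 2, 2, 0, 3, 3]

matthews_metric = load_metric("matthews_correlation")
wrapped = matthews_metric.compute(references=references, predictions=predictions)
direct = matthews_corrcoef(references, predictions)

# Both paths compute the same statistic, here roughly 0.54.
print(round(wrapped["matthews_correlation"], 2), round(direct, 2))
```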
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Recall metric.""" from sklearn.metrics import recall_score import datasets _DESCRIPTION = """ Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation: Recall = TP / (TP + FN) Where TP is the true positives and FN is the false negatives. """ _KWARGS_DESCRIPTION = """ Args: - **predictions** (`list` of `int`): The predicted labels. - **references** (`list` of `int`): The ground truth labels. - **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None. - **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`. - **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary. - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives. - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall. - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). - **sample_weight** (`list` of `float`): Sample weights Defaults to `None`. - **zero_division** (): Sets the value to return when there is a zero division. Defaults to . - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised. - `0`: If there is a zero division, the return value is `0`. - `1`: If there is a zero division, the return value is `1`. Returns: - **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better. 
Examples: Example 1-A simple example with some errors >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]) >>> print(results) {'recall': 0.6666666666666666} Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`. >>> recall_metric = datasets.load_metric('recall') >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0) >>> print(results) {'recall': 0.5} Example 3-The same example as Example 1, but with `sample_weight` included. >>> recall_metric = datasets.load_metric('recall') >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8] >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight) >>> print(results) {'recall': 0.55} Example 4-A multiclass example, using different averages. >>> recall_metric = datasets.load_metric('recall') >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted') >>> print(results) {'recall': 0.3333333333333333} >>> results = recall_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'recall': array([1., 0., 0.])} """ _CITATION = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION) class Recall(datasets.Metric): def _info(self): return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32")), "references": datasets.Sequence(datasets.Value("int32")), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32"), "references": datasets.Value("int32"), } ), reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"], ) def _compute( self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn", ): score = recall_score( references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, ) return {"recall": float(score) if score.size == 1 else score}
datasets/metrics/recall/recall.py/0
{ "file_path": "datasets/metrics/recall/recall.py", "repo_id": "datasets", "token_count": 2604 }
67
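A short usage sketch for the recall metric above, reproducing the binary and multiclass docstring examples:

```python
from datasets import load_metric

recall_metric = load_metric("recall")

# Binary case (pos_label defaults to 1): 2 of the 3 positive references are recovered.
print(recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1]))
# {'recall': 0.6666666666666666}

# Multiclass case: an averaging strategy is required
# (or pass average=None to get one recall value per class).
predictions = [0, 2, 1, 0, 0, 1]
references = [0, 1, 2, 0, 1, 2]
print(recall_metric.compute(predictions=predictions, references=references, average="macro"))
# {'recall': 0.3333333333333333}
```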
# Metric Card for SQuAD v2 ## Metric description This metric wraps the official scoring script for version 2 of the [Stanford Question Answering Dataset (SQuAD)](https://huggingface.co/datasets/squad_v2). SQuAD is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. SQuAD 2.0 combines the 100,000 questions in SQuAD 1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. ## How to use The metric takes two files or two lists - one representing model predictions and the other the references to compare them to. *Predictions* : List of triple for question-answers to score with the following key-value pairs: * `'id'`: the question-answer identification field of the question and answer pair * `'prediction_text'` : the text of the answer * `'no_answer_probability'` : the probability that the question has no answer *References*: List of question-answers dictionaries with the following key-value pairs: * `'id'`: id of the question-answer pair (see above), * `'answers'`: a list of Dict {'text': text of the answer as a string} * `'no_answer_threshold'`: the probability threshold to decide that a question has no answer. ```python from datasets import load_metric squad_metric = load_metric("squad_v2") results = squad_metric.compute(predictions=predictions, references=references) ``` ## Output values This metric outputs a dictionary with 13 values: * `'exact'`: Exact match (the normalized answer exactly match the gold answer) (see the `exact_match` metric (forthcoming)) * `'f1'`: The average F1-score of predicted tokens versus the gold answer (see the [F1 score](https://huggingface.co/metrics/f1) metric) * `'total'`: Number of scores considered * `'HasAns_exact'`: Exact match (the normalized answer exactly match the gold answer) * `'HasAns_f1'`: The F-score of predicted tokens versus the gold answer * `'HasAns_total'`: How many of the questions have answers * `'NoAns_exact'`: Exact match (the normalized answer exactly match the gold answer) * `'NoAns_f1'`: The F-score of predicted tokens versus the gold answer * `'NoAns_total'`: How many of the questions have no answers * `'best_exact'` : Best exact match (with varying threshold) * `'best_exact_thresh'`: No-answer probability threshold associated to the best exact match * `'best_f1'`: Best F1 score (with varying threshold) * `'best_f1_thresh'`: No-answer probability threshold associated to the best F1 The range of `exact_match` is 0-100, where 0.0 means no answers were matched and 100.0 means all answers were matched. The range of `f1` is 0-1 -- its lowest possible value is 0, if either the precision or the recall is 0, and its highest possible value is 1.0, which means perfect precision and recall. The range of `total` depends on the length of predictions/references: its minimal value is 0, and maximal value is the total number of questions in the predictions and references. ### Values from popular papers The [SQuAD v2 paper](https://arxiv.org/pdf/1806.03822.pdf) reported an F1 score of 66.3% and an Exact Match score of 63.4%. 
They also report that human performance on the dataset represents an F1 score of 89.5% and an Exact Match score of 86.9%. For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/squad). ## Examples Maximal values for both exact match and F1 (perfect match): ```python from datasets import load_metric squad_v2_metric = load_metric("squad_v2") predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] results = squad_v2_metric.compute(predictions=predictions, references=references) results {'exact': 100.0, 'f1': 100.0, 'total': 1, 'HasAns_exact': 100.0, 'HasAns_f1': 100.0, 'HasAns_total': 1, 'best_exact': 100.0, 'best_exact_thresh': 0.0, 'best_f1': 100.0, 'best_f1_thresh': 0.0} ``` Minimal values for both exact match and F1 (no match): ```python from datasets import load_metric squad_v2_metric = load_metric("squad_v2") predictions = [{'prediction_text': '1999', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] results = squad_v2_metric.compute(predictions=predictions, references=references) results {'exact': 0.0, 'f1': 0.0, 'total': 1, 'HasAns_exact': 0.0, 'HasAns_f1': 0.0, 'HasAns_total': 1, 'best_exact': 0.0, 'best_exact_thresh': 0.0, 'best_f1': 0.0, 'best_f1_thresh': 0.0} ``` Partial match (2 out of 3 answers correct): ```python from datasets import load_metric squad_v2_metric = load_metric("squad_v2") predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}, {'prediction_text': 'Beyonce', 'id': '56d2051ce7d4791d0090260b', 'no_answer_probability': 0.}, {'prediction_text': 'climate change', 'id': '5733b5344776f419006610e1', 'no_answer_probability': 0.}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}, {'answers': {'answer_start': [233], 'text': ['Beyoncé and Bruno Mars']}, 'id': '56d2051ce7d4791d0090260b'}, {'answers': {'answer_start': [891], 'text': ['climate change']}, 'id': '5733b5344776f419006610e1'}] results = squad_v2_metric.compute(predictions=predictions, references=references) results {'exact': 66.66666666666667, 'f1': 66.66666666666667, 'total': 3, 'HasAns_exact': 66.66666666666667, 'HasAns_f1': 66.66666666666667, 'HasAns_total': 3, 'best_exact': 66.66666666666667, 'best_exact_thresh': 0.0, 'best_f1': 66.66666666666667, 'best_f1_thresh': 0.0} ``` ## Limitations and bias This metric works only with the datasets in the same format as the [SQuAD v.2 dataset](https://huggingface.co/datasets/squad_v2). The SQuAD datasets do contain a certain amount of noise, such as duplicate questions as well as missing answers, but these represent a minority of the 100,000 question-answer pairs. Also, neither exact match nor F1 score reflect whether models do better on certain types of questions (e.g. who questions) or those that cover a certain gender or geographical area -- carrying out more in-depth error analysis can complement these numbers. 
## Citation ```bibtex @inproceedings{Rajpurkar2018SQuAD2, title={Know What You Don't Know: Unanswerable Questions for SQuAD}, author={Pranav Rajpurkar and Jian Zhang and Percy Liang}, booktitle={ACL 2018}, year={2018} } ``` ## Further References - [The Stanford Question Answering Dataset: Background, Challenges, Progress (blog post)](https://rajpurkar.github.io/mlx/qa-and-squad/) - [Hugging Face Course -- Question Answering](https://huggingface.co/course/chapter7/7)
datasets/metrics/squad_v2/README.md/0
{ "file_path": "datasets/metrics/squad_v2/README.md", "repo_id": "datasets", "token_count": 2372 }
68
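The metric card above only shows answerable questions; the sketch below is a hypothetical example of scoring an unanswerable one. The question id, threshold value, and expected output are assumptions: an empty gold `text` list marks the question as unanswerable, and `no_answer_threshold` is passed through to the metric's `compute`.

```python
from datasets import load_metric

squad_v2_metric = load_metric("squad_v2")

# One unanswerable question: the model abstains (empty prediction_text) and is
# fairly confident there is no answer (no_answer_probability above the threshold).
predictions = [{"id": "q-no-answer", "prediction_text": "", "no_answer_probability": 0.9}]
references = [{"id": "q-no-answer", "answers": {"text": [], "answer_start": []}}]

results = squad_v2_metric.compute(
    predictions=predictions, references=references, no_answer_threshold=0.5
)
print(results["NoAns_total"], results["exact"])  # expected: 1 100.0
```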
<jupyter_start><jupyter_text>**⚠️ This notebook is deprecated in favor of the [Quickstart notebook](https://github.com/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)** HuggingFace 🤗 Datasets library - Quick overviewModels come and go (linear models, LSTM, Transformers, ...) but two core elements have consistently been the beating heart of Natural Language Processing: Datasets & Metrics🤗 Datasets is a fast and efficient library to easily share and load datasets, already providing access to the public datasets in the [Hugging Face Hub](https://huggingface.co/datasets).The library has several interesting features (besides easy access to datasets):- Build-in interoperability with PyTorch, Tensorflow 2, Pandas and Numpy- Lighweight and fast library with a transparent and pythonic API- Strive on large datasets: frees you from RAM memory limits, all datasets are memory-mapped on drive by default.- Smart caching with an intelligent `tf.data`-like cache: never wait for your data to process several times🤗 Datasets originated from a fork of the awesome Tensorflow-Datasets and the HuggingFace team want to deeply thank the team behind this amazing library and user API. We have tried to keep a layer of compatibility with `tfds` and can provide conversion from one format to the other.To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets. Main datasets APIThis notebook is a quick dive in the main user API for loading datasets in `datasets`<jupyter_code># install datasets !pip install datasets # Let's import the library. We typically only need at most two methods: from datasets import list_datasets, load_dataset from pprint import pprint<jupyter_output><empty_output><jupyter_text>Listing the currently available datasets<jupyter_code># Currently available datasets datasets = list_datasets() print(f"🤩 Currently {len(datasets)} datasets are available on the hub:") pprint(datasets[:100] + [f"{len(datasets) - 100} more..."], compact=True) # You can access various attributes of the datasets before downloading them squad_dataset = list_datasets(with_details=True)[datasets.index('squad')] pprint(squad_dataset.__dict__) # It's a simple python dataclass<jupyter_output>{'_id': '621ffdd236468d709f181f95', 'author': None, 'cardData': {'annotations_creators': ['crowdsourced'], 'dataset_info': {'config_name': 'plain_text', 'dataset_size': 89789763, 'download_size': 35142551, 'features': [{'dtype': 'string', 'name': 'id'}, {'dtype': 'string', 'name': 'title'}, {'dtype': 'string', 'name': 'context'}, {'dtype': 'string', 'name': 'question'}, {'name': 'answers', 'sequence': [{'dtype': 'string', 'name': 'text'}, [...]<jupyter_text>An example with SQuAD<jupyter_code># Downloading and loading a dataset dataset = load_dataset('squad', split='validation[:10%]')<jupyter_output>WARNING:datasets.builder:Found cached dataset squad (/root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453)<jupyter_text>This call to `datasets.load_dataset()` does the following steps under the hood:1. Download and import in the library the **SQuAD python processing script** from HuggingFace AWS bucket if it's not already stored in the library. You can find the SQuAD processing script [here](https://github.com/huggingface/datasets/tree/master/datasets/squad/squad.py) for instance. 
Processing scripts are small python scripts which define the info (citation, description) and format of the dataset and contain the URL to the original SQuAD JSON files and the code to load examples from the original SQuAD JSON files.2. Run the SQuAD python processing script which will: - **Download the SQuAD dataset** from the original URL (see the script) if it's not already downloaded and cached. - **Process and cache** all SQuAD in a structured Arrow table for each standard splits stored on the drive. Arrow table are arbitrarily long tables, typed with types that can be mapped to numpy/pandas/python standard types and can store nested objects. They can be directly access from drive, loaded in RAM or even streamed over the web. 3. Return a **dataset built from the splits** asked by the user (default: all); in the above example we create a dataset with the first 10% of the validation split.<jupyter_code># Informations on the dataset (description, citation, size, splits, format...) # are provided in `dataset.info` (a simple python dataclass) and also as direct attributes in the dataset object pprint(dataset.info.__dict__)<jupyter_output>{'builder_name': 'squad', 'citation': '@article{2016arXiv160605250R,\n' ' author = {{Rajpurkar}, Pranav and {Zhang}, Jian and ' '{Lopyrev},\n' ' Konstantin and {Liang}, Percy},\n' ' title = "{SQuAD: 100,000+ Questions for Machine ' 'Comprehension of Text}",\n' ' journal = {arXiv e-prints},\n' ' year = 2016,\n' ' eid = {arXiv:1606.05250},\n' ' pages = {arXiv:1606.05250},\n' 'archivePrefix = {arXiv},\n' ' eprint = {1606.05250},\n' '}\n', 'config_name': 'plain_text', 'dataset_size': 89819092, 'description': 'Stanford Question Answering Dataset (SQuAD) is a reading ' 'comprehension dataset, consisting of questions posed by ' 'crowdworkers on a set of Wikipedia articles, where the answer ' 'to every question is a segment of[...]<jupyter_text>Inspecting and using the dataset: elements, slices and columns The returned `Dataset` object is a memory mapped dataset that behaves similarly to a normal map-style dataset. 
It is backed by an Apache Arrow table which allows many interesting features.<jupyter_code>print(dataset)<jupyter_output>Dataset({ features: ['id', 'title', 'context', 'question', 'answers'], num_rows: 1057 })<jupyter_text>You can query it's length and get items or slices like you would do normally with a python mapping.<jupyter_code>print(f"👉 Dataset len(dataset): {len(dataset)}") print("\n👉 First item 'dataset[0]':") pprint(dataset[0]) # Or get slices with several examples: print("\n👉Slice of the two items 'dataset[10:12]':") pprint(dataset[10:12]) # You can get a full column of the dataset by indexing with its name as a string: print(dataset['question'][:10])<jupyter_output>['Which NFL team represented the AFC at Super Bowl 50?', 'Which NFL team represented the NFC at Super Bowl 50?', 'Where did Super Bowl 50 take place?', 'Which NFL team won Super Bowl 50?', 'What color was used to emphasize the 50th anniversary of the Super Bowl?', 'What was the theme of Super Bowl 50?', 'What day was the game played on?', 'What is the AFC short for?', 'What was the theme of Super Bowl 50?', 'What does AFC stand for?']<jupyter_text>The `__getitem__` method will return different format depending on the type of query:- Items like `dataset[0]` are returned as dict of elements.- Slices like `dataset[10:20]` are returned as dict of lists of elements.- Columns like `dataset['question']` are returned as a list of elements.This may seems surprising at first but in our experiments it's actually a lot easier to use for data processing than returning the same format for each of these views on the dataset. In particular, you can easily iterate along columns in slices, and also naturally permute consecutive indexings with identical results as showed here by permuting column indexing with elements and slices:<jupyter_code>print(dataset[0]['question'] == dataset['question'][0]) print(dataset[10:20]['context'] == dataset['context'][10:20])<jupyter_output>True True<jupyter_text>Dataset are internally typed and structuredThe dataset is backed by one (or several) Apache Arrow tables which are typed and allows for fast retrieval and access as well as arbitrary-size memory mapping.This means respectively that the format for the dataset is clearly defined and that you can load datasets of arbitrary size without worrying about RAM memory limitation (basically the dataset take no space in RAM, it's directly read from drive when needed with fast IO access).<jupyter_code># You can inspect the dataset column names and types print("Column names:") pprint(dataset.column_names) print("Features:") pprint(dataset.features)<jupyter_output>Column names: ['id', 'title', 'context', 'question', 'answers'] Features: {'answers': Sequence(feature={'answer_start': Value(dtype='int32', id=None), 'text': Value(dtype='string', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)}<jupyter_text>Additional misc properties<jupyter_code># Datasets also have shapes informations print("The number of rows", dataset.num_rows, "also available as len(dataset)", len(dataset)) print("The number of columns", dataset.num_columns) print("The shape (rows, columns)", dataset.shape)<jupyter_output>The number of rows 1057 also available as len(dataset) 1057 The number of columns 5 The shape (rows, columns) (1057, 5)<jupyter_text>Modifying the dataset with `dataset.map`Now that we know how to inspect our dataset we also 
want to update it. For that there is a powerful method `.map()` which is inspired by `tf.data` map method and that you can use to apply a function to each examples, independently or in batch.`.map()` takes a callable accepting a dict as argument (same dict as the one returned by `dataset[i]`) and iterate over the dataset by calling the function on each example.<jupyter_code># Let's print the length of each `context` string in our subset of the dataset # (10% of the validation i.e. 1057 examples) dataset.map(lambda example: print(len(example['context']), end=','))<jupyter_output><empty_output><jupyter_text>This is basically the same as doing```pythonfor example in dataset: function(example)``` The above examples was a bit verbose. We can control the logging level of 🤗 Datasets with it's logging module:<jupyter_code>from datasets import logging logging.set_verbosity_warning() dataset.map(lambda example: print(len(example['context']), end=',')) # Let's keep it verbose for our tutorial though from datasets import logging logging.set_verbosity_info()<jupyter_output><empty_output><jupyter_text>The above example had no effect on the dataset because the method we supplied to `.map()` didn't return a `dict` or a `abc.Mapping` that could be used to update the examples in the dataset.In such a case, `.map()` will return the same dataset (`self`).Now let's see how we can use a method that actually modify the dataset. Modifying the dataset example by example The main interest of `.map()` is to update and modify the content of the table and leverage smart caching and fast backend.To use `.map()` to update elements in the table you need to provide a function with the following signature: `function(example: dict) -> dict`.<jupyter_code># Let's add a prefix 'My cute title: ' to each of our titles def add_prefix_to_title(example): example['title'] = 'My cute title: ' + example['title'] return example prefixed_dataset = dataset.map(add_prefix_to_title) print(prefixed_dataset.unique('title')) # `.unique()` is a super fast way to print the unique elemnts in a column (see the doc for all the methods)<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-242ccd893f32bdf9.arrow<jupyter_text>This call to `.map()` compute and return the updated table. It will also store the updated table in a cache file indexed by the current state and the mapped function.A subsequent call to `.map()` (even in another python session) will reuse the cached file instead of recomputing the operation.You can test this by running again the previous cell, you will see that the result are directly loaded from the cache and not re-computed again.The updated dataset returned by `.map()` is (again) directly memory mapped from drive and not allocated in RAM. The function you provide to `.map()` should accept an input with the format of an item of the dataset: `function(dataset[0])` and return a python dict.The columns and type of the outputs can be different than the input dict. 
In this case the new keys will be added as additional columns in the dataset.Bascially each dataset example dict is updated with the dictionary returned by the function like this: `example.update(function(example))`.<jupyter_code># Since the input example dict is updated with our function output dict, # we can actually just return the updated 'title' field titled_dataset = dataset.map(lambda example: {'title': 'My cutest title: ' + example['title']}) print(titled_dataset.unique('title'))<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-4f3eee21db868c87.arrow<jupyter_text>Removing columnsYou can also remove columns when running map with the `remove_columns=List[str]` argument.<jupyter_code># This will remove the 'title' column while doing the update (after having send it the the mapped function so you can use it in your function!) less_columns_dataset = dataset.map(lambda example: {'new_title': 'Wouhahh: ' + example['title']}, remove_columns=['title']) print(less_columns_dataset.column_names) print(less_columns_dataset.unique('new_title'))<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-2800c1727354fbe2.arrow<jupyter_text>Using examples indicesWith `with_indices=True`, dataset indices (from `0` to `len(dataset)`) will be supplied to the function which must thus have the following signature: `function(example: dict, indice: int) -> dict`<jupyter_code># This will add the index in the dataset to the 'question' field with_indices_dataset = dataset.map(lambda example, idx: {'question': f'{idx}: ' + example['question']}, with_indices=True) pprint(with_indices_dataset['question'][:5])<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-e23b98819de39aea.arrow<jupyter_text>Modifying the dataset with batched updates `.map()` can also work with batch of examples (slices of the dataset).This is particularly interesting if you have a function that can handle batch of inputs like the tokenizers of HuggingFace `tokenizers`.To work on batched inputs set `batched=True` when calling `.map()` and supply a function with the following signature: `function(examples: Dict[List]) -> Dict[List]` or, if you use indices, `function(examples: Dict[List], indices: List[int]) -> Dict[List]`).Bascially, your function should accept an input with the format of a slice of the dataset: `function(dataset[:10])`.<jupyter_code>!pip install transformers # Let's import a fast tokenizer that can work on batched inputs # (the 'Fast' tokenizers in HuggingFace) from transformers import BertTokenizerFast, logging as transformers_logging transformers_logging.set_verbosity_warning() tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') # Now let's batch tokenize our dataset 'context' encoded_dataset = dataset.map(lambda example: tokenizer(example['context']), batched=True) print("encoded_dataset[0]") pprint(encoded_dataset[0], compact=True) # we have added additional columns pprint(encoded_dataset.column_names) # Let show a more complex processing with the full preparation of the SQuAD dataset # for training a model from Transformers def 
convert_to_features(batch): # Tokenize contexts and questions (as pairs of inputs) encodings = tokenizer(batch['context'], batch['question'], truncation=True) # Compute start and end tokens for labels start_positions, end_positions = [], [] for i, answer in enumerate(batch['answers']): first_char = answer['answer_start'][0] last_char = first_char + len(answer['text'][0]) - 1 start_positions.append(encodings.char_to_token(i, first_char)) end_positions.append(encodings.char_to_token(i, last_char)) encodings.update({'start_positions': start_positions, 'end_positions': end_positions}) return encodings encoded_dataset = dataset.map(convert_to_features, batched=True) # Now our dataset comprise the labels for the start and end position # as well as the offsets for converting back tokens # in span of the original string for evaluation print("column_names", encoded_dataset.column_names) print("start_positions", encoded_dataset[:5]['start_positions'])<jupyter_output>column_names ['id', 'title', 'context', 'question', 'answers', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] start_positions [34, 45, 80, 34, 98]<jupyter_text>Image datasets Images are loaded using Pillow:<jupyter_code>image_dataset = load_dataset("cats_vs_dogs", split="train") image_dataset[0] image_dataset[0]["image"]<jupyter_output><empty_output><jupyter_text>Audio datasets Audio files are decoded using torchaudio or librosa using to the sampling rate of your choice.To read mp3 files you need ffmpeg and restart your runtime<jupyter_code>!add-apt-repository -y ppa:jonathonf/ffmpeg-4 && apt update && apt install -y ffmpeg from datasets import load_dataset audio_dataset = load_dataset("common_voice", "fi", split="train") audio_dataset[0] audio_dataset[0]["audio"]["array"], audio_dataset[0]["audio"]["sampling_rate"]<jupyter_output><empty_output><jupyter_text>Audio decoding and resampling is done in-the-fly when accessing examples. You can change the sampling rate this way:<jupyter_code>from datasets import Audio audio_dataset = audio_dataset.cast_column("audio", Audio(sampling_rate=16_000)) audio_dataset[0]["audio"]["array"], audio_dataset[0]["audio"]["sampling_rate"]<jupyter_output><empty_output><jupyter_text>Formatting outputs for PyTorch, Tensorflow, Numpy, PandasNow that we have tokenized our inputs, we probably want to use this dataset in a `torch.Dataloader` or a `tf.data.Dataset`. There are various ways to approach this.Using the `set_format()` method, we can:- format the indexing (`__getitem__`) to return numpy/pytorch/tensorflow tensors, instead of python objects, and- format the indexing (`__getitem__`) to return only the subset of the columns that we need for our model inputs. We don't want the columns `id` or `title` as inputs to train our model, but we could still want to keep them in the dataset, for instance for the evaluation of the model. 
This is handled by the `.set_format(type: Union[None, str], columns: Union[None, str, List[str]])` where:- `type` define the return type for our dataset `__getitem__` method and is one of `[None, 'numpy', 'pandas', 'torch', 'tensorflow']` (`None` means return python objects), and- `columns` define the columns returned by `__getitem__` and takes the name of a column in the dataset or a list of columns to return (`None` means return all columns).<jupyter_code>columns_to_return = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] # Uncomment whichever one is appropriate for you # encoded_dataset.set_format(type='torch', columns=columns_to_return) encoded_dataset.set_format(type='tensorflow', columns=columns_to_return) # Our dataset indexing output is now ready for being used in a pytorch dataloader pprint(encoded_dataset[1], compact=True) # Note that the columns are not removed from the dataset, just not returned when calling __getitem__ # Similarly the inner type of the dataset is not changed to torch.Tensor, the conversion and filtering is done on-the-fly when querying the dataset print(encoded_dataset.column_names) # We can remove the formatting with `.reset_format()` # or, identically, a call to `.set_format()` with no arguments encoded_dataset.reset_format() pprint(encoded_dataset[1], compact=True) # The current format can be checked with `.format`, # which is a dict of the type and formatting pprint(encoded_dataset.format)<jupyter_output>{'columns': ['id', 'title', 'context', 'question', 'answers', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None}<jupyter_text>There is also a convenience method, `to_tf_dataset()`, for the creation of `tf.data.Dataset` objects directly from a HuggingFace `Dataset`. An example will be shown below - when using this method, it is sufficient to pass the `columns` argument and your `DataCollator` - make sure you set the `return_tensors` argument of your `DataCollator` to `tf` or `np`, though, because TensorFlow won't be happy if you start passing it PyTorch Tensors! Wrapping this all upLet's wrap this all up with the full code to load and prepare SQuAD for training a PyTorch or TensorFlow model from HuggingFace `transformers` library.<jupyter_code>!pip install transformers import torch from datasets import load_dataset from transformers import BertTokenizerFast # Load our training dataset and tokenizer dataset = load_dataset('squad') tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') def get_correct_alignement(context, answer): """ Some original examples in SQuAD have indices wrong by 1 or 2 character. We test and fix this here. 
""" gold_text = answer['text'][0] start_idx = answer['answer_start'][0] end_idx = start_idx + len(gold_text) if context[start_idx:end_idx] == gold_text: return start_idx, end_idx # When the gold label position is good elif context[start_idx-1:end_idx-1] == gold_text: return start_idx-1, end_idx-1 # When the gold label is off by one character elif context[start_idx-2:end_idx-2] == gold_text: return start_idx-2, end_idx-2 # When the gold label is off by two character else: raise ValueError() # Tokenize our training dataset def convert_to_features(example_batch): # Tokenize contexts and questions (as pairs of inputs) encodings = tokenizer(example_batch['context'], example_batch['question'], truncation=True) # Compute start and end tokens for labels using Transformers's fast tokenizers alignement methods. start_positions, end_positions = [], [] for i, (context, answer) in enumerate(zip(example_batch['context'], example_batch['answers'])): start_idx, end_idx = get_correct_alignement(context, answer) start_positions.append(encodings.char_to_token(i, start_idx)) end_positions.append(encodings.char_to_token(i, end_idx-1)) encodings.update({'start_positions': start_positions, 'end_positions': end_positions}) return encodings encoded_dataset = dataset.map(convert_to_features, batched=True)<jupyter_output>INFO:datasets.builder:No config specified, defaulting to the single config: squad/plain_text INFO:datasets.info:Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/squad/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453 INFO:datasets.builder:Overwrite dataset info from restored data version if exists. INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453 WARNING:datasets.builder:Found cached dataset squad (/root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453) INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453<jupyter_text>That's the end of the shared preprocessing! Next, for Torch, we set our dataset format and create a `dataloader`. If you're using TensorFlow, skip to the next block.<jupyter_code># Format our dataset to outputs torch.Tensor to train a pytorch model columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] encoded_dataset.set_format(type='torch', columns=columns) # Instantiate a PyTorch Dataloader around our dataset # Let's do dynamic batching (pad on the fly with our own collate_fn) def collate_fn(examples): return tokenizer.pad(examples, return_tensors='pt') dataloader = torch.utils.data.DataLoader(encoded_dataset['train'], collate_fn=collate_fn, batch_size=8)<jupyter_output><empty_output><jupyter_text>For TensorFlow, we use the `to_tf_dataset()` method to get a `tf.data.Dataset`.<jupyter_code>columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] # Let's do dynamic batching (pad on the fly with our own collate_fn) def collate_fn(examples): return tokenizer.pad(examples, return_tensors='np') # to_tf_dataset() returns a tf.data.Dataset that we can pass straight to model.fit(). encoded_tf_dataset = encoded_dataset['train'].to_tf_dataset( columns=columns, collate_fn=collate_fn, batch_size=8, shuffle=True, )<jupyter_output>You're using a BertTokenizerFast tokenizer. 
Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.<jupyter_text>Next, we initialize our model. The next two blocks show model creation and training in Torch. For TensorFlow, skip ahead!<jupyter_code># Let's load a pretrained Bert model and a simple optimizer from transformers import AutoModelForQuestionAnswering model = AutoModelForQuestionAnswering.from_pretrained('bert-base-cased', return_dict=True) optimizer = torch.optim.Adam(model.parameters(), lr=1e-5) # Now let's train our model device = 'cuda' if torch.cuda.is_available() else 'cpu' model.train().to(device) for i, batch in enumerate(dataloader): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() model.zero_grad() print(f'Step {i} - loss: {loss:.3}') if i > 5: break<jupyter_output>Step 0 - loss: 5.65 Step 1 - loss: 5.63 Step 2 - loss: 5.18 Step 3 - loss: 5.6 Step 4 - loss: 5.29 Step 5 - loss: 5.51 Step 6 - loss: 5.49<jupyter_text>Next, we'll initialize and train our TensorFlow model. Note the lack of a loss argument when we `compile()` our model here! All Transformers models support computing loss internally. When no loss argument is provided, the model will use its internal loss - this is especially helpful for cases like QA models, when the loss can be quite complex.<jupyter_code># Let's load a pretrained Bert model and a simple optimizer from transformers import TFAutoModelForQuestionAnswering import tensorflow as tf model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-cased') # No loss argument! model.compile(optimizer=tf.keras.optimizers.Adam(1e-5))<jupyter_output>All model checkpoint layers were used when initializing TFBertForQuestionAnswering. Some layers of TFBertForQuestionAnswering were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['qa_outputs'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.<jupyter_text>Now that all the preprocessing is done, training is an extremely comforting single line of Keras. 
We stop training early with the `steps_per_epoch` argument - you should probably leave that one out of your actual production code!<jupyter_code>model.fit(encoded_tf_dataset, epochs=1, steps_per_epoch=3)<jupyter_output>3/3 [==============================] - 73s 927ms/step - loss: 5.5575<jupyter_text>Example with a NER metric: `seqeval`<jupyter_code>!pip install evaluate seqeval import evaluate ner_metric = evaluate.load('seqeval') references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] ner_metric.compute(predictions=predictions, references=references)<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Requirement already satisfied: evaluate in /usr/local/lib/python3.10/dist-packages (0.4.0) Requirement already satisfied: seqeval in /usr/local/lib/python3.10/dist-packages (1.2.2) Requirement already satisfied: datasets>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2.12.0) Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from evaluate) (1.22.4) Requirement already satisfied: dill in /usr/local/lib/python3.10/dist-packages (from evaluate) (0.3.6) Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from evaluate) (1.5.3) Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2.27.1) Requirement already satisfied: tqdm>=4.62.1 in /usr/local/lib/python3.10/dist-packages (from evaluate) (4.65.0) Requirement already satisfied: xxhash in /usr/local/lib/py[...]
datasets/notebooks/Overview.ipynb/0
{ "file_path": "datasets/notebooks/Overview.ipynb", "repo_id": "datasets", "token_count": 10406 }
69
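A condensed sketch of the preprocessing pattern the notebook walks through (load a split, tokenize with a batched `map`, set a torch format, batch with dynamic padding); the split size and model name are just convenient defaults, and the label computation is omitted to keep the sketch short.

```python
import torch
from datasets import load_dataset
from transformers import AutoTokenizer

dataset = load_dataset("squad", split="validation[:1%]")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

# Batched tokenization, as in the notebook's convert_to_features step.
encoded = dataset.map(
    lambda batch: tokenizer(batch["context"], batch["question"], truncation=True),
    batched=True,
)
encoded.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask"])

# Dynamic padding on the fly, as in the notebook's collate_fn.
loader = torch.utils.data.DataLoader(
    encoded, batch_size=8, collate_fn=lambda examples: tokenizer.pad(examples, return_tensors="pt")
)
print(next(iter(loader))["input_ids"].shape)  # (8, longest sequence in the batch)
```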
import logging import os from argparse import ArgumentParser from pathlib import Path from shutil import copyfile, rmtree from typing import Generator import datasets.config from datasets.builder import DatasetBuilder from datasets.commands import BaseDatasetsCLICommand from datasets.download.download_manager import DownloadMode from datasets.load import dataset_module_factory, import_main_class from datasets.utils.info_utils import VerificationMode from datasets.utils.logging import ERROR, get_logger logger = get_logger(__name__) def _test_command_factory(args): return TestCommand( args.dataset, args.name, args.cache_dir, args.data_dir, args.all_configs, args.save_info or args.save_infos, args.ignore_verifications, args.force_redownload, args.clear_cache, args.num_proc, ) class TestCommand(BaseDatasetsCLICommand): __test__ = False # to tell pytest it's not a test class @staticmethod def register_subcommand(parser: ArgumentParser): test_parser = parser.add_parser("test", help="Test dataset implementation.") test_parser.add_argument("--name", type=str, default=None, help="Dataset processing name") test_parser.add_argument( "--cache_dir", type=str, default=None, help="Cache directory where the datasets are stored.", ) test_parser.add_argument( "--data_dir", type=str, default=None, help="Can be used to specify a manual directory to get the files from.", ) test_parser.add_argument("--all_configs", action="store_true", help="Test all dataset configurations") test_parser.add_argument( "--save_info", action="store_true", help="Save the dataset infos in the dataset card (README.md)" ) test_parser.add_argument( "--ignore_verifications", action="store_true", help="Run the test without checksums and splits checks.", ) test_parser.add_argument("--force_redownload", action="store_true", help="Force dataset redownload") test_parser.add_argument( "--clear_cache", action="store_true", help="Remove downloaded files and cached datasets after each config test", ) test_parser.add_argument("--num_proc", type=int, default=None, help="Number of processes") # aliases test_parser.add_argument("--save_infos", action="store_true", help="alias to save_info") test_parser.add_argument("dataset", type=str, help="Name of the dataset to download") test_parser.set_defaults(func=_test_command_factory) def __init__( self, dataset: str, name: str, cache_dir: str, data_dir: str, all_configs: bool, save_infos: bool, ignore_verifications: bool, force_redownload: bool, clear_cache: bool, num_proc: int, ): self._dataset = dataset self._name = name self._cache_dir = cache_dir self._data_dir = data_dir self._all_configs = all_configs self._save_infos = save_infos self._ignore_verifications = ignore_verifications self._force_redownload = force_redownload self._clear_cache = clear_cache self._num_proc = num_proc if clear_cache and not cache_dir: print( "When --clear_cache is used, specifying a cache directory is mandatory.\n" "The 'download' folder of the cache directory and the dataset builder cache will be deleted after each configuration test.\n" "Please provide a --cache_dir that will be used to test the dataset script." 
) exit(1) if save_infos: self._ignore_verifications = True def run(self): logging.getLogger("filelock").setLevel(ERROR) if self._name is not None and self._all_configs: print("Both parameters `config` and `all_configs` can't be used at once.") exit(1) path, config_name = self._dataset, self._name module = dataset_module_factory(path) builder_cls = import_main_class(module.module_path) n_builders = len(builder_cls.BUILDER_CONFIGS) if self._all_configs and builder_cls.BUILDER_CONFIGS else 1 def get_builders() -> Generator[DatasetBuilder, None, None]: if self._all_configs and builder_cls.BUILDER_CONFIGS: for i, config in enumerate(builder_cls.BUILDER_CONFIGS): if "config_name" in module.builder_kwargs: yield builder_cls( cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) else: yield builder_cls( config_name=config.name, cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) else: if "config_name" in module.builder_kwargs: yield builder_cls(cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs) else: yield builder_cls( config_name=config_name, cache_dir=self._cache_dir, data_dir=self._data_dir, **module.builder_kwargs, ) for j, builder in enumerate(get_builders()): print(f"Testing builder '{builder.config.name}' ({j + 1}/{n_builders})") builder._record_infos = os.path.exists( os.path.join(builder.get_imported_module_dir(), datasets.config.DATASETDICT_INFOS_FILENAME) ) # record checksums only if we need to update a (deprecated) dataset_infos.json builder.download_and_prepare( download_mode=DownloadMode.REUSE_CACHE_IF_EXISTS if not self._force_redownload else DownloadMode.FORCE_REDOWNLOAD, verification_mode=VerificationMode.NO_CHECKS if self._ignore_verifications else VerificationMode.ALL_CHECKS, try_from_hf_gcs=False, num_proc=self._num_proc, ) builder.as_dataset() if self._save_infos: builder._save_infos() # If save_infos=True, the dataset card (README.md) is created next to the loaded module file. # The dataset_infos are saved in the YAML part of the README.md # Let's move it to the original directory of the dataset script, to allow the user to # upload them on S3 at the same time afterwards. if self._save_infos: dataset_readme_path = os.path.join( builder_cls.get_imported_module_dir(), datasets.config.REPOCARD_FILENAME ) name = Path(path).name + ".py" combined_path = os.path.join(path, name) if os.path.isfile(path): dataset_dir = os.path.dirname(path) elif os.path.isfile(combined_path): dataset_dir = path elif os.path.isdir(path): # for local directories containing only data files dataset_dir = path else: # in case of a remote dataset dataset_dir = None print(f"Dataset card saved at {dataset_readme_path}") # Move dataset_info back to the user if dataset_dir is not None: user_dataset_readme_path = os.path.join(dataset_dir, datasets.config.REPOCARD_FILENAME) copyfile(dataset_readme_path, user_dataset_readme_path) print(f"Dataset card saved at {user_dataset_readme_path}") # If clear_cache=True, the download folder and the dataset builder cache directory are deleted if self._clear_cache: if os.path.isdir(builder._cache_dir): logger.warning(f"Clearing cache at {builder._cache_dir}") rmtree(builder._cache_dir) download_dir = os.path.join(self._cache_dir, datasets.config.DOWNLOADED_DATASETS_DIR) if os.path.isdir(download_dir): logger.warning(f"Clearing cache at {download_dir}") rmtree(download_dir) print("Test successful.")
datasets/src/datasets/commands/test.py/0
{ "file_path": "datasets/src/datasets/commands/test.py", "repo_id": "datasets", "token_count": 4202 }
70
import importlib import shutil import warnings from typing import List import fsspec import fsspec.asyn from fsspec.implementations.local import LocalFileSystem from ..utils.deprecation_utils import deprecated from . import compression _has_s3fs = importlib.util.find_spec("s3fs") is not None if _has_s3fs: from .s3filesystem import S3FileSystem # noqa: F401 COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [ compression.Bz2FileSystem, compression.GzipFileSystem, compression.Lz4FileSystem, compression.XzFileSystem, compression.ZstdFileSystem, ] # Register custom filesystems for fs_class in COMPRESSION_FILESYSTEMS: if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class: warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.") fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True) @deprecated( "This function is deprecated and will be removed in a future version. Please use `fsspec.core.strip_protocol` instead." ) def extract_path_from_uri(dataset_path: str) -> str: """ Preprocesses `dataset_path` and removes remote filesystem (e.g. removing `s3://`). Args: dataset_path (`str`): Path (e.g. `dataset/train`) or remote uri (e.g. `s3://my-bucket/dataset/train`) of the dataset directory. """ if "://" in dataset_path: dataset_path = dataset_path.split("://")[1] return dataset_path def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool: """ Checks if `fs` is a remote filesystem. Args: fs (`fsspec.spec.AbstractFileSystem`): An abstract super-class for pythonic file-systems, e.g. `fsspec.filesystem(\'file\')` or [`datasets.filesystems.S3FileSystem`]. """ return not isinstance(fs, LocalFileSystem) def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str): """ Renames the file `src` in `fs` to `dst`. """ if not is_remote_filesystem(fs): # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst)) else: fs.mv(src, dst, recursive=True)
datasets/src/datasets/filesystems/__init__.py/0
{ "file_path": "datasets/src/datasets/filesystems/__init__.py", "repo_id": "datasets", "token_count": 866 }
71
from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class GeneratorDatasetInputStream(AbstractDatasetInputStream): def __init__( self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs, ): super().__init__( features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, ) self.builder = Generator( cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs, ) def read(self): # Build iterable dataset if self.streaming: dataset = self.builder.as_streaming_dataset(split="train") # Build regular (map-style) dataset else: download_config = None download_mode = None verification_mode = None base_path = None self.builder.download_and_prepare( download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, ) dataset = self.builder.as_dataset( split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory ) return dataset
datasets/src/datasets/io/generator.py/0
{ "file_path": "datasets/src/datasets/io/generator.py", "repo_id": "datasets", "token_count": 868 }
72
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, ClassLabel, Features from .base import TaskTemplate @dataclass(frozen=True) class AudioClassification(TaskTemplate): task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True}) input_schema: ClassVar[Features] = Features({"audio": Audio()}) label_schema: ClassVar[Features] = Features({"labels": ClassLabel}) audio_column: str = "audio" label_column: str = "labels" def align_with_features(self, features): if self.label_column not in features: raise ValueError(f"Column {self.label_column} is not present in features.") if not isinstance(features[self.label_column], ClassLabel): raise ValueError(f"Column {self.label_column} is not a ClassLabel.") task_template = copy.deepcopy(self) label_schema = self.label_schema.copy() label_schema["labels"] = features[self.label_column] task_template.__dict__["label_schema"] = label_schema return task_template @property def column_mapping(self) -> Dict[str, str]: return { self.audio_column: "audio", self.label_column: "labels", }
datasets/src/datasets/tasks/audio_classification.py/0
{ "file_path": "datasets/src/datasets/tasks/audio_classification.py", "repo_id": "datasets", "token_count": 487 }
73
"""Contains utilities to flag a feature as "experimental" in datasets.""" import warnings from functools import wraps from typing import Callable def experimental(fn: Callable) -> Callable: """Decorator to flag a feature as experimental. An experimental feature trigger a warning when used as it might be subject to breaking changes in the future. Args: fn (`Callable`): The function to flag as experimental. Returns: `Callable`: The decorated function. Example: ```python >>> from datasets.utils import experimental >>> @experimental ... def my_function(): ... print("Hello world!") >>> my_function() UserWarning: 'my_function' is experimental and might be subject to breaking changes in the future. Hello world! ``` """ @wraps(fn) def _inner_fn(*args, **kwargs): warnings.warn( (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."), UserWarning, ) return fn(*args, **kwargs) return _inner_fn
datasets/src/datasets/utils/experimental.py/0
{ "file_path": "datasets/src/datasets/utils/experimental.py", "repo_id": "datasets", "token_count": 386 }
74
[ "unknown", "n<1K", "1K<n<10K", "10K<n<100K", "100K<n<1M", "1M<n<10M", "10M<n<100M", "100M<n<1B", "1B<n<10B", "10B<n<100B", "100B<n<1T", "n>1T" ]
datasets/src/datasets/utils/resources/size_categories.json/0
{ "file_path": "datasets/src/datasets/utils/resources/size_categories.json", "repo_id": "datasets", "token_count": 124 }
75
import pytest DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" DATASET_LOADING_SCRIPT_CODE = """ import json import os import datasets REPO_URL = "https://huggingface.co/datasets/hf-internal-testing/raw_jsonl/resolve/main/" URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"} class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self): features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=[ "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", ] ) ), "langs": datasets.Sequence(datasets.Value("string")), "spans": datasets.Sequence(datasets.Value("string")), } ) return datasets.DatasetInfo(features=features) def _split_generators(self, dl_manager): dl_path = dl_manager.download(URLS) return [ datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}), datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}), ] def _generate_examples(self, filepath): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, json.loads(line) """ @pytest.fixture def dataset_loading_script_name(): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def dataset_loading_script_code(): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path): script_name = dataset_loading_script_name script_dir = tmp_path / "datasets" / script_name script_dir.mkdir(parents=True) script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(dataset_loading_script_code) return str(script_dir)
datasets/tests/commands/conftest.py/0
{ "file_path": "datasets/tests/commands/conftest.py", "repo_id": "datasets", "token_count": 1193 }
76
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.packaged_modules.csv.csv import Csv from ..utils import require_pil @pytest.fixture def csv_file(tmp_path): filename = tmp_path / "file.csv" data = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def malformed_csv_file(tmp_path): filename = tmp_path / "malformed_file.csv" data = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def csv_file_with_image(tmp_path, image_file): filename = tmp_path / "csv_with_image.csv" data = textwrap.dedent( f"""\ image {image_file} """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def csv_file_with_label(tmp_path): filename = tmp_path / "csv_with_label.csv" data = textwrap.dedent( """\ label good bad good """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def csv_file_with_int_list(tmp_path): filename = tmp_path / "csv_with_int_list.csv" data = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(filename, "w") as f: f.write(data) return str(filename) def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog): csv = Csv() generator = csv._generate_tables([[csv_file, malformed_csv_file]]) with pytest.raises(ValueError, match="Error tokenizing data"): for _ in generator: pass assert any( record.levelname == "ERROR" and "Failed to read file" in record.message and os.path.basename(malformed_csv_file) in record.message for record in caplog.records ) @require_pil def test_csv_cast_image(csv_file_with_image): with open(csv_file_with_image, encoding="utf-8") as f: image_file = f.read().splitlines()[1] csv = Csv(encoding="utf-8", features=Features({"image": Image()})) generator = csv._generate_tables([[csv_file_with_image]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.schema.field("image").type == Image()() generated_content = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] def test_csv_cast_label(csv_file_with_label): with open(csv_file_with_label, encoding="utf-8") as f: labels = f.read().splitlines()[1:] csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])})) generator = csv._generate_tables([[csv_file_with_label]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])() generated_content = pa_table.to_pydict()["label"] assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels] def test_csv_convert_int_list(csv_file_with_int_list): csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]}) generator = csv._generate_tables([[csv_file_with_int_list]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa.types.is_list(pa_table.schema.field("int_list").type) generated_content = pa_table.to_pydict()["int_list"] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
datasets/tests/packaged_modules/test_csv.py/0
{ "file_path": "datasets/tests/packaged_modules/test_csv.py", "repo_id": "datasets", "token_count": 1672 }
77
import json import os from pathlib import Path import pytest from datasets.download.download_config import DownloadConfig from datasets.download.download_manager import DownloadManager from datasets.utils.file_utils import hash_url_to_filename URL = "http://www.mocksite.com/file1.txt" CONTENT = '"text": ["foo", "foo"]' HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8" class MockResponse: status_code = 200 headers = {"Content-Length": "100"} cookies = {} def iter_content(self, **kwargs): return [bytes(CONTENT, "utf-8")] def mock_request(*args, **kwargs): return MockResponse() @pytest.mark.parametrize("urls_type", [str, list, dict]) def test_download_manager_download(urls_type, tmp_path, monkeypatch): import requests monkeypatch.setattr(requests, "request", mock_request) url = URL if issubclass(urls_type, str): urls = url elif issubclass(urls_type, list): urls = [url] elif issubclass(urls_type, dict): urls = {"train": url} dataset_name = "dummy" cache_subdir = "downloads" cache_dir_root = tmp_path download_config = DownloadConfig( cache_dir=os.path.join(cache_dir_root, cache_subdir), use_etag=False, ) dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config) downloaded_paths = dl_manager.download(urls) input_urls = urls for downloaded_paths in [downloaded_paths]: if isinstance(urls, str): downloaded_paths = [downloaded_paths] input_urls = [urls] elif isinstance(urls, dict): assert "train" in downloaded_paths.keys() downloaded_paths = downloaded_paths.values() input_urls = urls.values() assert downloaded_paths for downloaded_path, input_url in zip(downloaded_paths, input_urls): assert downloaded_path == dl_manager.downloaded_paths[input_url] downloaded_path = Path(downloaded_path) parts = downloaded_path.parts assert parts[-1] == HASH assert parts[-2] == cache_subdir assert downloaded_path.exists() content = downloaded_path.read_text() assert content == CONTENT metadata_downloaded_path = downloaded_path.with_suffix(".json") assert metadata_downloaded_path.exists() metadata_content = json.loads(metadata_downloaded_path.read_text()) assert metadata_content == {"url": URL, "etag": None} @pytest.mark.parametrize("paths_type", [str, list, dict]) def test_download_manager_extract(paths_type, xz_file, text_file): filename = str(xz_file) if issubclass(paths_type, str): paths = filename elif issubclass(paths_type, list): paths = [filename] elif issubclass(paths_type, dict): paths = {"train": filename} dataset_name = "dummy" cache_dir = xz_file.parent extracted_subdir = "extracted" download_config = DownloadConfig( cache_dir=cache_dir, use_etag=False, ) dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config) extracted_paths = dl_manager.extract(paths) input_paths = paths for extracted_paths in [extracted_paths]: if isinstance(paths, str): extracted_paths = [extracted_paths] input_paths = [paths] elif isinstance(paths, dict): assert "train" in extracted_paths.keys() extracted_paths = extracted_paths.values() input_paths = paths.values() assert extracted_paths for extracted_path, input_path in zip(extracted_paths, input_paths): assert extracted_path == dl_manager.extracted_paths[input_path] extracted_path = Path(extracted_path) parts = extracted_path.parts assert parts[-1] == hash_url_to_filename(input_path, etag=None) assert parts[-2] == extracted_subdir assert extracted_path.exists() extracted_file_content = extracted_path.read_text() expected_file_content = text_file.read_text() assert extracted_file_content == 
expected_file_content def _test_jsonl(path, file): assert path.endswith(".jsonl") for num_items, line in enumerate(file, start=1): item = json.loads(line.decode("utf-8")) assert item.keys() == {"col_1", "col_2", "col_3"} assert num_items == 4 @pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"]) def test_iter_archive_path(archive_jsonl, request): archive_jsonl_path = request.getfixturevalue(archive_jsonl) dl_manager = DownloadManager() for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1): _test_jsonl(path, file) assert num_jsonl == 2 @pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"]) def test_iter_archive_file(archive_nested_jsonl, request): archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl) dl_manager = DownloadManager() for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1): for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1): _test_jsonl(subpath, subfile) assert num_tar == 1 assert num_jsonl == 2 def test_iter_files(data_dir_with_hidden_files): dl_manager = DownloadManager() for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1): assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt") assert num_file == 2
datasets/tests/test_download_manager.py/0
{ "file_path": "datasets/tests/test_download_manager.py", "repo_id": "datasets", "token_count": 2407 }
78
import os import pickle import tempfile import time from multiprocessing import Pool from unittest import TestCase import pytest from datasets.features import Features, Sequence, Value from datasets.metric import Metric, MetricInfo from .utils import require_tf, require_torch class DummyMetric(Metric): def _info(self): return MetricInfo( description="dummy metric for tests", citation="insert citation here", features=Features({"predictions": Value("int64"), "references": Value("int64")}), ) def _compute(self, predictions, references): return ( { "accuracy": sum(i == j for i, j in zip(predictions, references)) / len(predictions), "set_equality": set(predictions) == set(references), } if predictions else {} ) @classmethod def predictions_and_references(cls): return ([1, 2, 3, 4], [1, 2, 4, 3]) @classmethod def expected_results(cls): return {"accuracy": 0.5, "set_equality": True} @classmethod def other_predictions_and_references(cls): return ([1, 3, 4, 5], [1, 2, 3, 4]) @classmethod def other_expected_results(cls): return {"accuracy": 0.25, "set_equality": False} @classmethod def distributed_predictions_and_references(cls): return ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 5], [1, 2, 3, 4]) @classmethod def distributed_expected_results(cls): return {"accuracy": 0.75, "set_equality": False} @classmethod def separate_predictions_and_references(cls): return ([1, 2, 3, 4], [1, 2, 3, 4]), ([1, 2, 4, 5], [1, 2, 3, 4]) @classmethod def separate_expected_results(cls): return [{"accuracy": 1.0, "set_equality": True}, {"accuracy": 0.5, "set_equality": False}] def properly_del_metric(metric): """properly delete a metric on windows if the process is killed during multiprocessing""" if metric is not None: if metric.filelock is not None: metric.filelock.release() if metric.rendez_vous_lock is not None: metric.rendez_vous_lock.release() del metric.writer del metric.data del metric def metric_compute(arg): """Thread worker function for distributed evaluation testing. On base level to be pickable. """ metric = None try: num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg metric = DummyMetric( num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5 ) time.sleep(wait) results = metric.compute(predictions=preds, references=refs) return results finally: properly_del_metric(metric) def metric_add_batch_and_compute(arg): """Thread worker function for distributed evaluation testing. On base level to be pickable. """ metric = None try: num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg metric = DummyMetric( num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5 ) metric.add_batch(predictions=preds, references=refs) time.sleep(wait) results = metric.compute() return results finally: properly_del_metric(metric) def metric_add_and_compute(arg): """Thread worker function for distributed evaluation testing. On base level to be pickable. 
""" metric = None try: num_process, process_id, preds, refs, exp_id, cache_dir, wait = arg metric = DummyMetric( num_process=num_process, process_id=process_id, experiment_id=exp_id, cache_dir=cache_dir, timeout=5 ) for pred, ref in zip(preds, refs): metric.add(prediction=pred, reference=ref) time.sleep(wait) results = metric.compute() return results finally: properly_del_metric(metric) @pytest.mark.filterwarnings("ignore:Metric is deprecated:FutureWarning") class TestMetric(TestCase): def test_dummy_metric(self): preds, refs = DummyMetric.predictions_and_references() expected_results = DummyMetric.expected_results() metric = DummyMetric(experiment_id="test_dummy_metric") self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) del metric metric = DummyMetric(experiment_id="test_dummy_metric") metric.add_batch(predictions=preds, references=refs) self.assertDictEqual(expected_results, metric.compute()) del metric metric = DummyMetric(experiment_id="test_dummy_metric") for pred, ref in zip(preds, refs): metric.add(prediction=pred, reference=ref) self.assertDictEqual(expected_results, metric.compute()) del metric # With keep_in_memory metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric") self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) del metric metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric") metric.add_batch(predictions=preds, references=refs) self.assertDictEqual(expected_results, metric.compute()) del metric metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric") for pred, ref in zip(preds, refs): metric.add(prediction=pred, reference=ref) self.assertDictEqual(expected_results, metric.compute()) del metric metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric") self.assertDictEqual({}, metric.compute(predictions=[], references=[])) del metric metric = DummyMetric(keep_in_memory=True, experiment_id="test_dummy_metric") with self.assertRaisesRegex(ValueError, "Mismatch in the number"): metric.add_batch(predictions=[1, 2, 3], references=[1, 2, 3, 4]) del metric def test_metric_with_cache_dir(self): preds, refs = DummyMetric.predictions_and_references() expected_results = DummyMetric.expected_results() with tempfile.TemporaryDirectory() as tmp_dir: metric = DummyMetric(experiment_id="test_dummy_metric", cache_dir=tmp_dir) self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) del metric def test_concurrent_metrics(self): preds, refs = DummyMetric.predictions_and_references() other_preds, other_refs = DummyMetric.other_predictions_and_references() expected_results = DummyMetric.expected_results() other_expected_results = DummyMetric.other_expected_results() metric = DummyMetric(experiment_id="test_concurrent_metrics") other_metric = DummyMetric( experiment_id="test_concurrent_metrics", ) self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) self.assertDictEqual( other_expected_results, other_metric.compute(predictions=other_preds, references=other_refs) ) del metric, other_metric metric = DummyMetric( experiment_id="test_concurrent_metrics", ) other_metric = DummyMetric( experiment_id="test_concurrent_metrics", ) metric.add_batch(predictions=preds, references=refs) other_metric.add_batch(predictions=other_preds, references=other_refs) self.assertDictEqual(expected_results, metric.compute()) self.assertDictEqual(other_expected_results, 
other_metric.compute()) for pred, ref, other_pred, other_ref in zip(preds, refs, other_preds, other_refs): metric.add(prediction=pred, reference=ref) other_metric.add(prediction=other_pred, reference=other_ref) self.assertDictEqual(expected_results, metric.compute()) self.assertDictEqual(other_expected_results, other_metric.compute()) del metric, other_metric # With keep_in_memory metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True) other_metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True) self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) self.assertDictEqual( other_expected_results, other_metric.compute(predictions=other_preds, references=other_refs) ) metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True) other_metric = DummyMetric(experiment_id="test_concurrent_metrics", keep_in_memory=True) metric.add_batch(predictions=preds, references=refs) other_metric.add_batch(predictions=other_preds, references=other_refs) self.assertDictEqual(expected_results, metric.compute()) self.assertDictEqual(other_expected_results, other_metric.compute()) for pred, ref, other_pred, other_ref in zip(preds, refs, other_preds, other_refs): metric.add(prediction=pred, reference=ref) other_metric.add(prediction=other_pred, reference=other_ref) self.assertDictEqual(expected_results, metric.compute()) self.assertDictEqual(other_expected_results, other_metric.compute()) del metric, other_metric def test_separate_experiments_in_parallel(self): with tempfile.TemporaryDirectory() as tmp_dir: (preds_0, refs_0), (preds_1, refs_1) = DummyMetric.separate_predictions_and_references() expected_results = DummyMetric.separate_expected_results() pool = Pool(processes=4) results = pool.map( metric_compute, [ (1, 0, preds_0, refs_0, None, tmp_dir, 0), (1, 0, preds_1, refs_1, None, tmp_dir, 0), ], ) self.assertDictEqual(expected_results[0], results[0]) self.assertDictEqual(expected_results[1], results[1]) del results # more than one sec of waiting so that the second metric has to sample a new hashing name results = pool.map( metric_compute, [ (1, 0, preds_0, refs_0, None, tmp_dir, 2), (1, 0, preds_1, refs_1, None, tmp_dir, 2), ], ) self.assertDictEqual(expected_results[0], results[0]) self.assertDictEqual(expected_results[1], results[1]) del results results = pool.map( metric_add_and_compute, [ (1, 0, preds_0, refs_0, None, tmp_dir, 0), (1, 0, preds_1, refs_1, None, tmp_dir, 0), ], ) self.assertDictEqual(expected_results[0], results[0]) self.assertDictEqual(expected_results[1], results[1]) del results results = pool.map( metric_add_batch_and_compute, [ (1, 0, preds_0, refs_0, None, tmp_dir, 0), (1, 0, preds_1, refs_1, None, tmp_dir, 0), ], ) self.assertDictEqual(expected_results[0], results[0]) self.assertDictEqual(expected_results[1], results[1]) del results def test_distributed_metrics(self): with tempfile.TemporaryDirectory() as tmp_dir: (preds_0, refs_0), (preds_1, refs_1) = DummyMetric.distributed_predictions_and_references() expected_results = DummyMetric.distributed_expected_results() pool = Pool(processes=4) results = pool.map( metric_compute, [ (2, 0, preds_0, refs_0, "test_distributed_metrics_0", tmp_dir, 0), (2, 1, preds_1, refs_1, "test_distributed_metrics_0", tmp_dir, 0.5), ], ) self.assertDictEqual(expected_results, results[0]) self.assertIsNone(results[1]) del results results = pool.map( metric_compute, [ (2, 0, preds_0, refs_0, "test_distributed_metrics_0", tmp_dir, 0.5), (2, 1, 
preds_1, refs_1, "test_distributed_metrics_0", tmp_dir, 0), ], ) self.assertDictEqual(expected_results, results[0]) self.assertIsNone(results[1]) del results results = pool.map( metric_add_and_compute, [ (2, 0, preds_0, refs_0, "test_distributed_metrics_1", tmp_dir, 0), (2, 1, preds_1, refs_1, "test_distributed_metrics_1", tmp_dir, 0), ], ) self.assertDictEqual(expected_results, results[0]) self.assertIsNone(results[1]) del results results = pool.map( metric_add_batch_and_compute, [ (2, 0, preds_0, refs_0, "test_distributed_metrics_2", tmp_dir, 0), (2, 1, preds_1, refs_1, "test_distributed_metrics_2", tmp_dir, 0), ], ) self.assertDictEqual(expected_results, results[0]) self.assertIsNone(results[1]) del results # To use several distributed metrics on the same local file system, need to specify an experiment_id try: results = pool.map( metric_add_and_compute, [ (2, 0, preds_0, refs_0, "test_distributed_metrics_3", tmp_dir, 0), (2, 1, preds_1, refs_1, "test_distributed_metrics_3", tmp_dir, 0), (2, 0, preds_0, refs_0, "test_distributed_metrics_3", tmp_dir, 0), (2, 1, preds_1, refs_1, "test_distributed_metrics_3", tmp_dir, 0), ], ) except ValueError: # We are fine with either raising a ValueError or computing well the metric # Being sure we raise the error would means making the dummy dataset bigger # and the test longer... pass else: self.assertDictEqual(expected_results, results[0]) self.assertDictEqual(expected_results, results[2]) self.assertIsNone(results[1]) self.assertIsNone(results[3]) del results results = pool.map( metric_add_and_compute, [ (2, 0, preds_0, refs_0, "exp_0", tmp_dir, 0), (2, 1, preds_1, refs_1, "exp_0", tmp_dir, 0), (2, 0, preds_0, refs_0, "exp_1", tmp_dir, 0), (2, 1, preds_1, refs_1, "exp_1", tmp_dir, 0), ], ) self.assertDictEqual(expected_results, results[0]) self.assertDictEqual(expected_results, results[2]) self.assertIsNone(results[1]) self.assertIsNone(results[3]) del results # With keep_in_memory is not allowed with self.assertRaises(ValueError): DummyMetric( experiment_id="test_distributed_metrics_4", keep_in_memory=True, num_process=2, process_id=0, cache_dir=tmp_dir, ) def test_dummy_metric_pickle(self): with tempfile.TemporaryDirectory() as tmp_dir: tmp_file = os.path.join(tmp_dir, "metric.pt") preds, refs = DummyMetric.predictions_and_references() expected_results = DummyMetric.expected_results() metric = DummyMetric(experiment_id="test_dummy_metric_pickle") with open(tmp_file, "wb") as f: pickle.dump(metric, f) del metric with open(tmp_file, "rb") as f: metric = pickle.load(f) self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) del metric def test_input_numpy(self): import numpy as np preds, refs = DummyMetric.predictions_and_references() expected_results = DummyMetric.expected_results() preds, refs = np.array(preds), np.array(refs) metric = DummyMetric(experiment_id="test_input_numpy") self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) del metric metric = DummyMetric(experiment_id="test_input_numpy") metric.add_batch(predictions=preds, references=refs) self.assertDictEqual(expected_results, metric.compute()) del metric metric = DummyMetric(experiment_id="test_input_numpy") for pred, ref in zip(preds, refs): metric.add(prediction=pred, reference=ref) self.assertDictEqual(expected_results, metric.compute()) del metric @require_torch def test_input_torch(self): import torch preds, refs = DummyMetric.predictions_and_references() expected_results = DummyMetric.expected_results() 
preds, refs = torch.tensor(preds), torch.tensor(refs) metric = DummyMetric(experiment_id="test_input_torch") self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) del metric metric = DummyMetric(experiment_id="test_input_torch") metric.add_batch(predictions=preds, references=refs) self.assertDictEqual(expected_results, metric.compute()) del metric metric = DummyMetric(experiment_id="test_input_torch") for pred, ref in zip(preds, refs): metric.add(prediction=pred, reference=ref) self.assertDictEqual(expected_results, metric.compute()) del metric @require_tf def test_input_tf(self): import tensorflow as tf preds, refs = DummyMetric.predictions_and_references() expected_results = DummyMetric.expected_results() preds, refs = tf.constant(preds), tf.constant(refs) metric = DummyMetric(experiment_id="test_input_tf") self.assertDictEqual(expected_results, metric.compute(predictions=preds, references=refs)) del metric metric = DummyMetric(experiment_id="test_input_tf") metric.add_batch(predictions=preds, references=refs) self.assertDictEqual(expected_results, metric.compute()) del metric metric = DummyMetric(experiment_id="test_input_tf") for pred, ref in zip(preds, refs): metric.add(prediction=pred, reference=ref) self.assertDictEqual(expected_results, metric.compute()) del metric class MetricWithMultiLabel(Metric): def _info(self): return MetricInfo( description="dummy metric for tests", citation="insert citation here", features=Features( {"predictions": Sequence(Value("int64")), "references": Sequence(Value("int64"))} if self.config_name == "multilabel" else {"predictions": Value("int64"), "references": Value("int64")} ), ) def _compute(self, predictions=None, references=None): return ( { "accuracy": sum(i == j for i, j in zip(predictions, references)) / len(predictions), } if predictions else {} ) @pytest.mark.parametrize( "config_name, predictions, references, expected", [ (None, [1, 2, 3, 4], [1, 2, 4, 3], 0.5), # Multiclass: Value("int64") ( "multilabel", [[1, 0], [1, 0], [1, 0], [1, 0]], [[1, 0], [0, 1], [1, 1], [0, 0]], 0.25, ), # Multilabel: Sequence(Value("int64")) ], ) def test_metric_with_multilabel(config_name, predictions, references, expected, tmp_path): cache_dir = tmp_path / "cache" metric = MetricWithMultiLabel(config_name, cache_dir=cache_dir) results = metric.compute(predictions=predictions, references=references) assert results["accuracy"] == expected def test_safety_checks_process_vars(): with pytest.raises(ValueError): _ = DummyMetric(process_id=-2) with pytest.raises(ValueError): _ = DummyMetric(num_process=2, process_id=3) class AccuracyWithNonStandardFeatureNames(Metric): def _info(self): return MetricInfo( description="dummy metric for tests", citation="insert citation here", features=Features({"inputs": Value("int64"), "targets": Value("int64")}), ) def _compute(self, inputs, targets): return ( { "accuracy": sum(i == j for i, j in zip(inputs, targets)) / len(targets), } if targets else {} ) @classmethod def inputs_and_targets(cls): return ([1, 2, 3, 4], [1, 2, 4, 3]) @classmethod def expected_results(cls): return {"accuracy": 0.5} def test_metric_with_non_standard_feature_names_add(tmp_path): cache_dir = tmp_path / "cache" inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets() metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir) for input, target in zip(inputs, targets): metric.add(inputs=input, targets=target) results = metric.compute() assert results == 
AccuracyWithNonStandardFeatureNames.expected_results() def test_metric_with_non_standard_feature_names_add_batch(tmp_path): cache_dir = tmp_path / "cache" inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets() metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir) metric.add_batch(inputs=inputs, targets=targets) results = metric.compute() assert results == AccuracyWithNonStandardFeatureNames.expected_results() def test_metric_with_non_standard_feature_names_compute(tmp_path): cache_dir = tmp_path / "cache" inputs, targets = AccuracyWithNonStandardFeatureNames.inputs_and_targets() metric = AccuracyWithNonStandardFeatureNames(cache_dir=cache_dir) results = metric.compute(inputs=inputs, targets=targets) assert results == AccuracyWithNonStandardFeatureNames.expected_results()
datasets/tests/test_metric.py/0
{ "file_path": "datasets/tests/test_metric.py", "repo_id": "datasets", "token_count": 10533 }
79
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def mock_emitted_deprecation_warnings(monkeypatch): monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set()) # Used by list_metrics @pytest.fixture def mock_hfh(monkeypatch): class MetricMock: def __init__(self, metric_id): self.id = metric_id class HfhMock: _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def list_metrics(self): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock()) @pytest.mark.parametrize( "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path): if "tmp_path" in args: args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args) with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"): func(*args)
datasets/tests/test_warnings.py/0
{ "file_path": "datasets/tests/test_warnings.py", "repo_id": "datasets", "token_count": 473 }
80
<jupyter_start><jupyter_text>Unit 6: Advantage Actor Critic (A2C) using Robotics Simulations with Panda-Gym 🤖In this notebook, you'll learn to use A2C with [Panda-Gym](https://github.com/qgallouedec/panda-gym). You're going **to train a robotic arm** (Franka Emika Panda robot) to perform a task:- `Reach`: the robot must place its end-effector at a target position.After that, you'll be able **to train in other robotics tasks**. 🎮 Environments:- [Panda-Gym](https://github.com/qgallouedec/panda-gym)📚 RL-Library:- [Stable-Baselines3](https://stable-baselines3.readthedocs.io/) We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues). Objectives of this notebook 🏆At the end of the notebook, you will:- Be able to use **Panda-Gym**, the environment library.- Be able to **train robots using A2C**.- Understand why **we need to normalize the input**.- Be able to **push your trained agent and the code to the Hub** with a nice video replay and an evaluation score 🔥. This notebook is from the Deep Reinforcement Learning CourseIn this free course, you will:- 📖 Study Deep Reinforcement Learning in **theory and practice**.- 🧑‍💻 Learn to **use famous Deep RL libraries** such as Stable Baselines3, RL Baselines3 Zoo, CleanRL and Sample Factory 2.0.- 🤖 Train **agents in unique environments**And more check 📚 the syllabus 👉 https://simoninithomas.github.io/deep-rl-courseDon’t forget to **sign up to the course** (we are collecting your email to be able to **send you the links when each Unit is published and give you information about the challenges and updates).**The best way to keep in touch is to join our discord server to exchange with the community and with us 👉🏻 https://discord.gg/ydHrjt3WP5 Prerequisites 🏗️Before diving into the notebook, you need to:🔲 📚 Study [Actor-Critic methods by reading Unit 6](https://huggingface.co/deep-rl-course/unit6/introduction) 🤗 Let's train our first robots 🤖 To validate this hands-on for the [certification process](https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process), you need to push your trained model to the Hub and get the following results:- `PandaReachDense-v3` get a result of >= -3.5.To find your result, go to the [leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) and find your model, **the result = mean_reward - std of reward**For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introductioncertification-process Set the GPU 💪- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type` - `Hardware Accelerator > GPU` Create a virtual display 🔽During the notebook, we'll need to generate a replay video. 
To do so, with colab, **we need to have a virtual screen to be able to render the environment** (and thus record the frames).Hence the following cell will install the librairies and create and run a virtual screen 🖥<jupyter_code>%%capture !apt install python-opengl !apt install ffmpeg !apt install xvfb !pip3 install pyvirtualdisplay # Virtual display from pyvirtualdisplay import Display virtual_display = Display(visible=0, size=(1400, 900)) virtual_display.start()<jupyter_output><empty_output><jupyter_text>Install dependencies 🔽The first step is to install the dependencies, we’ll install multiple ones:- `gymnasium`- `panda-gym`: Contains the robotics arm environments.- `stable-baselines3`: The SB3 deep reinforcement learning library.- `huggingface_sb3`: Additional code for Stable-baselines3 to load and upload models from the Hugging Face 🤗 Hub.- `huggingface_hub`: Library allowing anyone to work with the Hub repositories.⏲ The installation can **take 10 minutes**.<jupyter_code>!pip install stable-baselines3[extra] !pip install gymnasium !pip install huggingface_sb3 !pip install huggingface_hub !pip install panda_gym<jupyter_output><empty_output><jupyter_text>Import the packages 📦<jupyter_code>import os import gymnasium as gym import panda_gym from huggingface_sb3 import load_from_hub, package_to_hub from stable_baselines3 import A2C from stable_baselines3.common.evaluation import evaluate_policy from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize from stable_baselines3.common.env_util import make_vec_env from huggingface_hub import notebook_login<jupyter_output><empty_output><jupyter_text>PandaReachDense-v3 🦾The agent we're going to train is a robotic arm that needs to do controls (moving the arm and using the end-effector).In robotics, the *end-effector* is the device at the end of a robotic arm designed to interact with the environment.In `PandaReach`, the robot must place its end-effector at a target position (green ball).We're going to use the dense version of this environment. It means we'll get a *dense reward function* that **will provide a reward at each timestep** (the closer the agent is to completing the task, the higher the reward). Contrary to a *sparse reward function* where the environment **return a reward if and only if the task is completed**.Also, we're going to use the *End-effector displacement control*, it means the **action corresponds to the displacement of the end-effector**. We don't control the individual motion of each joint (joint control).This way **the training will be easier**. 
Create the environment The environment 🎮In `PandaReachDense-v3` the robotic arm must place its end-effector at a target position (green ball).<jupyter_code>env_id = "PandaReachDense-v3" # Create the env env = gym.make(env_id) # Get the state space and action space s_size = env.observation_space.shape a_size = env.action_space print("_____OBSERVATION SPACE_____ \n") print("The State Space is: ", s_size) print("Sample observation", env.observation_space.sample()) # Get a random observation<jupyter_output><empty_output><jupyter_text>The observation space **is a dictionary with 3 different elements**:- `achieved_goal`: (x,y,z) position of the goal.- `desired_goal`: (x,y,z) distance between the goal position and the current object position.- `observation`: position (x,y,z) and velocity of the end-effector (vx, vy, vz).Given it's a dictionary as observation, **we will need to use a MultiInputPolicy policy instead of MlpPolicy**.<jupyter_code>print("\n _____ACTION SPACE_____ \n") print("The Action Space is: ", a_size) print("Action Space Sample", env.action_space.sample()) # Take a random action<jupyter_output><empty_output><jupyter_text>The action space is a vector with 3 values:- Control x, y, z movement Normalize observation and rewards A good practice in reinforcement learning is to [normalize input features](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html).For that purpose, there is a wrapper that will compute a running average and standard deviation of input features.We also normalize rewards with this same wrapper by adding `norm_reward = True`[You should check the documentation to fill this cell](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.htmlvecnormalize)<jupyter_code>env = make_vec_env(env_id, n_envs=4) # Adding this wrapper to normalize the observation and the reward env = # TODO: Add the wrapper<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>env = make_vec_env(env_id, n_envs=4) env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.)<jupyter_output><empty_output><jupyter_text>Create the A2C Model 🤖For more information about A2C implementation with StableBaselines3 check: https://stable-baselines3.readthedocs.io/en/master/modules/a2c.htmlnotesTo find the best parameters I checked the [official trained agents by Stable-Baselines3 team](https://huggingface.co/sb3).<jupyter_code>model = # Create the A2C model and try to find the best parameters<jupyter_output><empty_output><jupyter_text>Solution<jupyter_code>model = A2C(policy = "MultiInputPolicy", env = env, verbose=1)<jupyter_output><empty_output><jupyter_text>Train the A2C agent 🏃- Let's train our agent for 1,000,000 timesteps, don't forget to use GPU on Colab. 
It will take approximately ~25-40min<jupyter_code>model.learn(1_000_000) # Save the model and VecNormalize statistics when saving the agent model.save("a2c-PandaReachDense-v3") env.save("vec_normalize.pkl")<jupyter_output><empty_output><jupyter_text>Evaluate the agent 📈- Now that's our agent is trained, we need to **check its performance**.- Stable-Baselines3 provides a method to do that: `evaluate_policy`<jupyter_code>from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize # Load the saved statistics eval_env = DummyVecEnv([lambda: gym.make("PandaReachDense-v3")]) eval_env = VecNormalize.load("vec_normalize.pkl", eval_env) # We need to override the render_mode eval_env.render_mode = "rgb_array" # do not update them at test time eval_env.training = False # reward normalization is not needed at test time eval_env.norm_reward = False # Load the agent model = A2C.load("a2c-PandaReachDense-v3") mean_reward, std_reward = evaluate_policy(model, eval_env) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}")<jupyter_output><empty_output><jupyter_text>Publish your trained model on the Hub 🔥Now that we saw we got good results after the training, we can publish our trained model on the Hub with one line of code.📚 The libraries documentation 👉 https://github.com/huggingface/huggingface_sb3/tree/mainhugging-face--x-stable-baselines3-v20 By using `package_to_hub`, as we already mentionned in the former units, **you evaluate, record a replay, generate a model card of your agent and push it to the hub**.This way:- You can **showcase our work** 🔥- You can **visualize your agent playing** 👀- You can **share with the community an agent that others can use** 💾- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard To be able to share your model with the community there are three more steps to follow:1️⃣ (If it's not already done) create an account to HF ➡ https://huggingface.co/join2️⃣ Sign in and then, you need to store your authentication token from the Hugging Face website.- Create a new token (https://huggingface.co/settings/tokens) **with write role**- Copy the token- Run the cell below and paste the token<jupyter_code>notebook_login() !git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>If you don't want to use a Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` 3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using `package_to_hub()` function For this environment, **running this cell can take approximately 10min**<jupyter_code>from huggingface_sb3 import package_to_hub package_to_hub( model=model, model_name=f"a2c-{env_id}", model_architecture="A2C", env_id=env_id, eval_env=eval_env, repo_id=f"ThomasSimonini/a2c-{env_id}", # Change the username commit_message="Initial commit", )<jupyter_output><empty_output><jupyter_text>Some additional challenges 🏆The best way to learn **is to try things by your own**! Why not trying `PandaPickAndPlace-v3`?If you want to try more advanced tasks for panda-gym, you need to check what was done using **TQC or SAC** (a more sample-efficient algorithm suited for robotics tasks). 
In real robotics, you'll use a more sample-efficient algorithm for a simple reason: contrary to a simulation **if you move your robotic arm too much, you have a risk of breaking it**.PandaPickAndPlace-v1 (this model uses the v1 version of the environment): https://huggingface.co/sb3/tqc-PandaPickAndPlace-v1And don't hesitate to check panda-gym documentation here: https://panda-gym.readthedocs.io/en/latest/usage/train_with_sb3.htmlWe provide you the steps to train another agent (optional):1. Define the environment called "PandaPickAndPlace-v3"2. Make a vectorized environment3. Add a wrapper to normalize the observations and rewards. [Check the documentation](https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.htmlvecnormalize)4. Create the A2C Model (don't forget verbose=1 to print the training logs).5. Train it for 1M Timesteps6. Save the model and VecNormalize statistics when saving the agent7. Evaluate your agent8. Publish your trained model on the Hub 🔥 with `package_to_hub` Solution (optional)<jupyter_code># 1 - 2 env_id = "PandaPickAndPlace-v3" env = make_vec_env(env_id, n_envs=4) # 3 env = VecNormalize(env, norm_obs=True, norm_reward=True, clip_obs=10.) # 4 model = A2C(policy = "MultiInputPolicy", env = env, verbose=1) # 5 model.learn(1_000_000) # 6 model_name = "a2c-PandaPickAndPlace-v3"; model.save(model_name) env.save("vec_normalize.pkl") # 7 from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize # Load the saved statistics eval_env = DummyVecEnv([lambda: gym.make("PandaPickAndPlace-v3")]) eval_env = VecNormalize.load("vec_normalize.pkl", eval_env) # do not update them at test time eval_env.training = False # reward normalization is not needed at test time eval_env.norm_reward = False # Load the agent model = A2C.load(model_name) mean_reward, std_reward = evaluate_policy(model, eval_env) print(f"Mean reward = {mean_reward:.2f} +/- {std_reward:.2f}") # 8 package_to_hub( model=model, model_name=f"a2c-{env_id}", model_architecture="A2C", env_id=env_id, eval_env=eval_env, repo_id=f"ThomasSimonini/a2c-{env_id}", # TODO: Change the username commit_message="Initial commit", )<jupyter_output><empty_output>
deep-rl-class/notebooks/unit6/unit6.ipynb/0
{ "file_path": "deep-rl-class/notebooks/unit6/unit6.ipynb", "repo_id": "deep-rl-class", "token_count": 4384 }
81
# Introduction to Deep Reinforcement Learning [[introduction-to-deep-reinforcement-learning]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/thumbnail.jpg" alt="Unit 1 thumbnail" width="100%"> Welcome to the most fascinating topic in Artificial Intelligence: **Deep Reinforcement Learning.** Deep RL is a type of Machine Learning where an agent learns **how to behave** in an environment **by performing actions** and **seeing the results.** In this first unit, **you'll learn the foundations of Deep Reinforcement Learning.** Then, you'll **train your Deep Reinforcement Learning agent, a lunar lander to land correctly on the Moon** using <a href="https://stable-baselines3.readthedocs.io/en/master/"> Stable-Baselines3 </a>, a Deep Reinforcement Learning library. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/lunarLander.gif" alt="LunarLander"> And finally, you'll **upload this trained agent to the Hugging Face Hub 🤗, a free, open platform where people can share ML models, datasets, and demos.** It's essential **to master these elements** before diving into implementing Deep Reinforcement Learning agents. The goal of this chapter is to give you solid foundations. After this unit, in a bonus unit, you'll be **able to train Huggy the Dog 🐶 to fetch the stick and play with him 🤗**. <video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/huggy.mp4" type="video/mp4" controls autoplay loop mute /> So let's get started! 🚀
deep-rl-class/units/en/unit1/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 477 }
82
# A Q-Learning example [[q-learning-example]] To better understand Q-Learning, let's take a simple example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Maze-Example-2.jpg" alt="Maze-Example"/> - You're a mouse in this tiny maze. You always **start at the same starting point.** - The goal is **to eat the big pile of cheese at the bottom right-hand corner** and avoid the poison. After all, who doesn't like cheese? - The episode ends if we eat the poison, **eat the big pile of cheese**, or if we take more than five steps. - The learning rate is 0.1 - The discount rate (gamma) is 0.99 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-1.jpg" alt="Maze-Example"/> The reward function goes like this: - **+0:** Going to a state with no cheese in it. - **+1:** Going to a state with a small cheese in it. - **+10:** Going to the state with the big pile of cheese. - **-10:** Going to the state with the poison and thus dying. - **+0** If we take more than five steps. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-2.jpg" alt="Maze-Example"/> To train our agent to have an optimal policy (so a policy that goes right, right, down), **we will use the Q-Learning algorithm**. ## Step 1: Initialize the Q-table [[step1]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Example-1.jpg" alt="Maze-Example"/> So, for now, **our Q-table is useless**; we need **to train our Q-function using the Q-Learning algorithm.** Let's do it for 2 training timesteps: Training timestep 1: ## Step 2: Choose an action using the Epsilon Greedy Strategy [[step2]] Because epsilon is big (= 1.0), I take a random action. In this case, I go right. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-3.jpg" alt="Maze-Example"/> ## Step 3: Perform action At, get Rt+1 and St+1 [[step3]] By going right, I get a small cheese, so \\(R_{t+1} = 1\\) and I'm in a new state. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-4.jpg" alt="Maze-Example"/> ## Step 4: Update Q(St, At) [[step4]] We can now update \\(Q(S_t, A_t)\\) using our formula. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-5.jpg" alt="Maze-Example"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Example-4.jpg" alt="Maze-Example"/> Training timestep 2: ## Step 2: Choose an action using the Epsilon Greedy Strategy [[step2-2]] **I take a random action again, since epsilon=0.99 is big**. (Notice we decay epsilon a little bit because, as the training progress, we want less and less exploration). I took the action 'down'. 
**This is not a good action since it leads me to the poison.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-6.jpg" alt="Maze-Example"/> ## Step 3: Perform action At, get Rt+1 and St+1 [[step3-3]] Because I ate poison, **I get \\(R_{t+1} = -10\\), and I die.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-7.jpg" alt="Maze-Example"/> ## Step 4: Update Q(St, At) [[step4-4]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-ex-8.jpg" alt="Maze-Example"/> Because we're dead, we start a new episode. But what we see here is that, **with two exploration steps, my agent became smarter.** As we continue exploring and exploiting the environment and updating Q-values using the TD target, the **Q-table will give us a better and better approximation. At the end of the training, we'll get an estimate of the optimal Q-function.**
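To make these two hand-computed updates concrete, here is a minimal sketch of the same update rule in plain Python. The Q-table layout, state names, and helper function below are illustrative stand-ins, not part of any library:

```python
# Minimal sketch of the tabular Q-Learning update used in the two timesteps above.
learning_rate = 0.1  # alpha
gamma = 0.99         # discount rate

actions = ["left", "right", "up", "down"]
states = ["start", "small_cheese", "poison"]
Q = {s: {a: 0.0 for a in actions} for s in states}  # Step 1: Q-table initialized to 0

def q_update(state, action, reward, next_state, terminated):
    # TD target: R_{t+1} + gamma * max_a Q(S_{t+1}, a); no bootstrapping on terminal states
    td_target = reward + (0.0 if terminated else gamma * max(Q[next_state].values()))
    td_error = td_target - Q[state][action]
    Q[state][action] += learning_rate * td_error

# Training timestep 1: going right eats a small cheese (+1)
q_update("start", "right", reward=1, next_state="small_cheese", terminated=False)
# Training timestep 2: going down eats the poison (-10) and ends the episode
q_update("small_cheese", "down", reward=-10, next_state="poison", terminated=True)

print(Q["start"]["right"])        # 0.1  = 0 + 0.1 * (1 + 0.99 * 0 - 0)
print(Q["small_cheese"]["down"])  # -1.0 = 0 + 0.1 * (-10 - 0)
```

Repeating this same update while exploring with the epsilon-greedy strategy is all Q-Learning does: the more timesteps we run, the closer the Q-table gets to the optimal Q-function.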
deep-rl-class/units/en/unit2/q-learning-example.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/q-learning-example.mdx", "repo_id": "deep-rl-class", "token_count": 1402 }
83
# The advantages and disadvantages of policy-gradient methods At this point, you might ask, "but Deep Q-Learning is excellent! Why use policy-gradient methods?". To answer this question, let's study the **advantages and disadvantages of policy-gradient methods**. ## Advantages There are multiple advantages over value-based methods. Let's see some of them: ### The simplicity of integration We can estimate the policy directly without storing additional data (action values). ### Policy-gradient methods can learn a stochastic policy Policy-gradient methods can **learn a stochastic policy while value functions can't**. This has two consequences: 1. We **don't need to implement an exploration/exploitation trade-off by hand**. Since we output a probability distribution over actions, the agent explores **the state space without always taking the same trajectory.** 2. We also get rid of the problem of **perceptual aliasing**. Perceptual aliasing is when two states seem (or are) the same but need different actions. Let's take an example: we have an intelligent vacuum cleaner whose goal is to suck the dust and avoid killing the hamsters. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster1.jpg" alt="Hamster 1"/> </figure> Our vacuum cleaner can only perceive where the walls are. The problem is that the **two red (colored) states are aliased states because the agent perceives an upper and lower wall for each**. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster2.jpg" alt="Hamster 1"/> </figure> Under a deterministic policy, the policy will either always move right when in a red state or always move left. **Either case will cause our agent to get stuck and never suck the dust**. Under a value-based Reinforcement learning algorithm, we learn a **quasi-deterministic policy** ("greedy epsilon strategy"). Consequently, our agent can **spend a lot of time before finding the dust**. On the other hand, an optimal stochastic policy **will randomly move left or right in red (colored) states**. Consequently, **it will not be stuck and will reach the goal state with a high probability**. <figure class="image table text-center m-0 w-full"> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/hamster3.jpg" alt="Hamster 1"/> </figure> ### Policy-gradient methods are more effective in high-dimensional action spaces and continuous actions spaces The problem with Deep Q-learning is that their **predictions assign a score (maximum expected future reward) for each possible action**, at each time step, given the current state. But what if we have an infinite possibility of actions? For instance, with a self-driving car, at each state, you can have a (near) infinite choice of actions (turning the wheel at 15°, 17.2°, 19,4°, honking, etc.). **We'll need to output a Q-value for each possible action**! And **taking the max action of a continuous output is an optimization problem itself**! Instead, with policy-gradient methods, we output a **probability distribution over actions.** ### Policy-gradient methods have better convergence properties In value-based methods, we use an aggressive operator to **change the value function: we take the maximum over Q-estimates**. 
Consequently, the action probabilities may change dramatically for an arbitrarily small change in the estimated action values if that change results in a different action having the maximal value.

For instance, if during training the best action was left (with a Q-value of 0.22) and, after the next training step, it becomes right (since the right Q-value becomes 0.23), we dramatically change the policy: it will now take right most of the time instead of left.

On the other hand, in policy-gradient methods, stochastic policy action preferences (the probability of taking each action) **change smoothly over time**.

## Disadvantages

Naturally, policy-gradient methods also have some disadvantages:

- **Frequently, policy-gradient methods converge to a local maximum instead of a global optimum.**
- Policy-gradient methods go slower, **step by step: they can take longer to train (sample inefficient).**
- Policy-gradient methods can have high variance. We'll see in the actor-critic unit why this happens, and how we can solve this problem.

👉 If you want to go deeper into the advantages and disadvantages of policy-gradient methods, [you can check this video](https://youtu.be/y3oqOjHilio).
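To make this contrast concrete, here is a small illustrative sketch (not from the course codebase; the numbers simply reuse the 0.22/0.23 example above) showing how the greedy, value-based choice flips abruptly while a softmax policy's action probabilities only shift slightly:

```python
import numpy as np

def softmax(values, temperature=1.0):
    """Turn action values into a probability distribution over actions."""
    z = np.asarray(values) / temperature
    z = z - z.max()  # subtract the max for numerical stability
    exp_z = np.exp(z)
    return exp_z / exp_z.sum()

# Hypothetical action values for [left, right] before and after one training step
q_before = [0.22, 0.21]
q_after = [0.22, 0.23]

# The greedy (value-based) action flips from left (index 0) to right (index 1)
print(np.argmax(q_before), np.argmax(q_after))   # 0 1

# A stochastic softmax policy barely moves: ~[0.502, 0.498] -> ~[0.498, 0.502]
print(softmax(q_before), softmax(q_after))
```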
deep-rl-class/units/en/unit4/advantages-disadvantages.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/advantages-disadvantages.mdx", "repo_id": "deep-rl-class", "token_count": 1184 }
84
# Quiz

The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**.

### Q1: Which of the following tools are specifically designed for video game development?

<Question
  choices={[
    {
      text: "Unity (C#)",
      explain: "",
      correct: true,
    },
    {
      text: "Unreal Engine (C++)",
      explain: "",
      correct: true,
    },
    {
      text: "Godot (GDScript, C++, C#)",
      explain: "",
      correct: true,
    },
    {
      text: "JetBrains' Rider",
      explain: "Although useful for its support of C# for Unity, it's a general-purpose IDE, not one specifically designed for video game development",
      correct: false,
    },
    {
      text: "JetBrains' CLion",
      explain: "Although useful for its support of C++ for Unreal Engine, it's a general-purpose IDE, not one specifically designed for video game development",
      correct: false,
    },
    {
      text: "Microsoft Visual Studio and Visual Studio Code",
      explain: "Although they support both Unity and Unreal, they are generic IDEs, not specifically designed for video game development.",
      correct: false,
    },
  ]}
/>

### Q2: Which of the following statements are true about Unity ML-Agents?

<Question
  choices={[
    {
      text: "Unity `Scene` objects can be used to create learning environments",
      explain: "",
      correct: true,
    },
    {
      text: "Unity ML-Agents allows you to create and train your agents using Reinforcement Learning",
      explain: "",
      correct: true,
    },
    {
      text: "Its `Communicator` component manages the communication between Unity's C# Environments/Agents and a Python back-end",
      explain: "",
      correct: true,
    },
    {
      text: "The training process uses Reinforcement Learning algorithms, implemented in PyTorch",
      explain: "",
      correct: true,
    },
    {
      text: "Unity ML-Agents only supports Proximal Policy Optimization (PPO)",
      explain: "No, Unity ML-Agents supports several families of algorithms, including Actor-Critic, which is going to be explained in the next section",
      correct: false,
    },
    {
      text: "It includes a Gym Wrapper and a multi-agent version of it called `PettingZoo`",
      explain: "",
      correct: true,
    },
  ]}
/>

### Q3: Fill in the missing letters

- In Unity ML-Agents, the Policy of an Agent is called a b \_ \_ \_ n
- The component in charge of orchestrating the agents is called the \_ c \_ \_ \_ m \_

<details>
<summary>Solution</summary>
<ul>
<li>b r a i n</li>
<li>a c a d e m y</li>
</ul>
</details>

### Q4: Define in your own words what a `raycast` is

<details>
<summary>Solution</summary>
A raycast is (most of the time) a linear projection, like a `laser`, which aims to detect collisions with objects.
</details>

### Q5: What are the differences between capturing the environment using `frames` or `raycasts`?

<Question
  choices={[
    {
      text: "By using `frames`, the environment is defined by each of the pixels of the screen. By using `raycasts`, we only send a sample of those pixels.",
      explain: "`Raycasts` don't have anything to do with pixels. They are linear projections (lasers) that we spawn to look for collisions.",
      correct: false,
    },
    {
      text: "By using `raycasts`, the environment is defined by each of the pixels of the screen. By using `frames`, we spawn (usually) a line to check what objects it collides with",
      explain: "It's the other way around - `frames` collect pixels, `raycasts` check for collisions.",
      correct: false,
    },
    {
      text: "By using `frames`, we collect all the pixels of the screen, which define the environment. By using `raycasts`, we don't use pixels, we spawn (normally) lines and check their collisions",
      explain: "",
      correct: true,
    },
  ]}
/>

### Q6: Name several environment and agent input variables used to train the agent in the SnowballTarget or Pyramids environments

<details>
<summary>Solution</summary>

- Collisions of the raycasts spawned from the agent detecting blocks, (invisible) walls, stones, our target, switches, etc.
- Traditional inputs describing agent features, such as its speed
- Boolean variables, such as the switch (on/off) in Pyramids or the `can I shoot?` in the SnowballTarget.

</details>

Congrats on finishing this Quiz 🥳! If you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge.
deep-rl-class/units/en/unit5/quiz.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/quiz.mdx", "repo_id": "deep-rl-class", "token_count": 1539 }
85
# Self-Play: a classic technique to train competitive agents in adversarial games

Now that we've studied the basics of multi-agents, we're ready to go deeper. As mentioned in the introduction, we're going **to train agents in an adversarial game with SoccerTwos, a 2vs2 game**.

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/>
<figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption>
</figure>

## What is Self-Play?

Training agents correctly in an adversarial game can be **quite complex**.

On the one hand, we need to find a well-trained opponent to play against our training agent. And on the other hand, if we find a very well-trained opponent, how will our agent improve its policy when the opponent is too strong?

Think of a child that just started to learn soccer. Playing against a very good soccer player will be useless since it will be too hard to win or at least get the ball from time to time. So the child will continuously lose without having time to learn a good policy.

The best solution would be **to have an opponent that is on the same level as the agent and will upgrade its level as the agent upgrades its own**. Because if the opponent is too strong, we'll learn nothing; if it is too weak, we'll overlearn useless behavior that won't work against a stronger opponent.

This solution is called *self-play*. In self-play, **the agent uses former copies of itself (of its policy) as an opponent**. This way, the agent will play against an agent of the same level (challenging but not too much), have opportunities to gradually improve its policy, and then update its opponent as it becomes better. It's a way to bootstrap an opponent and progressively increase the opponent's complexity.

It's the same way humans learn in competition:

- We start to train against an opponent of a similar level
- Then we learn from it, and when we acquire some skills, we can move on to stronger opponents.

We do the same with self-play:

- We **start with a copy of our agent as an opponent**; this way, the opponent is on a similar level.
- We **learn from it** and, when we acquire some skills, we **update our opponent with a more recent copy of our training policy**.

The theory behind self-play is not something new. It was already used by Arthur Samuel's checkers-playing system in the fifties and by Gerald Tesauro's TD-Gammon in 1995. If you want to learn more about the history of self-play, [check out this very good blogpost by Andrew Cohen](https://blog.unity.com/technology/training-intelligent-adversaries-using-self-play-with-ml-agents)

## Self-Play in MLAgents

Self-Play is integrated into the MLAgents library and is managed by multiple hyperparameters that we're going to study. But the main focus, as explained in the documentation, is the **tradeoff between the skill level and generality of the final policy and the stability of learning**.

Training against a set of slowly changing or unchanging adversaries with low diversity **results in more stable training, but there is a risk of overfitting if the change is too slow.**

So we need to control:

- How **often we change opponents** with the `swap_steps` and `team_change` parameters.
- The **number of opponents saved** with the `window` parameter.
A larger value of `window` means that an agent's pool of opponents will contain a larger diversity of behaviors since it will contain policies from earlier in the training run.
- The **probability of playing against the current self vs an opponent** sampled from the pool with `play_against_latest_model_ratio`. A larger value of `play_against_latest_model_ratio` indicates that an agent will be playing against the current opponent more often.
- The **number of training steps before saving a new opponent** with the `save_steps` parameter. A larger value of `save_steps` will yield a set of opponents that cover a wider range of skill levels and possibly play styles since the policy receives more training.

To get more details about these hyperparameters, you definitely need [to check out this part of the documentation](https://github.com/Unity-Technologies/ml-agents/blob/develop/docs/Training-Configuration-File.md#self-play)

## The ELO Score to evaluate our agent

### What is ELO Score?

In adversarial games, tracking the **cumulative reward is not always a meaningful metric of learning progress**, because this metric **depends only on the skill of the opponent.**

Instead, we're using an ***ELO rating system*** (named after Arpad Elo) that calculates the **relative skill level** between 2 players from a given population in a zero-sum game.

In a zero-sum game: one agent wins, and the other agent loses. It's a mathematical representation of a situation in which each participant's gain or loss of utility **is exactly balanced by the gain or loss of the utility of the other participants.** We talk about zero-sum games because the sum of utility is equal to zero.

This ELO (starting at a specific score: frequently 1200) can decrease initially but should increase progressively during the training.

The Elo rating is **inferred from the wins, losses, and draws against other players.** It means that player ratings depend **on the ratings of their opponents and the results scored against them.**

Elo defines an Elo score that represents the relative skill of a player in a zero-sum game. **We say relative because it depends on the performance of opponents.**

The central idea is to think of the performance of a player **as a random variable that is normally distributed.**

The difference in rating between 2 players serves as **the predictor of the outcome of a match.** If a player wins when its probability of winning was already high, it will only take a few points from its opponent, since the result was expected.

After every game:

- The winning player takes **points from the losing one.**
- The number of points is determined **by the difference in the 2 players' ratings (hence relative).**
- If the higher-rated player wins → few points will be taken from the lower-rated player.
- If the lower-rated player wins → a lot of points will be taken from the higher-rated player.
- If it's a draw → the lower-rated player gains a few points from the higher-rated one.

So if A and B have ratings Ra and Rb, then the **expected scores are** given by:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/elo1.png" alt="ELO Score"/>

Then, at the end of the game, we need to update the player's actual Elo score. We use a linear adjustment **proportional to the amount by which the player over-performed or under-performed.**

We also define a maximum adjustment rating per game: the K-factor.

- K=16 for masters.
- K=32 for weaker players.
If Player A was expected to score Ea points but actually scored Sa points, the player's rating is updated using the formula:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/elo2.png" alt="ELO Score"/>

### Example

If we take an example:

Player A has a rating of 2600

Player B has a rating of 2300

- We first calculate the expected scores:

\\(E_{A} = \frac{1}{1+10^{(2300-2600)/400}} = 0.849 \\)

\\(E_{B} = \frac{1}{1+10^{(2600-2300)/400}} = 0.151 \\)

- If the organizers determined that K=16 and A wins, the new ratings would be:

\\(ELO_A = 2600 + 16*(1-0.849) = 2602 \\)

\\(ELO_B = 2300 + 16*(0-0.151) = 2298 \\)

- If the organizers determined that K=16 and B wins, the new ratings would be:

\\(ELO_A = 2600 + 16*(0-0.849) = 2586 \\)

\\(ELO_B = 2300 + 16*(1-0.151) = 2314 \\)

### The Advantages

Using the ELO score has multiple advantages:

- Points are **always balanced** (more points are exchanged when there is an unexpected outcome, but the sum is always the same).
- It is a **self-correcting system**, since a player who wins against a weaker player will only win a few points.
- It **works with team games**: we calculate the average rating for each team and use it in Elo.

### The Disadvantages

- ELO **does not take into account the individual contribution** of each player in the team.
- Rating deflation: **a good rating requires skill over time to keep the same rating**.
- **Ratings can't be compared across different eras**.
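To make the update rule concrete, here is a small Python sketch of the expected-score and rating-update formulas described above (an illustration only, not part of the MLAgents implementation):

```python
def expected_score(rating_a, rating_b):
    """Probability that player A beats player B under the Elo model."""
    return 1 / (1 + 10 ** ((rating_b - rating_a) / 400))

def elo_update(rating_a, rating_b, score_a, k=16):
    """Return the new ratings; score_a is 1 if A wins, 0 if A loses, 0.5 for a draw."""
    e_a = expected_score(rating_a, rating_b)
    e_b = expected_score(rating_b, rating_a)
    new_a = rating_a + k * (score_a - e_a)
    new_b = rating_b + k * ((1 - score_a) - e_b)
    return new_a, new_b

# The example above: A (2600) beats B (2300) with K=16
print(elo_update(2600, 2300, score_a=1))  # approximately (2602.4, 2297.6)
```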
deep-rl-class/units/en/unit7/self-play.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/self-play.mdx", "repo_id": "deep-rl-class", "token_count": 2245 }
86
# Hands-on [[hands-on]] Now that you've learned to use Optuna, here are some ideas to apply what you've learned: 1️⃣ **Beat your LunarLander-v2 agent results**, by using Optuna to find a better set of hyperparameters. You can also try with another environment, such as MountainCar-v0 and CartPole-v1. 2️⃣ **Beat your SpaceInvaders agent results**. By doing this, you'll see how valuable and powerful Optuna can be in training better agents. Have fun! Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9) ### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unitbonus2/hands-on.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus2/hands-on.mdx", "repo_id": "deep-rl-class", "token_count": 207 }
87
import argparse import sys sys.path.append(".") from base_classes import ControlNetBenchmark, ControlNetSDXLBenchmark # noqa: E402 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--ckpt", type=str, default="lllyasviel/sd-controlnet-canny", choices=["lllyasviel/sd-controlnet-canny", "diffusers/controlnet-canny-sdxl-1.0"], ) parser.add_argument("--batch_size", type=int, default=1) parser.add_argument("--num_inference_steps", type=int, default=50) parser.add_argument("--model_cpu_offload", action="store_true") parser.add_argument("--run_compile", action="store_true") args = parser.parse_args() benchmark_pipe = ( ControlNetBenchmark(args) if args.ckpt == "lllyasviel/sd-controlnet-canny" else ControlNetSDXLBenchmark(args) ) benchmark_pipe.benchmark(args)
diffusers/benchmarks/benchmark_controlnet.py/0
{ "file_path": "diffusers/benchmarks/benchmark_controlnet.py", "repo_id": "diffusers", "token_count": 352 }
88
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # UNet Some training methods - like LoRA and Custom Diffusion - typically target the UNet's attention layers, but these training methods can also target other non-attention layers. Instead of training all of a model's parameters, only a subset of the parameters are trained, which is faster and more efficient. This class is useful if you're *only* loading weights into a UNet. If you need to load weights into the text encoder or a text encoder and UNet, try using the [`~loaders.LoraLoaderMixin.load_lora_weights`] function instead. The [`UNet2DConditionLoadersMixin`] class provides functions for loading and saving weights, fusing and unfusing LoRAs, disabling and enabling LoRAs, and setting and deleting adapters. <Tip> To learn more about how to load LoRA weights, see the [LoRA](../../using-diffusers/loading_adapters#lora) loading guide. </Tip> ## UNet2DConditionLoadersMixin [[autodoc]] loaders.unet.UNet2DConditionLoadersMixin
diffusers/docs/source/en/api/loaders/unet.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/unet.md", "repo_id": "diffusers", "token_count": 402 }
89
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DDPM [Denoising Diffusion Probabilistic Models](https://huggingface.co/papers/2006.11239) (DDPM) by Jonathan Ho, Ajay Jain and Pieter Abbeel proposes a diffusion based model of the same name. In the 🤗 Diffusers library, DDPM refers to the *discrete denoising scheduler* from the paper as well as the pipeline. The abstract from the paper is: *We present high quality image synthesis results using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics. Our best results are obtained by training on a weighted variational bound designed according to a novel connection between diffusion probabilistic models and denoising score matching with Langevin dynamics, and our models naturally admit a progressive lossy decompression scheme that can be interpreted as a generalization of autoregressive decoding. On the unconditional CIFAR10 dataset, we obtain an Inception score of 9.46 and a state-of-the-art FID score of 3.17. On 256x256 LSUN, we obtain sample quality similar to ProgressiveGAN.* The original codebase can be found at [hohonathanho/diffusion](https://github.com/hojonathanho/diffusion). <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> # DDPMPipeline [[autodoc]] DDPMPipeline - all - __call__ ## ImagePipelineOutput [[autodoc]] pipelines.ImagePipelineOutput
diffusers/docs/source/en/api/pipelines/ddpm.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/ddpm.md", "repo_id": "diffusers", "token_count": 572 }
90
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # InstructPix2Pix [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://huggingface.co/papers/2211.09800) is by Tim Brooks, Aleksander Holynski and Alexei A. Efros. The abstract from the paper is: *We propose a method for editing images from human instructions: given an input image and a written instruction that tells the model what to do, our model follows these instructions to edit the image. To obtain training data for this problem, we combine the knowledge of two large pretrained models -- a language model (GPT-3) and a text-to-image model (Stable Diffusion) -- to generate a large dataset of image editing examples. Our conditional diffusion model, InstructPix2Pix, is trained on our generated data, and generalizes to real images and user-written instructions at inference time. Since it performs edits in the forward pass and does not require per example fine-tuning or inversion, our model edits images quickly, in a matter of seconds. We show compelling editing results for a diverse collection of input images and written instructions.* You can find additional information about InstructPix2Pix on the [project page](https://www.timothybrooks.com/instruct-pix2pix), [original codebase](https://github.com/timothybrooks/instruct-pix2pix), and try it out in a [demo](https://huggingface.co/spaces/timbrooks/instruct-pix2pix). <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## StableDiffusionInstructPix2PixPipeline [[autodoc]] StableDiffusionInstructPix2PixPipeline - __call__ - all - load_textual_inversion - load_lora_weights - save_lora_weights ## StableDiffusionXLInstructPix2PixPipeline [[autodoc]] StableDiffusionXLInstructPix2PixPipeline - __call__ - all
diffusers/docs/source/en/api/pipelines/pix2pix.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/pix2pix.md", "repo_id": "diffusers", "token_count": 707 }
91
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # SDXL Turbo Stable Diffusion XL (SDXL) Turbo was proposed in [Adversarial Diffusion Distillation](https://stability.ai/research/adversarial-diffusion-distillation) by Axel Sauer, Dominik Lorenz, Andreas Blattmann, and Robin Rombach. The abstract from the paper is: *We introduce Adversarial Diffusion Distillation (ADD), a novel training approach that efficiently samples large-scale foundational image diffusion models in just 1–4 steps while maintaining high image quality. We use score distillation to leverage large-scale off-the-shelf image diffusion models as a teacher signal in combination with an adversarial loss to ensure high image fidelity even in the low-step regime of one or two sampling steps. Our analyses show that our model clearly outperforms existing few-step methods (GANs,Latent Consistency Models) in a single step and reaches the performance of state-of-the-art diffusion models (SDXL) in only four steps. ADD is the first method to unlock single-step, real-time image synthesis with foundation models.* ## Tips - SDXL Turbo uses the exact same architecture as [SDXL](./stable_diffusion_xl), which means it also has the same API. Please refer to the [SDXL](./stable_diffusion_xl) API reference for more details. - SDXL Turbo should disable guidance scale by setting `guidance_scale=0.0`. - SDXL Turbo should use `timestep_spacing='trailing'` for the scheduler and use between 1 and 4 steps. - SDXL Turbo has been trained to generate images of size 512x512. - SDXL Turbo is open-access, but not open-source meaning that one might have to buy a model license in order to use it for commercial applications. Make sure to read the [official model card](https://huggingface.co/stabilityai/sdxl-turbo) to learn more. <Tip> To learn how to use SDXL Turbo for various tasks, how to optimize performance, and other usage examples, take a look at the [SDXL Turbo](../../../using-diffusers/sdxl_turbo) guide. Check out the [Stability AI](https://huggingface.co/stabilityai) Hub organization for the official base and refiner model checkpoints! </Tip>
diffusers/docs/source/en/api/pipelines/stable_diffusion/sdxl_turbo.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/sdxl_turbo.md", "repo_id": "diffusers", "token_count": 677 }
92
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DDIMScheduler [Denoising Diffusion Implicit Models](https://huggingface.co/papers/2010.02502) (DDIM) by Jiaming Song, Chenlin Meng and Stefano Ermon. The abstract from the paper is: *Denoising diffusion probabilistic models (DDPMs) have achieved high quality image generation without adversarial training, yet they require simulating a Markov chain for many steps to produce a sample. To accelerate sampling, we present denoising diffusion implicit models (DDIMs), a more efficient class of iterative implicit probabilistic models with the same training procedure as DDPMs. In DDPMs, the generative process is defined as the reverse of a Markovian diffusion process. We construct a class of non-Markovian diffusion processes that lead to the same training objective, but whose reverse process can be much faster to sample from. We empirically demonstrate that DDIMs can produce high quality samples 10× to 50× faster in terms of wall-clock time compared to DDPMs, allow us to trade off computation for sample quality, and can perform semantically meaningful image interpolation directly in the latent space.* The original codebase of this paper can be found at [ermongroup/ddim](https://github.com/ermongroup/ddim), and you can contact the author on [tsong.me](https://tsong.me/). ## Tips The paper [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) claims that a mismatch between the training and inference settings leads to suboptimal inference generation results for Stable Diffusion. To fix this, the authors propose: <Tip warning={true}> 🧪 This is an experimental feature! </Tip> 1. rescale the noise schedule to enforce zero terminal signal-to-noise ratio (SNR) ```py pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, rescale_betas_zero_snr=True) ``` 2. train a model with `v_prediction` (add the following argument to the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) or [train_text_to_image_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py) scripts) ```bash --prediction_type="v_prediction" ``` 3. change the sampler to always start from the last timestep ```py pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing") ``` 4. 
rescale classifier-free guidance to prevent over-exposure ```py image = pipe(prompt, guidance_rescale=0.7).images[0] ``` For example: ```py from diffusers import DiffusionPipeline, DDIMScheduler import torch pipe = DiffusionPipeline.from_pretrained("ptx0/pseudo-journey-v2", torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config( pipe.scheduler.config, rescale_betas_zero_snr=True, timestep_spacing="trailing" ) pipe.to("cuda") prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" image = pipe(prompt, guidance_rescale=0.7).images[0] image ``` ## DDIMScheduler [[autodoc]] DDIMScheduler ## DDIMSchedulerOutput [[autodoc]] schedulers.scheduling_ddim.DDIMSchedulerOutput
diffusers/docs/source/en/api/schedulers/ddim.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/ddim.md", "repo_id": "diffusers", "token_count": 1122 }
93
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DPMSolverMultistepInverse `DPMSolverMultistepInverse` is the inverted scheduler from [DPM-Solver: A Fast ODE Solver for Diffusion Probabilistic Model Sampling in Around 10 Steps](https://huggingface.co/papers/2206.00927) and [DPM-Solver++: Fast Solver for Guided Sampling of Diffusion Probabilistic Models](https://huggingface.co/papers/2211.01095) by Cheng Lu, Yuhao Zhou, Fan Bao, Jianfei Chen, Chongxuan Li, and Jun Zhu. The implementation is mostly based on the DDIM inversion definition of [Null-text Inversion for Editing Real Images using Guided Diffusion Models](https://huggingface.co/papers/2211.09794) and notebook implementation of the [`DiffEdit`] latent inversion from [Xiang-cd/DiffEdit-stable-diffusion](https://github.com/Xiang-cd/DiffEdit-stable-diffusion/blob/main/diffedit.ipynb). ## Tips Dynamic thresholding from [Imagen](https://huggingface.co/papers/2205.11487) is supported, and for pixel-space diffusion models, you can set both `algorithm_type="dpmsolver++"` and `thresholding=True` to use the dynamic thresholding. This thresholding method is unsuitable for latent-space diffusion models such as Stable Diffusion. ## DPMSolverMultistepInverseScheduler [[autodoc]] DPMSolverMultistepInverseScheduler ## SchedulerOutput [[autodoc]] schedulers.scheduling_utils.SchedulerOutput
diffusers/docs/source/en/api/schedulers/multistep_dpm_solver_inverse.md/0
{ "file_path": "diffusers/docs/source/en/api/schedulers/multistep_dpm_solver_inverse.md", "repo_id": "diffusers", "token_count": 547 }
94
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> [[open-in-colab]] # Quicktour Diffusion models are trained to denoise random Gaussian noise step-by-step to generate a sample of interest, such as an image or audio. This has sparked a tremendous amount of interest in generative AI, and you have probably seen examples of diffusion generated images on the internet. 🧨 Diffusers is a library aimed at making diffusion models widely accessible to everyone. Whether you're a developer or an everyday user, this quicktour will introduce you to 🧨 Diffusers and help you get up and generating quickly! There are three main components of the library to know about: * The [`DiffusionPipeline`] is a high-level end-to-end class designed to rapidly generate samples from pretrained diffusion models for inference. * Popular pretrained [model](./api/models) architectures and modules that can be used as building blocks for creating diffusion systems. * Many different [schedulers](./api/schedulers/overview) - algorithms that control how noise is added for training, and how to generate denoised images during inference. The quicktour will show you how to use the [`DiffusionPipeline`] for inference, and then walk you through how to combine a model and scheduler to replicate what's happening inside the [`DiffusionPipeline`]. <Tip> The quicktour is a simplified version of the introductory 🧨 Diffusers [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/diffusers_intro.ipynb) to help you get started quickly. If you want to learn more about 🧨 Diffusers' goal, design philosophy, and additional details about its core API, check out the notebook! </Tip> Before you begin, make sure you have all the necessary libraries installed: ```py # uncomment to install the necessary libraries in Colab #!pip install --upgrade diffusers accelerate transformers ``` - [🤗 Accelerate](https://huggingface.co/docs/accelerate/index) speeds up model loading for inference and training. - [🤗 Transformers](https://huggingface.co/docs/transformers/index) is required to run the most popular diffusion models, such as [Stable Diffusion](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/overview). ## DiffusionPipeline The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for inference. It is an end-to-end system containing the model and the scheduler. You can use the [`DiffusionPipeline`] out-of-the-box for many tasks. Take a look at the table below for some supported tasks, and for a complete list of supported tasks, check out the [🧨 Diffusers Summary](./api/pipelines/overview#diffusers-summary) table. 
| **Task** | **Description** | **Pipeline** |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------| | Unconditional Image Generation | generate an image from Gaussian noise | [unconditional_image_generation](./using-diffusers/unconditional_image_generation) | | Text-Guided Image Generation | generate an image given a text prompt | [conditional_image_generation](./using-diffusers/conditional_image_generation) | | Text-Guided Image-to-Image Translation | adapt an image guided by a text prompt | [img2img](./using-diffusers/img2img) | | Text-Guided Image-Inpainting | fill the masked part of an image given the image, the mask and a text prompt | [inpaint](./using-diffusers/inpaint) | | Text-Guided Depth-to-Image Translation | adapt parts of an image guided by a text prompt while preserving structure via depth estimation | [depth2img](./using-diffusers/depth2img) | Start by creating an instance of a [`DiffusionPipeline`] and specify which pipeline checkpoint you would like to download. You can use the [`DiffusionPipeline`] for any [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) stored on the Hugging Face Hub. In this quicktour, you'll load the [`stable-diffusion-v1-5`](https://huggingface.co/runwayml/stable-diffusion-v1-5) checkpoint for text-to-image generation. <Tip warning={true}> For [Stable Diffusion](https://huggingface.co/CompVis/stable-diffusion) models, please carefully read the [license](https://huggingface.co/spaces/CompVis/stable-diffusion-license) first before running the model. 🧨 Diffusers implements a [`safety_checker`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/safety_checker.py) to prevent offensive or harmful content, but the model's improved image generation capabilities can still produce potentially harmful content. </Tip> Load the model with the [`~DiffusionPipeline.from_pretrained`] method: ```python >>> from diffusers import DiffusionPipeline >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) ``` The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components. You'll see that the Stable Diffusion pipeline is composed of the [`UNet2DConditionModel`] and [`PNDMScheduler`] among other things: ```py >>> pipeline StableDiffusionPipeline { "_class_name": "StableDiffusionPipeline", "_diffusers_version": "0.21.4", ..., "scheduler": [ "diffusers", "PNDMScheduler" ], ..., "unet": [ "diffusers", "UNet2DConditionModel" ], "vae": [ "diffusers", "AutoencoderKL" ] } ``` We strongly recommend running the pipeline on a GPU because the model consists of roughly 1.4 billion parameters. You can move the generator object to a GPU, just like you would in PyTorch: ```python >>> pipeline.to("cuda") ``` Now you can pass a text prompt to the `pipeline` to generate an image, and then access the denoised image. By default, the image output is wrapped in a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object. 
```python >>> image = pipeline("An image of a squirrel in Picasso style").images[0] >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/image_of_squirrel_painting.png"/> </div> Save the image by calling `save`: ```python >>> image.save("image_of_squirrel_painting.png") ``` ### Local pipeline You can also use the pipeline locally. The only difference is you need to download the weights first: ```bash !git lfs install !git clone https://huggingface.co/runwayml/stable-diffusion-v1-5 ``` Then load the saved weights into the pipeline: ```python >>> pipeline = DiffusionPipeline.from_pretrained("./stable-diffusion-v1-5", use_safetensors=True) ``` Now, you can run the pipeline as you would in the section above. ### Swapping schedulers Different schedulers come with different denoising speeds and quality trade-offs. The best way to find out which one works best for you is to try them out! One of the main features of 🧨 Diffusers is to allow you to easily switch between schedulers. For example, to replace the default [`PNDMScheduler`] with the [`EulerDiscreteScheduler`], load it with the [`~diffusers.ConfigMixin.from_config`] method: ```py >>> from diffusers import EulerDiscreteScheduler >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", use_safetensors=True) >>> pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) ``` Try generating an image with the new scheduler and see if you notice a difference! In the next section, you'll take a closer look at the components - the model and scheduler - that make up the [`DiffusionPipeline`] and learn how to use these components to generate an image of a cat. ## Models Most models take a noisy sample, and at each timestep it predicts the *noise residual* (other models learn to predict the previous sample directly or the velocity or [`v-prediction`](https://github.com/huggingface/diffusers/blob/5e5ce13e2f89ac45a0066cb3f369462a3cf1d9ef/src/diffusers/schedulers/scheduling_ddim.py#L110)), the difference between a less noisy image and the input image. You can mix and match models to create other diffusion systems. Models are initiated with the [`~ModelMixin.from_pretrained`] method which also locally caches the model weights so it is faster the next time you load the model. For the quicktour, you'll load the [`UNet2DModel`], a basic unconditional image generation model with a checkpoint trained on cat images: ```py >>> from diffusers import UNet2DModel >>> repo_id = "google/ddpm-cat-256" >>> model = UNet2DModel.from_pretrained(repo_id, use_safetensors=True) ``` To access the model parameters, call `model.config`: ```py >>> model.config ``` The model configuration is a 🧊 frozen 🧊 dictionary, which means those parameters can't be changed after the model is created. This is intentional and ensures that the parameters used to define the model architecture at the start remain the same, while other parameters can still be adjusted during inference. Some of the most important parameters are: * `sample_size`: the height and width dimension of the input sample. * `in_channels`: the number of input channels of the input sample. * `down_block_types` and `up_block_types`: the type of down- and upsampling blocks used to create the UNet architecture. * `block_out_channels`: the number of output channels of the downsampling blocks; also used in reverse order for the number of input channels of the upsampling blocks. 
* `layers_per_block`: the number of ResNet blocks present in each UNet block. To use the model for inference, create the image shape with random Gaussian noise. It should have a `batch` axis because the model can receive multiple random noises, a `channel` axis corresponding to the number of input channels, and a `sample_size` axis for the height and width of the image: ```py >>> import torch >>> torch.manual_seed(0) >>> noisy_sample = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) >>> noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` For inference, pass the noisy image and a `timestep` to the model. The `timestep` indicates how noisy the input image is, with more noise at the beginning and less at the end. This helps the model determine its position in the diffusion process, whether it is closer to the start or the end. Use the `sample` method to get the model output: ```py >>> with torch.no_grad(): ... noisy_residual = model(sample=noisy_sample, timestep=2).sample ``` To generate actual examples though, you'll need a scheduler to guide the denoising process. In the next section, you'll learn how to couple a model with a scheduler. ## Schedulers Schedulers manage going from a noisy sample to a less noisy sample given the model output - in this case, it is the `noisy_residual`. <Tip> 🧨 Diffusers is a toolbox for building diffusion systems. While the [`DiffusionPipeline`] is a convenient way to get started with a pre-built diffusion system, you can also choose your own model and scheduler components separately to build a custom diffusion system. </Tip> For the quicktour, you'll instantiate the [`DDPMScheduler`] with its [`~diffusers.ConfigMixin.from_config`] method: ```py >>> from diffusers import DDPMScheduler >>> scheduler = DDPMScheduler.from_pretrained(repo_id) >>> scheduler DDPMScheduler { "_class_name": "DDPMScheduler", "_diffusers_version": "0.21.4", "beta_end": 0.02, "beta_schedule": "linear", "beta_start": 0.0001, "clip_sample": true, "clip_sample_range": 1.0, "dynamic_thresholding_ratio": 0.995, "num_train_timesteps": 1000, "prediction_type": "epsilon", "sample_max_value": 1.0, "steps_offset": 0, "thresholding": false, "timestep_spacing": "leading", "trained_betas": null, "variance_type": "fixed_small" } ``` <Tip> 💡 Unlike a model, a scheduler does not have trainable weights and is parameter-free! </Tip> Some of the most important parameters are: * `num_train_timesteps`: the length of the denoising process or, in other words, the number of timesteps required to process random Gaussian noise into a data sample. * `beta_schedule`: the type of noise schedule to use for inference and training. * `beta_start` and `beta_end`: the start and end noise values for the noise schedule. To predict a slightly less noisy image, pass the following to the scheduler's [`~diffusers.DDPMScheduler.step`] method: model output, `timestep`, and current `sample`. ```py >>> less_noisy_sample = scheduler.step(model_output=noisy_residual, timestep=2, sample=noisy_sample).prev_sample >>> less_noisy_sample.shape torch.Size([1, 3, 256, 256]) ``` The `less_noisy_sample` can be passed to the next `timestep` where it'll get even less noisy! Let's bring it all together now and visualize the entire denoising process. First, create a function that postprocesses and displays the denoised image as a `PIL.Image`: ```py >>> import PIL.Image >>> import numpy as np >>> def display_sample(sample, i): ... image_processed = sample.cpu().permute(0, 2, 3, 1) ... 
image_processed = (image_processed + 1.0) * 127.5 ... image_processed = image_processed.numpy().astype(np.uint8) ... image_pil = PIL.Image.fromarray(image_processed[0]) ... display(f"Image at step {i}") ... display(image_pil) ``` To speed up the denoising process, move the input and model to a GPU: ```py >>> model.to("cuda") >>> noisy_sample = noisy_sample.to("cuda") ``` Now create a denoising loop that predicts the residual of the less noisy sample, and computes the less noisy sample with the scheduler: ```py >>> import tqdm >>> sample = noisy_sample >>> for i, t in enumerate(tqdm.tqdm(scheduler.timesteps)): ... # 1. predict noise residual ... with torch.no_grad(): ... residual = model(sample, t).sample ... # 2. compute less noisy image and set x_t -> x_t-1 ... sample = scheduler.step(residual, t, sample).prev_sample ... # 3. optionally look at image ... if (i + 1) % 50 == 0: ... display_sample(sample, i + 1) ``` Sit back and watch as a cat is generated from nothing but noise! 😻 <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/diffusion-quicktour.png"/> </div> ## Next steps Hopefully, you generated some cool images with 🧨 Diffusers in this quicktour! For your next steps, you can: * Train or finetune a model to generate your own images in the [training](./tutorials/basic_training) tutorial. * See example official and community [training or finetuning scripts](https://github.com/huggingface/diffusers/tree/main/examples#-diffusers-examples) for a variety of use cases. * Learn more about loading, accessing, changing, and comparing schedulers in the [Using different Schedulers](./using-diffusers/schedulers) guide. * Explore prompt engineering, speed and memory optimizations, and tips and tricks for generating higher-quality images with the [Stable Diffusion](./stable_diffusion) guide. * Dive deeper into speeding up 🧨 Diffusers with guides on [optimized PyTorch on a GPU](./optimization/fp16), and inference guides for running [Stable Diffusion on Apple Silicon (M1/M2)](./optimization/mps) and [ONNX Runtime](./optimization/onnx).
diffusers/docs/source/en/quicktour.md/0
{ "file_path": "diffusers/docs/source/en/quicktour.md", "repo_id": "diffusers", "token_count": 4836 }
95
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Text-to-image <Tip warning={true}> The text-to-image script is experimental, and it's easy to overfit and run into issues like catastrophic forgetting. Try exploring different hyperparameters to get the best results on your dataset. </Tip> Text-to-image models like Stable Diffusion are conditioned to generate images given a text prompt. Training a model can be taxing on your hardware, but if you enable `gradient_checkpointing` and `mixed_precision`, it is possible to train a model on a single 24GB GPU. If you're training with larger batch sizes or want to train faster, it's better to use GPUs with more than 30GB of memory. You can reduce your memory footprint by enabling memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing, gradient accumulation or xFormers. A GPU with at least 30GB of memory or a TPU v3 is recommended for training with Flax. This guide will explore the [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) training script to help you become familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then navigate to the example folder containing the training script and install the required dependencies for the script you're using: <hfoptions id="installation"> <hfoption id="PyTorch"> ```bash cd examples/text_to_image pip install -r requirements.txt ``` </hfoption> <hfoption id="Flax"> ```bash cd examples/text_to_image pip install -r requirements_flax.txt ``` </hfoption> </hfoptions> <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```bash from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. ## Script parameters <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. 
If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) and let us know if you have any questions or concerns. </Tip> The training script provides many parameters to help you customize your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L193) function. This function provides default values for each parameter, such as the training batch size and learning rate, but you can also set your own values in the training command if you'd like. For example, to speedup training with mixed precision using the fp16 format, add the `--mixed_precision` parameter to the training command: ```bash accelerate launch train_text_to_image.py \ --mixed_precision="fp16" ``` Some basic and important parameters include: - `--pretrained_model_name_or_path`: the name of the model on the Hub or a local path to the pretrained model - `--dataset_name`: the name of the dataset on the Hub or a local path to the dataset to train on - `--image_column`: the name of the image column in the dataset to train on - `--caption_column`: the name of the text column in the dataset to train on - `--output_dir`: where to save the trained model - `--push_to_hub`: whether to push the trained model to the Hub - `--checkpointing_steps`: frequency of saving a checkpoint as the model trains; this is useful if for some reason training is interrupted, you can continue training from that checkpoint by adding `--resume_from_checkpoint` to your training command ### Min-SNR weighting The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script. Add the `--snr_gamma` parameter and set it to the recommended value of 5.0: ```bash accelerate launch train_text_to_image.py \ --snr_gamma=5.0 ``` You can compare the loss surfaces for different `snr_gamma` values in this [Weights and Biases](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) report. For smaller datasets, the effects of Min-SNR may not be as obvious compared to larger datasets. ## Training script The dataset preprocessing code and training loop are found in the [`main()`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L490) function. If you need to adapt the training script, this is where you'll need to make your changes. The `train_text_to_image` script starts by [loading a scheduler](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L543) and tokenizer. 
You can choose to use a different scheduler here if you want: ```py noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) ``` Then the script [loads the UNet](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L619) model: ```py load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) ``` Next, the text and image columns of the dataset need to be preprocessed. The [`tokenize_captions`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L724) function handles tokenizing the inputs, and the [`train_transforms`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L742) function specifies the type of transforms to apply to the image. Both of these functions are bundled into `preprocess_train`: ```py def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples ``` Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L878) handles everything else. It encodes images into latent space, adds noise to the latents, computes the text embeddings to condition on, updates the model parameters, and saves and pushes the model to the Hub. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ## Launch the script Once you've made all your changes or you're okay with the default configuration, you're ready to launch the training script! 🚀 <hfoptions id="training-inference"> <hfoption id="PyTorch"> Let's train on the [Pokémon BLIP captions](https://huggingface.co/datasets/lambdalabs/pokemon-blip-captions) dataset to generate your own Pokémon. Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path). If you're training on more than one GPU, add the `--multi_gpu` parameter to the `accelerate launch` command. <Tip> To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment variables to the path of the dataset and where to save the model to. 
</Tip> ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export dataset_name="lambdalabs/pokemon-blip-captions" accelerate launch --mixed_precision="fp16" train_text_to_image.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$dataset_name \ --use_ema \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --enable_xformers_memory_efficient_attention --lr_scheduler="constant" --lr_warmup_steps=0 \ --output_dir="sd-pokemon-model" \ --push_to_hub ``` </hfoption> <hfoption id="Flax"> Training with Flax can be faster on TPUs and GPUs thanks to [@duongna211](https://github.com/duongna21). Flax is more efficient on a TPU, but GPU performance is also great. Set the environment variables `MODEL_NAME` and `dataset_name` to the model and the dataset (either from the Hub or a local path). <Tip> To train on a local dataset, set the `TRAIN_DIR` and `OUTPUT_DIR` environment variables to the path of the dataset and where to save the model to. </Tip> ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export dataset_name="lambdalabs/pokemon-blip-captions" python train_text_to_image_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --dataset_name=$dataset_name \ --resolution=512 --center_crop --random_flip \ --train_batch_size=1 \ --max_train_steps=15000 \ --learning_rate=1e-05 \ --max_grad_norm=1 \ --output_dir="sd-pokemon-model" \ --push_to_hub ``` </hfoption> </hfoptions> Once training is complete, you can use your newly trained model for inference: <hfoptions id="training-inference"> <hfoption id="PyTorch"> ```py from diffusers import StableDiffusionPipeline import torch pipeline = StableDiffusionPipeline.from_pretrained("path/to/saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda") image = pipeline(prompt="yoda").images[0] image.save("yoda-pokemon.png") ``` </hfoption> <hfoption id="Flax"> ```py import jax import numpy as np from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxStableDiffusionPipeline pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/saved_model", dtype=jax.numpy.bfloat16) prompt = "yoda pokemon" prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, jax.device_count()) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) image.save("yoda-pokemon.png") ``` </hfoption> </hfoptions> ## Next steps Congratulations on training your own text-to-image model! To learn more about how to use your new model, the following guides may be helpful: - Learn how to [load LoRA weights](../using-diffusers/loading_adapters#LoRA) for inference if you trained your model with LoRA. - Learn more about how certain parameters like guidance scale or techniques such as prompt weighting can help you control inference in the [Text-to-image](../using-diffusers/conditional_image_generation) task guide.
diffusers/docs/source/en/training/text2image.md/0
{ "file_path": "diffusers/docs/source/en/training/text2image.md", "repo_id": "diffusers", "token_count": 4032 }
96
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Load community pipelines and components [[open-in-colab]] ## Community pipelines Community pipelines are any [`DiffusionPipeline`] class that are different from the original implementation as specified in their paper (for example, the [`StableDiffusionControlNetPipeline`] corresponds to the [Text-to-Image Generation with ControlNet Conditioning](https://arxiv.org/abs/2302.05543) paper). They provide additional functionality or extend the original implementation of a pipeline. There are many cool community pipelines like [Speech to Image](https://github.com/huggingface/diffusers/tree/main/examples/community#speech-to-image) or [Composable Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#composable-stable-diffusion), and you can find all the official community pipelines [here](https://github.com/huggingface/diffusers/tree/main/examples/community). To load any community pipeline on the Hub, pass the repository id of the community pipeline to the `custom_pipeline` argument and the model repository where you'd like to load the pipeline weights and components from. For example, the example below loads a dummy pipeline from [`hf-internal-testing/diffusers-dummy-pipeline`](https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py) and the pipeline weights and components from [`google/ddpm-cifar10-32`](https://huggingface.co/google/ddpm-cifar10-32): <Tip warning={true}> 🔒 By loading a community pipeline from the Hugging Face Hub, you are trusting that the code you are loading is safe. Make sure to inspect the code online before loading and running it automatically! </Tip> ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline", use_safetensors=True ) ``` Loading an official community pipeline is similar, but you can mix loading weights from an official repository id and pass pipeline components directly. The example below loads the community [CLIP Guided Stable Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/community#clip-guided-stable-diffusion) pipeline, and you can pass the CLIP model components directly to it: ```py from diffusers import DiffusionPipeline from transformers import CLIPImageProcessor, CLIPModel clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K" feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id) clip_model = CLIPModel.from_pretrained(clip_model_id) pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", custom_pipeline="clip_guided_stable_diffusion", clip_model=clip_model, feature_extractor=feature_extractor, use_safetensors=True, ) ``` ### Load from a local file Community pipelines can also be loaded from a local file if you pass a file path instead. 
The path to the passed directory must contain a `pipeline.py` file that contains the pipeline class in order to successfully load it. ```py pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", custom_pipeline="./path/to/pipeline_directory/", clip_model=clip_model, feature_extractor=feature_extractor, use_safetensors=True, ) ``` ### Load from a specific version By default, community pipelines are loaded from the latest stable version of Diffusers. To load a community pipeline from another version, use the `custom_revision` parameter. <hfoptions id="version"> <hfoption id="main"> For example, to load from the `main` branch: ```py pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", custom_pipeline="clip_guided_stable_diffusion", custom_revision="main", clip_model=clip_model, feature_extractor=feature_extractor, use_safetensors=True, ) ``` </hfoption> <hfoption id="older version"> For example, to load from a previous version of Diffusers like `v0.25.0`: ```py pipeline = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", custom_pipeline="clip_guided_stable_diffusion", custom_revision="v0.25.0", clip_model=clip_model, feature_extractor=feature_extractor, use_safetensors=True, ) ``` </hfoption> </hfoptions> For more information about community pipelines, take a look at the [Community pipelines](custom_pipeline_examples) guide for how to use them and if you're interested in adding a community pipeline check out the [How to contribute a community pipeline](contribute_pipeline) guide! ## Community components Community components allow users to build pipelines that may have customized components that are not a part of Diffusers. If your pipeline has custom components that Diffusers doesn't already support, you need to provide their implementations as Python modules. These customized components could be a VAE, UNet, and scheduler. In most cases, the text encoder is imported from the Transformers library. The pipeline code itself can also be customized. This section shows how users should use community components to build a community pipeline. You'll use the [showlab/show-1-base](https://huggingface.co/showlab/show-1-base) pipeline checkpoint as an example. So, let's start loading the components: 1. Import and load the text encoder from Transformers: ```python from transformers import T5Tokenizer, T5EncoderModel pipe_id = "showlab/show-1-base" tokenizer = T5Tokenizer.from_pretrained(pipe_id, subfolder="tokenizer") text_encoder = T5EncoderModel.from_pretrained(pipe_id, subfolder="text_encoder") ``` 2. Load a scheduler: ```python from diffusers import DPMSolverMultistepScheduler scheduler = DPMSolverMultistepScheduler.from_pretrained(pipe_id, subfolder="scheduler") ``` 3. Load an image processor: ```python from transformers import CLIPFeatureExtractor feature_extractor = CLIPFeatureExtractor.from_pretrained(pipe_id, subfolder="feature_extractor") ``` <Tip warning={true}> In steps 4 and 5, the custom [UNet](https://github.com/showlab/Show-1/blob/main/showone/models/unet_3d_condition.py) and [pipeline](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) implementation must match the format shown in their files for this example to work. </Tip> 4. 
Now you'll load a [custom UNet](https://github.com/showlab/Show-1/blob/main/showone/models/unet_3d_condition.py), which, in this example, has already been implemented in the `showone_unet_3d_condition.py` [script](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py) for your convenience. You'll notice the `UNet3DConditionModel` class name is changed to `ShowOneUNet3DConditionModel` because [`UNet3DConditionModel`] already exists in Diffusers. Any components needed for the `ShowOneUNet3DConditionModel` class should be placed in the `showone_unet_3d_condition.py` script.

Once this is done, you can initialize the UNet:

```python
from showone_unet_3d_condition import ShowOneUNet3DConditionModel

unet = ShowOneUNet3DConditionModel.from_pretrained(pipe_id, subfolder="unet")
```

5. Finally, you'll load the custom pipeline code. For this example, it has already been created for you in the `pipeline_t2v_base_pixel.py` [script](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/pipeline_t2v_base_pixel.py). This script contains a custom `TextToVideoIFPipeline` class for generating videos from text. Just like the custom UNet, any code needed for the custom pipeline to work should go in the `pipeline_t2v_base_pixel.py` script.

Once everything is in place, you can initialize the `TextToVideoIFPipeline` with the `ShowOneUNet3DConditionModel`:

```python
from pipeline_t2v_base_pixel import TextToVideoIFPipeline
import torch

pipeline = TextToVideoIFPipeline(
    unet=unet,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    scheduler=scheduler,
    feature_extractor=feature_extractor
)
pipeline = pipeline.to(device="cuda")
pipeline.torch_dtype = torch.float16
```

Push the pipeline to the Hub to share with the community!

```python
pipeline.push_to_hub("custom-t2v-pipeline")
```

After the pipeline is successfully pushed, you need a couple of changes:

1. Change the `_class_name` attribute in [`model_index.json`](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/model_index.json#L2) to `"pipeline_t2v_base_pixel"` and `"TextToVideoIFPipeline"`.
2. Upload `showone_unet_3d_condition.py` to the `unet` [directory](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/unet/showone_unet_3d_condition.py).
3. Upload `pipeline_t2v_base_pixel.py` to the pipeline base [directory](https://huggingface.co/sayakpaul/show-1-base-with-code/blob/main/pipeline_t2v_base_pixel.py).

To run inference, simply add the `trust_remote_code` argument while initializing the pipeline to handle all the "magic" behind the scenes.
```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "<change-username>/<change-id>", trust_remote_code=True, torch_dtype=torch.float16 ).to("cuda") prompt = "hello" # Text embeds prompt_embeds, negative_embeds = pipeline.encode_prompt(prompt) # Keyframes generation (8x64x40, 2fps) video_frames = pipeline( prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, num_frames=8, height=40, width=64, num_inference_steps=2, guidance_scale=9.0, output_type="pt" ).frames ``` As an additional reference example, you can refer to the repository structure of [stabilityai/japanese-stable-diffusion-xl](https://huggingface.co/stabilityai/japanese-stable-diffusion-xl/), that makes use of the `trust_remote_code` feature: ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "stabilityai/japanese-stable-diffusion-xl", trust_remote_code=True ) pipeline.to("cuda") # if using torch < 2.0 # pipeline.enable_xformers_memory_efficient_attention() prompt = "柴犬、カラフルアート" image = pipeline(prompt=prompt).images[0] ``` > [!TIP] > When using `trust_remote_code=True`, it is also strongly encouraged to pass a commit hash as a `revision` to make sure the author of the models did not update the code with some malicious new lines (unless you fully trust the authors of the models).
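
For example, pinning the Japanese Stable Diffusion XL checkpoint above to a specific commit might look like the following sketch; the `revision` value here is a placeholder, not a real commit of that repository:

```python
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/japanese-stable-diffusion-xl",
    trust_remote_code=True,
    revision="0123456789abcdef0123456789abcdef01234567",  # placeholder commit hash
)
```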
diffusers/docs/source/en/using-diffusers/custom_pipeline_overview.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/custom_pipeline_overview.md", "repo_id": "diffusers", "token_count": 3520 }
97
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Load different Stable Diffusion formats [[open-in-colab]] Stable Diffusion models are available in different formats depending on the framework they're trained and saved with, and where you download them from. Converting these formats for use in 🤗 Diffusers allows you to use all the features supported by the library, such as [using different schedulers](schedulers) for inference, [building your custom pipeline](write_own_pipeline), and a variety of techniques and methods for [optimizing inference speed](../optimization/opt_overview). <Tip> We highly recommend using the `.safetensors` format because it is more secure than traditional pickled files which are vulnerable and can be exploited to execute any code on your machine (learn more in the [Load safetensors](using_safetensors) guide). </Tip> This guide will show you how to convert other Stable Diffusion formats to be compatible with 🤗 Diffusers. ## PyTorch .ckpt The checkpoint - or `.ckpt` - format is commonly used to store and save models. The `.ckpt` file contains the entire model and is typically several GBs in size. While you can load and use a `.ckpt` file directly with the [`~StableDiffusionPipeline.from_single_file`] method, it is generally better to convert the `.ckpt` file to 🤗 Diffusers so both formats are available. There are two options for converting a `.ckpt` file: use a Space to convert the checkpoint or convert the `.ckpt` file with a script. ### Convert with a Space The easiest and most convenient way to convert a `.ckpt` file is to use the [SD to Diffusers](https://huggingface.co/spaces/diffusers/sd-to-diffusers) Space. You can follow the instructions on the Space to convert the `.ckpt` file. This approach works well for basic models, but it may struggle with more customized models. You'll know the Space failed if it returns an empty pull request or error. In this case, you can try converting the `.ckpt` file with a script. ### Convert with a script 🤗 Diffusers provides a [conversion script](https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py) for converting `.ckpt` files. This approach is more reliable than the Space above. Before you start, make sure you have a local clone of 🤗 Diffusers to run the script and log in to your Hugging Face account so you can open pull requests and push your converted model to the Hub. ```bash huggingface-cli login ``` To use the script: 1. Git clone the repository containing the `.ckpt` file you want to convert. For this example, let's convert this [TemporalNet](https://huggingface.co/CiaraRowles/TemporalNet) `.ckpt` file: ```bash git lfs install git clone https://huggingface.co/CiaraRowles/TemporalNet ``` 2. Open a pull request on the repository where you're converting the checkpoint from: ```bash cd TemporalNet && git fetch origin refs/pr/13:pr/13 git checkout pr/13 ``` 3. 
There are several input arguments to configure in the conversion script, but the most important ones are: - `checkpoint_path`: the path to the `.ckpt` file to convert. - `original_config_file`: a YAML file defining the configuration of the original architecture. If you can't find this file, try searching for the YAML file in the GitHub repository where you found the `.ckpt` file. - `dump_path`: the path to the converted model. For example, you can take the `cldm_v15.yaml` file from the [ControlNet](https://github.com/lllyasviel/ControlNet/tree/main/models) repository because the TemporalNet model is a Stable Diffusion v1.5 and ControlNet model. 4. Now you can run the script to convert the `.ckpt` file: ```bash python ../diffusers/scripts/convert_original_stable_diffusion_to_diffusers.py --checkpoint_path temporalnetv3.ckpt --original_config_file cldm_v15.yaml --dump_path ./ --controlnet ``` 5. Once the conversion is done, upload your converted model and test out the resulting [pull request](https://huggingface.co/CiaraRowles/TemporalNet/discussions/13)! ```bash git push origin pr/13:refs/pr/13 ``` ## Keras .pb or .h5 <Tip warning={true}> 🧪 This is an experimental feature. Only Stable Diffusion v1 checkpoints are supported by the Convert KerasCV Space at the moment. </Tip> [KerasCV](https://keras.io/keras_cv/) supports training for [Stable Diffusion](https://github.com/keras-team/keras-cv/blob/master/keras_cv/models/stable_diffusion) v1 and v2. However, it offers limited support for experimenting with Stable Diffusion models for inference and deployment whereas 🤗 Diffusers has a more complete set of features for this purpose, such as different [noise schedulers](https://huggingface.co/docs/diffusers/using-diffusers/schedulers), [flash attention](https://huggingface.co/docs/diffusers/optimization/xformers), and [other optimization techniques](https://huggingface.co/docs/diffusers/optimization/fp16). The [Convert KerasCV](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) Space converts `.pb` or `.h5` files to PyTorch, and then wraps them in a [`StableDiffusionPipeline`] so it is ready for inference. The converted checkpoint is stored in a repository on the Hugging Face Hub. For this example, let's convert the [`sayakpaul/textual-inversion-kerasio`](https://huggingface.co/sayakpaul/textual-inversion-kerasio/tree/main) checkpoint which was trained with Textual Inversion. It uses the special token `<my-funny-cat>` to personalize images with cats. The Convert KerasCV Space allows you to input the following: * Your Hugging Face token. * Paths to download the UNet and text encoder weights from. Depending on how the model was trained, you don't necessarily need to provide the paths to both the UNet and text encoder. For example, Textual Inversion only requires the embeddings from the text encoder and a text-to-image model only requires the UNet weights. * Placeholder token is only applicable for textual inversion models. * The `output_repo_prefix` is the name of the repository where the converted model is stored. Click the **Submit** button to automatically convert the KerasCV checkpoint! Once the checkpoint is successfully converted, you'll see a link to the new repository containing the converted checkpoint. Follow the link to the new repository, and you'll see the Convert KerasCV Space generated a model card with an inference widget to try out the converted model. 
If you prefer to run inference with code, click on the **Use in Diffusers** button in the upper right corner of the model card to copy and paste the code snippet: ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline", use_safetensors=True ) ``` Then, you can generate an image like: ```py from diffusers import DiffusionPipeline pipeline = DiffusionPipeline.from_pretrained( "sayakpaul/textual-inversion-cat-kerascv_sd_diffusers_pipeline", use_safetensors=True ) pipeline.to("cuda") placeholder_token = "<my-funny-cat-token>" prompt = f"two {placeholder_token} getting married, photorealistic, high quality" image = pipeline(prompt, num_inference_steps=50).images[0] ``` ## A1111 LoRA files [Automatic1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui) (A1111) is a popular web UI for Stable Diffusion that supports model sharing platforms like [Civitai](https://civitai.com/). Models trained with the Low-Rank Adaptation (LoRA) technique are especially popular because they're fast to train and have a much smaller file size than a fully finetuned model. 🤗 Diffusers supports loading A1111 LoRA checkpoints with [`~loaders.LoraLoaderMixin.load_lora_weights`]: ```py from diffusers import StableDiffusionXLPipeline import torch pipeline = StableDiffusionXLPipeline.from_pretrained( "Lykon/dreamshaper-xl-1-0", torch_dtype=torch.float16, variant="fp16" ).to("cuda") ``` Download a LoRA checkpoint from Civitai; this example uses the [Blueprintify SD XL 1.0](https://civitai.com/models/150986/blueprintify-sd-xl-10) checkpoint, but feel free to try out any LoRA checkpoint! ```py # uncomment to download the safetensor weights #!wget https://civitai.com/api/download/models/168776 -O blueprintify.safetensors ``` Load the LoRA checkpoint into the pipeline with the [`~loaders.LoraLoaderMixin.load_lora_weights`] method: ```py pipeline.load_lora_weights(".", weight_name="blueprintify.safetensors") ``` Now you can use the pipeline to generate images: ```py prompt = "bl3uprint, a highly detailed blueprint of the empire state building, explaining how to build all parts, many txt, blueprint grid backdrop" negative_prompt = "lowres, cropped, worst quality, low quality, normal quality, artifacts, signature, watermark, username, blurry, more than one bridge, bad architecture" image = pipeline( prompt=prompt, negative_prompt=negative_prompt, generator=torch.manual_seed(0), ).images[0] image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/blueprint-lora.png"/> </div>
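
If the LoRA's effect is too strong or too weak, you can also scale its contribution at inference time. The sketch below reuses the pipeline and prompts from above; the `0.5` value is just an illustrative starting point, and the exact scaling behavior may vary across Diffusers versions:

```py
image = pipeline(
    prompt=prompt,
    negative_prompt=negative_prompt,
    cross_attention_kwargs={"scale": 0.5},  # 0.0 effectively disables the LoRA, 1.0 applies it fully
    generator=torch.manual_seed(0),
).images[0]
image
```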
diffusers/docs/source/en/using-diffusers/other-formats.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/other-formats.md", "repo_id": "diffusers", "token_count": 2824 }
98
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Prompt weighting [[open-in-colab]] Prompt weighting provides a way to emphasize or de-emphasize certain parts of a prompt, allowing for more control over the generated image. A prompt can include several concepts, which gets turned into contextualized text embeddings. The embeddings are used by the model to condition its cross-attention layers to generate an image (read the Stable Diffusion [blog post](https://huggingface.co/blog/stable_diffusion) to learn more about how it works). Prompt weighting works by increasing or decreasing the scale of the text embedding vector that corresponds to its concept in the prompt because you may not necessarily want the model to focus on all concepts equally. The easiest way to prepare the prompt-weighted embeddings is to use [Compel](https://github.com/damian0815/compel), a text prompt-weighting and blending library. Once you have the prompt-weighted embeddings, you can pass them to any pipeline that has a [`prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.prompt_embeds) (and optionally [`negative_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/text2img#diffusers.StableDiffusionPipeline.__call__.negative_prompt_embeds)) parameter, such as [`StableDiffusionPipeline`], [`StableDiffusionControlNetPipeline`], and [`StableDiffusionXLPipeline`]. <Tip> If your favorite pipeline doesn't have a `prompt_embeds` parameter, please open an [issue](https://github.com/huggingface/diffusers/issues/new/choose) so we can add it! </Tip> This guide will show you how to weight and blend your prompts with Compel in 🤗 Diffusers. Before you begin, make sure you have the latest version of Compel installed: ```py # uncomment to install in Colab #!pip install compel --upgrade ``` For this guide, let's generate an image with the prompt `"a red cat playing with a ball"` using the [`StableDiffusionPipeline`]: ```py from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler import torch pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_safetensors=True) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") prompt = "a red cat playing with a ball" generator = torch.Generator(device="cpu").manual_seed(33) image = pipe(prompt, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_0.png"/> </div> ## Weighting You'll notice there is no "ball" in the image! Let's use compel to upweight the concept of "ball" in the prompt. 
Create a [`Compel`](https://github.com/damian0815/compel/blob/main/doc/compel.md#compel-objects) object, and pass it a tokenizer and text encoder: ```py from compel import Compel compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder) ``` compel uses `+` or `-` to increase or decrease the weight of a word in the prompt. To increase the weight of "ball": <Tip> `+` corresponds to the value `1.1`, `++` corresponds to `1.1^2`, and so on. Similarly, `-` corresponds to `0.9` and `--` corresponds to `0.9^2`. Feel free to experiment with adding more `+` or `-` in your prompt! </Tip> ```py prompt = "a red cat playing with a ball++" ``` Pass the prompt to `compel_proc` to create the new prompt embeddings which are passed to the pipeline: ```py prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/forest_1.png"/> </div> To downweight parts of the prompt, use the `-` suffix: ```py prompt = "a red------- cat playing with a ball" prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"/> </div> You can even up or downweight multiple concepts in the same prompt: ```py prompt = "a red cat++ playing with a ball----" prompt_embeds = compel_proc(prompt) generator = torch.manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-pos-neg.png"/> </div> ## Blending You can also create a weighted *blend* of prompts by adding `.blend()` to a list of prompts and passing it some weights. Your blend may not always produce the result you expect because it breaks some assumptions about how the text encoder functions, so just have fun and experiment with it! ```py prompt_embeds = compel_proc('("a red cat playing with a ball", "jungle").blend(0.7, 0.8)') generator = torch.Generator(device="cuda").manual_seed(33) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-blend.png"/> </div> ## Conjunction A conjunction diffuses each prompt independently and concatenates their results by their weighted sum. 
Add `.and()` to the end of a list of prompts to create a conjunction: ```py prompt_embeds = compel_proc('["a red cat", "playing with a", "ball"].and()') generator = torch.Generator(device="cuda").manual_seed(55) image = pipe(prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-conj.png"/> </div> ## Textual inversion [Textual inversion](../training/text_inversion) is a technique for learning a specific concept from some images which you can use to generate new images conditioned on that concept. Create a pipeline and use the [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] function to load the textual inversion embeddings (feel free to browse the [Stable Diffusion Conceptualizer](https://huggingface.co/spaces/sd-concepts-library/stable-diffusion-conceptualizer) for 100+ trained concepts): ```py import torch from diffusers import StableDiffusionPipeline from compel import Compel, DiffusersTextualInversionManager pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda") pipe.load_textual_inversion("sd-concepts-library/midjourney-style") ``` Compel provides a `DiffusersTextualInversionManager` class to simplify prompt weighting with textual inversion. Instantiate `DiffusersTextualInversionManager` and pass it to the `Compel` class: ```py textual_inversion_manager = DiffusersTextualInversionManager(pipe) compel_proc = Compel( tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder, textual_inversion_manager=textual_inversion_manager) ``` Incorporate the concept to condition a prompt with using the `<concept>` syntax: ```py prompt_embeds = compel_proc('("A red cat++ playing with a ball <midjourney-style>")') image = pipe(prompt_embeds=prompt_embeds).images[0] image ``` <div class="flex justify-center"> <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-text-inversion.png"/> </div> ## DreamBooth [DreamBooth](../training/dreambooth) is a technique for generating contextualized images of a subject given just a few images of the subject to train on. It is similar to textual inversion, but DreamBooth trains the full model whereas textual inversion only fine-tunes the text embeddings. This means you should use [`~DiffusionPipeline.from_pretrained`] to load the DreamBooth model (feel free to browse the [Stable Diffusion Dreambooth Concepts Library](https://huggingface.co/sd-dreambooth-library) for 100+ trained models): ```py import torch from diffusers import DiffusionPipeline, UniPCMultistepScheduler from compel import Compel pipe = DiffusionPipeline.from_pretrained("sd-dreambooth-library/dndcoverart-v1", torch_dtype=torch.float16).to("cuda") pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) ``` Create a `Compel` class with a tokenizer and text encoder, and pass your prompt to it. Depending on the model you use, you'll need to incorporate the model's unique identifier into your prompt. 
For example, the `dndcoverart-v1` model uses the identifier `dndcoverart`:

```py
compel_proc = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)
prompt_embeds = compel_proc('("magazine cover of a dndcoverart dragon, high quality, intricate details, larry elmore art style").and()')
image = pipe(prompt_embeds=prompt_embeds).images[0]
image
```

<div class="flex justify-center">
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-dreambooth.png"/>
</div>

## Stable Diffusion XL

Stable Diffusion XL (SDXL) has two tokenizers and text encoders so its usage is a bit different. To address this, you should pass both tokenizers and encoders to the `Compel` class:

```py
from compel import Compel, ReturnedEmbeddingsType
from diffusers import DiffusionPipeline
from diffusers.utils import make_image_grid
import torch

pipeline = DiffusionPipeline.from_pretrained(
  "stabilityai/stable-diffusion-xl-base-1.0",
  variant="fp16",
  use_safetensors=True,
  torch_dtype=torch.float16
).to("cuda")

compel = Compel(
  tokenizer=[pipeline.tokenizer, pipeline.tokenizer_2],
  text_encoder=[pipeline.text_encoder, pipeline.text_encoder_2],
  returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
  requires_pooled=[False, True]
)
```

This time, let's upweight "ball" by a factor of 1.5 for the first prompt, and downweight "ball" by 0.6 for the second prompt. The [`StableDiffusionXLPipeline`] also requires [`pooled_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline.__call__.pooled_prompt_embeds) (and optionally [`negative_pooled_prompt_embeds`](https://huggingface.co/docs/diffusers/en/api/pipelines/stable_diffusion/stable_diffusion_xl#diffusers.StableDiffusionXLInpaintPipeline.__call__.negative_pooled_prompt_embeds)) so you should pass those to the pipeline along with the conditioning tensors:

```py
# apply weights
prompt = ["a red cat playing with a (ball)1.5", "a red cat playing with a (ball)0.6"]
conditioning, pooled = compel(prompt)

# generate image
generator = [torch.Generator().manual_seed(33) for _ in range(len(prompt))]
images = pipeline(prompt_embeds=conditioning, pooled_prompt_embeds=pooled, generator=generator, num_inference_steps=30).images
make_image_grid(images, rows=1, cols=2)
```

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/sdxl_ball1.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"a red cat playing with a (ball)1.5"</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/compel/sdxl_ball2.png"/>
    <figcaption class="mt-2 text-center text-sm text-gray-500">"a red cat playing with a (ball)0.6"</figcaption>
  </div>
</div>
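
If you also want a negative prompt with SDXL, you can run it through the same `compel` object and pass the extra tensors to the pipeline. This is a minimal sketch that reuses the `pipeline`, `compel`, `conditioning`, `pooled`, and `generator` objects from above; the negative prompt text is just an example, and if your positive and negative prompts differ a lot in length you may additionally need Compel's `pad_conditioning_tensors_to_same_length` helper:

```py
negative_prompt = ["blurry, low quality"] * len(prompt)
negative_conditioning, negative_pooled = compel(negative_prompt)

images = pipeline(
    prompt_embeds=conditioning,
    pooled_prompt_embeds=pooled,
    negative_prompt_embeds=negative_conditioning,
    negative_pooled_prompt_embeds=negative_pooled,
    generator=generator,
    num_inference_steps=30,
).images
make_image_grid(images, rows=1, cols=2)
```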
diffusers/docs/source/en/using-diffusers/weighted_prompts.md/0
{ "file_path": "diffusers/docs/source/en/using-diffusers/weighted_prompts.md", "repo_id": "diffusers", "token_count": 4105 }
99
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# How to use Stable Diffusion on Habana Gaudi

🤗 Diffusers is compatible with Habana Gaudi through 🤗 [Optimum Habana](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion).

## Requirements

- Optimum Habana 1.4 or later; see [here](https://huggingface.co/docs/optimum/habana/installation) for how to install it.
- SynapseAI 1.8.

## Inference pipeline

To generate images with Stable Diffusion 1 and 2 on Gaudi, you need to instantiate two objects:

- A pipeline with [`GaudiStableDiffusionPipeline`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline). This pipeline supports *text-to-image generation*.
- A scheduler with [`GaudiDDIMScheduler`](https://huggingface.co/docs/optimum/habana/package_reference/stable_diffusion_pipeline#optimum.habana.diffusers.GaudiDDIMScheduler). This scheduler is optimized for Habana Gaudi.

When initializing the pipeline, you have to specify `use_habana=True` to deploy it on HPUs. Furthermore, to get the fastest possible generation, you should enable **HPU graphs** with `use_hpu_graphs=True`. Finally, you need to specify a [Gaudi configuration](https://huggingface.co/docs/optimum/habana/package_reference/gaudi_config), which can be downloaded from the [Hugging Face Hub](https://huggingface.co/Habana).

```python
from optimum.habana import GaudiConfig
from optimum.habana.diffusers import GaudiDDIMScheduler, GaudiStableDiffusionPipeline

model_name = "stabilityai/stable-diffusion-2-base"
scheduler = GaudiDDIMScheduler.from_pretrained(model_name, subfolder="scheduler")
pipeline = GaudiStableDiffusionPipeline.from_pretrained(
    model_name,
    scheduler=scheduler,
    use_habana=True,
    use_hpu_graphs=True,
    gaudi_config="Habana/stable-diffusion",
)
```

You can then call the pipeline to generate images, batch by batch, from one or more prompts:

```python
outputs = pipeline(
    prompt=[
        "High quality photo of an astronaut riding a horse in space",
        "Face of a yellow cat, high resolution, sitting on a park bench",
    ],
    num_images_per_prompt=10,
    batch_size=4,
)
```

For more information, check out Optimum Habana's [documentation](https://huggingface.co/docs/optimum/habana/usage_guides/stable_diffusion) and the [example](https://github.com/huggingface/optimum-habana/tree/main/examples/stable-diffusion) provided in the official GitHub repository.

## Benchmark

Here are the latencies for Habana first-generation Gaudi and Gaudi2 with the [Habana/stable-diffusion](https://huggingface.co/Habana/stable-diffusion) Gaudi configuration (mixed precision bf16/fp32):

|                        | Latency (batch size = 1) | Throughput (batch size = 8) |
| ---------------------- |:------------------------:|:---------------------------:|
| first-generation Gaudi | 4.29s                    | 0.283 images/s              |
| Gaudi2                 | 1.54s                    | 0.904 images/s              |
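
As a rough sanity check against the table above, you can time a batched generation yourself. This is only a wall-clock sketch using the `pipeline` object defined earlier, not the official benchmark methodology, so the first run will be slower because of HPU graph warmup:

```python
import time

start = time.perf_counter()
outputs = pipeline(
    prompt="High quality photo of an astronaut riding a horse in space",
    num_images_per_prompt=8,
    batch_size=8,
)
elapsed = time.perf_counter() - start
print(f"{len(outputs.images) / elapsed:.3f} images/s")
```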
diffusers/docs/source/ko/optimization/habana.md/0
{ "file_path": "diffusers/docs/source/ko/optimization/habana.md", "repo_id": "diffusers", "token_count": 1911 }
100
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# InstructPix2Pix

[InstructPix2Pix](https://arxiv.org/abs/2211.09800) is a method for fine-tuning a text-conditioned diffusion model so that it can follow edit instructions for an image. Models fine-tuned with this method take the following as inputs:

<p align="center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/evaluation_diffusion_models/edit-instruction.png" alt="instructpix2pix-inputs" width=600/>
</p>

The output is the "edited" image that reflects the edit instruction applied to the input image:

<p align="center">
    <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/output-gs%407-igs%401-steps%4050.png" alt="instructpix2pix-output" width=600/>
</p>

The `train_instruct_pix2pix.py` script (you can find it [here](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py)) walks through the training procedure and shows how to apply it to Stable Diffusion.

***
`train_instruct_pix2pix.py` implements the InstructPix2Pix training procedure faithfully to the [original implementation](https://github.com/timothybrooks/instruct-pix2pix), but it has only been tested on a [small dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples). This can affect the final results. For better results, we recommend training for longer on a larger dataset. A large dataset for InstructPix2Pix training can be found [here](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered).
***

## Running locally with PyTorch

### Installing the dependencies

Before running the script, make sure to install the library's training dependencies:

**Important**

To successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the installation up to date, since the example scripts are updated frequently and example-specific requirements are installed along the way. To do this, run the following steps in a new virtual environment:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install -e .
```

Then cd into the example folder.

```bash
cd examples/instruct_pix2pix
```

Now run:

```bash
pip install -r requirements.txt
```

And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment:

```bash
accelerate config
```

Or, to use a default accelerate configuration without answering questions about your environment:

```bash
accelerate config default
```

Or, if your environment doesn't support an interactive shell such as a notebook, follow these steps:

```python
from accelerate.utils import write_basic_config

write_basic_config()
```

### Example

As mentioned earlier, we'll use a [small dataset](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) for training. It is a smaller version of the [original dataset](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) used in the InstructPix2Pix paper. To use your own dataset, take a look at the [Create a dataset for training](create_dataset) guide.

Specify the `MODEL_NAME` environment variable (either a Hub model repository id or a path to a folder containing the model weights) and pass it to the [`pretrained_model_name_or_path`](https://huggingface.co/docs/diffusers/en/api/diffusion_pipeline#diffusers.DiffusionPipeline.from_pretrained.pretrained_model_name_or_path) argument. You'll also need to specify the dataset name in `DATASET_ID`:

```bash
export MODEL_NAME="runwayml/stable-diffusion-v1-5"
export DATASET_ID="fusing/instructpix2pix-1000-samples"
```

Now you can launch training. The script saves all the components (`feature_extractor`, `scheduler`, `text_encoder`, `unet`, etc.) in a subfolder of your repository.
```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
    --pretrained_model_name_or_path=$MODEL_NAME \
    --dataset_name=$DATASET_ID \
    --enable_xformers_memory_efficient_attention \
    --resolution=256 --random_flip \
    --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
    --max_train_steps=15000 \
    --checkpointing_steps=5000 --checkpoints_total_limit=1 \
    --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
    --conditioning_dropout_prob=0.05 \
    --mixed_precision=fp16 \
    --seed=42 \
    --push_to_hub
```

Additionally, we support performing validation inference to monitor training progress with Weights and Biases. You can enable this feature with `report_to="wandb"`:

```bash
accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
    --pretrained_model_name_or_path=$MODEL_NAME \
    --dataset_name=$DATASET_ID \
    --enable_xformers_memory_efficient_attention \
    --resolution=256 --random_flip \
    --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
    --max_train_steps=15000 \
    --checkpointing_steps=5000 --checkpoints_total_limit=1 \
    --learning_rate=5e-05 --max_grad_norm=1 --lr_warmup_steps=0 \
    --conditioning_dropout_prob=0.05 \
    --mixed_precision=fp16 \
    --val_image_url="https://hf.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png" \
    --validation_prompt="make the mountains snowy" \
    --seed=42 \
    --report_to=wandb \
    --push_to_hub
```

We recommend this kind of validation because it is useful for debugging the model. Note that you need `wandb` installed to use it; you can install it by running `pip install wandb`.

[Here](https://wandb.ai/sayakpaul/instruct-pix2pix/runs/ctr3kovq) is an example run that includes some validation samples and the training hyperparameters.

***Note: In the original paper, the authors observed that a model trained at a resolution of 256x256 generalizes well to larger resolutions such as 512x512. This is because of the large dataset they used for training.***

## Training with multiple GPUs

`accelerate` enables seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command:

```bash
accelerate launch --mixed_precision="fp16" --multi_gpu train_instruct_pix2pix.py \
    --pretrained_model_name_or_path=runwayml/stable-diffusion-v1-5 \
    --dataset_name=sayakpaul/instructpix2pix-1000-samples \
    --use_ema \
    --enable_xformers_memory_efficient_attention \
    --resolution=512 --random_flip \
    --train_batch_size=4 --gradient_accumulation_steps=4 --gradient_checkpointing \
    --max_train_steps=15000 \
    --checkpointing_steps=5000 --checkpoints_total_limit=1 \
    --learning_rate=5e-05 --lr_warmup_steps=0 \
    --conditioning_dropout_prob=0.05 \
    --mixed_precision=fp16 \
    --seed=42 \
    --push_to_hub
```

## Inference

Once training is complete, you can run inference:

```python
import PIL
import requests
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline

model_id = "your_model_id"  # <- change this to your model id
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
generator = torch.Generator("cuda").manual_seed(0)

url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png"


def download_image(url):
    image = PIL.Image.open(requests.get(url, stream=True).raw)
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


image = download_image(url)

prompt = "wipe out the lake"
num_inference_steps = 20
image_guidance_scale = 1.5
guidance_scale = 10

edited_image = pipe(
    prompt,
    image=image,
    num_inference_steps=num_inference_steps,
    image_guidance_scale=image_guidance_scale,
    guidance_scale=guidance_scale,
    generator=generator,
).images[0]
edited_image.save("edited_image.png")
```

An example model repository obtained with the training script is available at [sayakpaul/instruct-pix2pix](https://huggingface.co/sayakpaul/instruct-pix2pix).

We recommend using three parameters to control the speed and quality of the output:

* `num_inference_steps`
* `image_guidance_scale`
* `guidance_scale`

In particular, `image_guidance_scale` and `guidance_scale` can have a big impact on the generated ("edited") image (see [here](https://twitter.com/RisingSayak/status/1628392199196151808?s=20) for an example).

If you're looking for some interesting ways to use the InstructPix2Pix training method, check out the blog post [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd).
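
To get a feel for how `image_guidance_scale` and `guidance_scale` interact, it can help to sweep a few combinations on the same input. Here is a minimal sketch reusing the `pipe` and `image` objects from the inference example above; the specific values are just illustrative starting points:

```python
for image_guidance_scale, guidance_scale in [(1.0, 7.5), (1.5, 10), (2.0, 12)]:
    # re-seed for each combination so the comparison only reflects the guidance settings
    generator = torch.Generator("cuda").manual_seed(0)
    edited_image = pipe(
        "wipe out the lake",
        image=image,
        num_inference_steps=20,
        image_guidance_scale=image_guidance_scale,
        guidance_scale=guidance_scale,
        generator=generator,
    ).images[0]
    edited_image.save(f"edited_igs{image_guidance_scale}_gs{guidance_scale}.png")
```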
diffusers/docs/source/ko/training/instructpix2pix.md/0
{ "file_path": "diffusers/docs/source/ko/training/instructpix2pix.md", "repo_id": "diffusers", "token_count": 5652 }
101
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text-guided image inpainting

[[open-in-colab]]

The [`StableDiffusionInpaintPipeline`] lets you edit specific parts of an image by providing a mask and a text prompt. It uses a version of Stable Diffusion specifically trained for inpainting tasks, such as [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting).

Start by loading an instance of the [`StableDiffusionInpaintPipeline`]:

```python
import PIL
import requests
import torch
from io import BytesIO

from diffusers import StableDiffusionInpaintPipeline

pipeline = StableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    torch_dtype=torch.float16,
)
pipeline = pipeline.to("cuda")
```

Download the dog image and the mask you'll replace it with later:

```python
def download_image(url):
    response = requests.get(url)
    return PIL.Image.open(BytesIO(response.content)).convert("RGB")


img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"

init_image = download_image(img_url).resize((512, 512))
mask_image = download_image(mask_url).resize((512, 512))
```

Now you can create a prompt to replace the masked area with something else:

```python
prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
image = pipeline(prompt=prompt, image=init_image, mask_image=mask_image).images[0]
```

`image`                    | `mask_image`              | `prompt`                  | output                    |
:-------------------------:|:-------------------------:|:-------------------------:|-------------------------:|
<img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" alt="drawing" width="250"/> | <img src="https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" alt="drawing" width="250"/> | ***Face of a yellow cat, high resolution, sitting on a park bench*** | <img src="https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/in_paint/yellow_cat_sitting_on_a_park_bench.png" alt="drawing" width="250"/> |

<Tip warning={true}>

A previous experimental implementation of inpainting used a different, lower-quality process. To ensure backwards compatibility, loading a pretrained pipeline that doesn't include the new model will still apply the old inpainting method.

</Tip>

Try out image inpainting yourself in the Space below!

<iframe
	src="https://runwayml-stable-diffusion-inpainting.hf.space"
	frameborder="0"
	width="850"
	height="500"
></iframe>
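
To compare the input image, the mask, and the result side by side, you can use the `make_image_grid` helper from Diffusers. This is a small sketch that reuses the objects created in the code above:

```python
from diffusers.utils import make_image_grid

# all three images were already resized to 512x512 above
grid = make_image_grid([init_image, mask_image, image], rows=1, cols=3)
grid.save("inpaint-comparison.png")
```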
diffusers/docs/source/ko/using-diffusers/inpaint.md/0
{ "file_path": "diffusers/docs/source/ko/using-diffusers/inpaint.md", "repo_id": "diffusers", "token_count": 1656 }
102
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Installation

🤗 Diffusers is tested on Python 3.8+, PyTorch 1.7.0+, and Flax. Follow the installation instructions below for the deep learning library you are using:

- [PyTorch](https://pytorch.org/get-started/locally/) installation instructions
- [Flax](https://flax.readthedocs.io/en/latest/) installation instructions

## Install with pip

We recommend installing 🤗 Diffusers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects and avoid compatibility issues between dependencies.

Start by creating a virtual environment in your project directory:

```bash
python -m venv .env
```

Activate the virtual environment:

```bash
source .env/bin/activate
```

We also recommend installing 🤗 Transformers because 🤗 Diffusers relies on its models:

<frameworkcontent>
<pt>
```bash
pip install diffusers["torch"] transformers
```
</pt>
<jax>
```bash
pip install diffusers["flax"] transformers
```
</jax>
</frameworkcontent>

## Install from source

Before installing 🤗 Diffusers from source, make sure you have PyTorch and 🤗 Accelerate installed.

To install 🤗 Accelerate:

```bash
pip install accelerate
```

Then install 🤗 Diffusers from source:

```bash
pip install git+https://github.com/huggingface/diffusers
```

This command installs the latest development version `main` rather than the latest `stable` version. The `main` version is useful for staying up to date with the latest developments, for example if a bug has been fixed since the last official release but a new release hasn't been rolled out yet. However, this means the `main` version may not always be stable. We strive to keep the `main` version operational, and most issues are usually resolved within a few hours or a day. If you run into a problem, please open an [Issue](https://github.com/huggingface/diffusers/issues/new/choose) so we can fix it as soon as possible!

## Editable install

You will need an editable install if you:

- Use the `main` version of the source code.
- Contribute to 🤗 Diffusers and need to test changes in the code.

Clone the repository and install 🤗 Diffusers with the following commands:

```bash
git clone https://github.com/huggingface/diffusers.git
cd diffusers
```

<frameworkcontent>
<pt>
```bash
pip install -e ".[torch]"
```
</pt>
<jax>
```bash
pip install -e ".[flax]"
```
</jax>
</frameworkcontent>

These commands link the folder you cloned the repository into with your Python library paths. Python will then search inside the folder you cloned in addition to the normal library paths.
For example, if your Python packages are typically installed in `~/anaconda3/envs/main/lib/python3.8/site-packages/`, Python will also search the `~/diffusers/` folder you cloned.

<Tip warning={true}>

You must keep the `diffusers` folder if you want to keep using the library.

</Tip>

Now you can easily update your clone to the latest version of 🤗 Diffusers with the following command:

```bash
cd ~/diffusers/
git pull
```

Your Python environment will find the `main` version of 🤗 Diffusers on the next run.

## Cache

Model weights and files are downloaded from the Hub to a cache, which is usually your home directory. You can change the cache location by specifying the `HF_HOME` or `HUGGINGFACE_HUB_CACHE` environment variables, or by configuring the `cache_dir` parameter in methods like [`~DiffusionPipeline.from_pretrained`].

Cached files allow you to run 🤗 Diffusers offline. To prevent 🤗 Diffusers from connecting to the internet, set the `HF_HUB_OFFLINE` environment variable to `True` and 🤗 Diffusers will only load previously downloaded files from the cache.

```shell
export HF_HUB_OFFLINE=True
```

For more details about managing and cleaning the cache, take a look at the [caching](https://huggingface.co/docs/huggingface_hub/guides/manage-cache) guide.

## Telemetry

Our library gathers telemetry information during [`~DiffusionPipeline.from_pretrained`] requests. The data gathered includes the version of 🤗 Diffusers and PyTorch/Flax, the requested model or pipeline class, and the path to a pretrained checkpoint if it is hosted on the Hugging Face Hub. This usage data helps us debug issues and prioritize new features. Telemetry is only sent when loading models and pipelines from the Hub, and it is not collected if you're loading local files.

We understand that not everyone wants to share additional information, and we respect your privacy. You can disable telemetry collection by setting the `DISABLE_TELEMETRY` environment variable from your terminal:

On Linux/MacOS:

```bash
export DISABLE_TELEMETRY=YES
```

On Windows:

```bash
set DISABLE_TELEMETRY=YES
```
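
For example, to keep downloads in a project-local folder instead of the default cache, you can pass `cache_dir` directly. This is a minimal sketch; the path is just an example:

```py
from diffusers import DiffusionPipeline

# downloads (or reuses) the checkpoint inside ./model-cache instead of the default HF cache
pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", cache_dir="./model-cache", use_safetensors=True
)
```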
diffusers/docs/source/pt/installation.md/0
{ "file_path": "diffusers/docs/source/pt/installation.md", "repo_id": "diffusers", "token_count": 2101 }
103
import glob import os from typing import Dict, List, Union import safetensors.torch import torch from huggingface_hub import snapshot_download from huggingface_hub.utils import validate_hf_hub_args from diffusers import DiffusionPipeline, __version__ from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME from diffusers.utils import CONFIG_NAME, ONNX_WEIGHTS_NAME, WEIGHTS_NAME class CheckpointMergerPipeline(DiffusionPipeline): """ A class that supports merging diffusion models based on the discussion here: https://github.com/huggingface/diffusers/issues/877 Example usage:- pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py") merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True) merged_pipe.to('cuda') prompt = "An astronaut riding a unicycle on Mars" results = merged_pipe(prompt) ## For more details, see the docstring for the merge method. """ def __init__(self): self.register_to_config() super().__init__() def _compare_model_configs(self, dict0, dict1): if dict0 == dict1: return True else: config0, meta_keys0 = self._remove_meta_keys(dict0) config1, meta_keys1 = self._remove_meta_keys(dict1) if config0 == config1: print(f"Warning !: Mismatch in keys {meta_keys0} and {meta_keys1}.") return True return False def _remove_meta_keys(self, config_dict: Dict): meta_keys = [] temp_dict = config_dict.copy() for key in config_dict.keys(): if key.startswith("_"): temp_dict.pop(key) meta_keys.append(key) return (temp_dict, meta_keys) @torch.no_grad() @validate_hf_hub_args def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs): """ Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed in the argument 'pretrained_model_name_or_path_list' as a list. Parameters: ----------- pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format. **kwargs: Supports all the default DiffusionPipeline.get_config_dict kwargs viz.. cache_dir, resume_download, force_download, proxies, local_files_only, token, revision, torch_dtype, device_map. alpha - The interpolation parameter. Ranges from 0 to 1. It affects the ratio in which the checkpoints are merged. A 0.8 alpha would mean that the first model checkpoints would affect the final result far less than an alpha of 0.2 interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None. Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_diff" is supported. force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False. variant - which variant of a pretrained model to load, e.g. 
"fp16" (None) """ # Default kwargs from DiffusionPipeline cache_dir = kwargs.pop("cache_dir", None) resume_download = kwargs.pop("resume_download", False) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) token = kwargs.pop("token", None) variant = kwargs.pop("variant", None) revision = kwargs.pop("revision", None) torch_dtype = kwargs.pop("torch_dtype", None) device_map = kwargs.pop("device_map", None) alpha = kwargs.pop("alpha", 0.5) interp = kwargs.pop("interp", None) print("Received list", pretrained_model_name_or_path_list) print(f"Combining with alpha={alpha}, interpolation mode={interp}") checkpoint_count = len(pretrained_model_name_or_path_list) # Ignore result from model_index_json comparision of the two checkpoints force = kwargs.pop("force", False) # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now. if checkpoint_count > 3 or checkpoint_count < 2: raise ValueError( "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being" " passed." ) print("Received the right number of checkpoints") # chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2] # chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None # Validate that the checkpoints can be merged # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_' config_dicts = [] for pretrained_model_name_or_path in pretrained_model_name_or_path_list: config_dict = DiffusionPipeline.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, force_download=force_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, ) config_dicts.append(config_dict) comparison_result = True for idx in range(1, len(config_dicts)): comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx]) if not force and comparison_result is False: raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.") print(config_dicts[0], config_dicts[1]) print("Compatible model_index.json files found") # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files. 
cached_folders = [] for pretrained_model_name_or_path, config_dict in zip(pretrained_model_name_or_path_list, config_dicts): folder_names = [k for k in config_dict.keys() if not k.startswith("_")] allow_patterns = [os.path.join(k, "*") for k in folder_names] allow_patterns += [ WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, ONNX_WEIGHTS_NAME, DiffusionPipeline.config_name, ] requested_pipeline_class = config_dict.get("_class_name") user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class} cached_folder = ( pretrained_model_name_or_path if os.path.isdir(pretrained_model_name_or_path) else snapshot_download( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, token=token, revision=revision, allow_patterns=allow_patterns, user_agent=user_agent, ) ) print("Cached Folder", cached_folder) cached_folders.append(cached_folder) # Step 3:- # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place final_pipe = DiffusionPipeline.from_pretrained( cached_folders[0], torch_dtype=torch_dtype, device_map=device_map, variant=variant, ) final_pipe.to(self.device) checkpoint_path_2 = None if len(cached_folders) > 2: checkpoint_path_2 = os.path.join(cached_folders[2]) if interp == "sigmoid": theta_func = CheckpointMergerPipeline.sigmoid elif interp == "inv_sigmoid": theta_func = CheckpointMergerPipeline.inv_sigmoid elif interp == "add_diff": theta_func = CheckpointMergerPipeline.add_difference else: theta_func = CheckpointMergerPipeline.weighted_sum # Find each module's state dict. for attr in final_pipe.config.keys(): if not attr.startswith("_"): checkpoint_path_1 = os.path.join(cached_folders[1], attr) if os.path.exists(checkpoint_path_1): files = [ *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")), *glob.glob(os.path.join(checkpoint_path_1, "*.bin")), ] checkpoint_path_1 = files[0] if len(files) > 0 else None if len(cached_folders) < 3: checkpoint_path_2 = None else: checkpoint_path_2 = os.path.join(cached_folders[2], attr) if os.path.exists(checkpoint_path_2): files = [ *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")), *glob.glob(os.path.join(checkpoint_path_2, "*.bin")), ] checkpoint_path_2 = files[0] if len(files) > 0 else None # For an attr if both checkpoint_path_1 and 2 are None, ignore. # If atleast one is present, deal with it according to interp method, of course only if the state_dict keys match. 
if checkpoint_path_1 is None and checkpoint_path_2 is None: print(f"Skipping {attr}: not present in 2nd or 3rd model") continue try: module = getattr(final_pipe, attr) if isinstance(module, bool): # ignore requires_safety_checker boolean continue theta_0 = getattr(module, "state_dict") theta_0 = theta_0() update_theta_0 = getattr(module, "load_state_dict") theta_1 = ( safetensors.torch.load_file(checkpoint_path_1) if (checkpoint_path_1.endswith(".safetensors")) else torch.load(checkpoint_path_1, map_location="cpu") ) theta_2 = None if checkpoint_path_2: theta_2 = ( safetensors.torch.load_file(checkpoint_path_2) if (checkpoint_path_2.endswith(".safetensors")) else torch.load(checkpoint_path_2, map_location="cpu") ) if not theta_0.keys() == theta_1.keys(): print(f"Skipping {attr}: key mismatch") continue if theta_2 and not theta_1.keys() == theta_2.keys(): print(f"Skipping {attr}: key mismatch") continue except Exception as e: print(f"Skipping {attr} due to an unexpected error: {str(e)}") continue print(f"MERGING {attr}") for key in theta_0.keys(): if theta_2: theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha) else: theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha) del theta_1 del theta_2 update_theta_0(theta_0) del theta_0 return final_pipe @staticmethod def weighted_sum(theta0, theta1, theta2, alpha): return ((1 - alpha) * theta0) + (alpha * theta1) # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep) @staticmethod def sigmoid(theta0, theta1, theta2, alpha): alpha = alpha * alpha * (3 - (2 * alpha)) return theta0 + ((theta1 - theta0) * alpha) # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep) @staticmethod def inv_sigmoid(theta0, theta1, theta2, alpha): import math alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0) return theta0 + ((theta1 - theta0) * alpha) @staticmethod def add_difference(theta0, theta1, theta2, alpha): return theta0 + (theta1 - theta2) * (1.0 - alpha)
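# Minimal sketch, not part of the upstream pipeline: it only illustrates how the
# static interpolation helpers above combine two matching state_dict tensors.
# The tensor values and the alpha below are hypothetical, chosen purely for clarity,
# and the block is guarded so it never runs when the module is imported as a custom pipeline.
if __name__ == "__main__":
    theta0 = torch.ones(2, 2)   # weight tensor from the first checkpoint
    theta1 = torch.zeros(2, 2)  # matching weight tensor from the second checkpoint
    alpha = 0.8
    # weighted_sum keeps (1 - alpha) of theta0 and alpha of theta1.
    merged = CheckpointMergerPipeline.weighted_sum(theta0, theta1, None, alpha)
    assert torch.allclose(merged, torch.full((2, 2), 0.2))
    # add_difference needs a third checkpoint: theta0 + (theta1 - theta2) * (1 - alpha).
    theta2 = torch.full((2, 2), 0.5)
    merged3 = CheckpointMergerPipeline.add_difference(theta0, theta1, theta2, alpha)
    assert torch.allclose(merged3, torch.full((2, 2), 0.9))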
diffusers/examples/community/checkpoint_merger.py/0
{ "file_path": "diffusers/examples/community/checkpoint_merger.py", "repo_id": "diffusers", "token_count": 6172 }
104
import inspect from typing import Any, Callable, Dict, List, Optional, Union import numpy as np import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models import AutoencoderKL, UNet2DConditionModel from diffusers.models.lora import adjust_lora_scale_text_encoder from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker from diffusers.schedulers import LCMScheduler from diffusers.utils import ( USE_PEFT_BACKEND, deprecate, logging, replace_example_docstring, scale_lora_layers, unscale_lora_layers, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> import torch >>> import numpy as np >>> from diffusers import DiffusionPipeline >>> pipe = DiffusionPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", custom_pipeline="latent_consistency_interpolate") >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality. >>> pipe.to(torch_device="cuda", torch_dtype=torch.float32) >>> prompts = ["A cat", "A dog", "A horse"] >>> num_inference_steps = 4 >>> num_interpolation_steps = 24 >>> seed = 1337 >>> torch.manual_seed(seed) >>> np.random.seed(seed) >>> images = pipe( prompt=prompts, height=512, width=512, num_inference_steps=num_inference_steps, num_interpolation_steps=num_interpolation_steps, guidance_scale=8.0, embedding_interpolation_type="lerp", latent_interpolation_type="slerp", process_batch_size=4, # Make it higher or lower based on your GPU memory generator=torch.Generator(seed), ) >>> # Save the images as a video >>> import imageio >>> from PIL import Image >>> def pil_to_video(images: List[Image.Image], filename: str, fps: int = 60) -> None: frames = [np.array(image) for image in images] with imageio.get_writer(filename, fps=fps) as video_writer: for frame in frames: video_writer.append_data(frame) >>> pil_to_video(images, "lcm_interpolate.mp4", fps=24) ``` """ def lerp( v0: Union[torch.Tensor, np.ndarray], v1: Union[torch.Tensor, np.ndarray], t: Union[float, torch.Tensor, np.ndarray], ) -> Union[torch.Tensor, np.ndarray]: """ Linearly interpolate between two vectors/tensors. Args: v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor. v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor. t: (`float`, `torch.Tensor`, or `np.ndarray`): Interpolation factor. If float, must be between 0 and 1. If np.ndarray or torch.Tensor, must be one dimensional with values between 0 and 1. Returns: Union[torch.Tensor, np.ndarray] Interpolated vector/tensor between v0 and v1. """ inputs_are_torch = False t_is_float = False if isinstance(v0, torch.Tensor): inputs_are_torch = True input_device = v0.device v0 = v0.cpu().numpy() v1 = v1.cpu().numpy() if isinstance(t, torch.Tensor): inputs_are_torch = True input_device = t.device t = t.cpu().numpy() elif isinstance(t, float): t_is_float = True t = np.array([t]) t = t[..., None] v0 = v0[None, ...] v1 = v1[None, ...] 
v2 = (1 - t) * v0 + t * v1 if t_is_float and v0.ndim > 1: assert v2.shape[0] == 1 v2 = np.squeeze(v2, axis=0) if inputs_are_torch: v2 = torch.from_numpy(v2).to(input_device) return v2 def slerp( v0: Union[torch.Tensor, np.ndarray], v1: Union[torch.Tensor, np.ndarray], t: Union[float, torch.Tensor, np.ndarray], DOT_THRESHOLD=0.9995, ) -> Union[torch.Tensor, np.ndarray]: """ Spherical linear interpolation between two vectors/tensors. Args: v0 (`torch.Tensor` or `np.ndarray`): First vector/tensor. v1 (`torch.Tensor` or `np.ndarray`): Second vector/tensor. t: (`float`, `torch.Tensor`, or `np.ndarray`): Interpolation factor. If float, must be between 0 and 1. If np.ndarray or torch.Tensor, must be one dimensional with values between 0 and 1. DOT_THRESHOLD (`float`, *optional*, default=0.9995): Threshold for when to use linear interpolation instead of spherical interpolation. Returns: `torch.Tensor` or `np.ndarray`: Interpolated vector/tensor between v0 and v1. """ inputs_are_torch = False t_is_float = False if isinstance(v0, torch.Tensor): inputs_are_torch = True input_device = v0.device v0 = v0.cpu().numpy() v1 = v1.cpu().numpy() if isinstance(t, torch.Tensor): inputs_are_torch = True input_device = t.device t = t.cpu().numpy() elif isinstance(t, float): t_is_float = True t = np.array([t], dtype=v0.dtype) dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1))) if np.abs(dot) > DOT_THRESHOLD: # v1 and v2 are close to parallel # Use linear interpolation instead v2 = lerp(v0, v1, t) else: theta_0 = np.arccos(dot) sin_theta_0 = np.sin(theta_0) theta_t = theta_0 * t sin_theta_t = np.sin(theta_t) s0 = np.sin(theta_0 - theta_t) / sin_theta_0 s1 = sin_theta_t / sin_theta_0 s0 = s0[..., None] s1 = s1[..., None] v0 = v0[None, ...] v1 = v1[None, ...] v2 = s0 * v0 + s1 * v1 if t_is_float and v0.ndim > 1: assert v2.shape[0] == 1 v2 = np.squeeze(v2, axis=0) if inputs_are_torch: v2 = torch.from_numpy(v2).to(input_device) return v2 class LatentConsistencyModelWalkPipeline( DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin ): r""" Pipeline for text-to-image generation using a latent consistency model. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). The pipeline also inherits the following loading methods: - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Currently only supports [`LCMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. 
Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`~transformers.CLIPImageProcessor`]): A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`. requires_safety_checker (`bool`, *optional*, defaults to `True`): Whether the pipeline requires a safety checker component. """ model_cpu_offload_seq = "text_encoder->unet->vae" _optional_components = ["safety_checker", "feature_extractor"] _exclude_from_cpu_offload = ["safety_checker"] _callback_tensor_inputs = ["latents", "denoised", "prompt_embeds", "w_embedding"] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: LCMScheduler, safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ): super().__init__() if safety_checker is None and requires_safety_checker: logger.warning( f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure" " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered" " results in services or applications open to the public. Both the diffusers team and Hugging Face" " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling" " it only for use-cases that involve analyzing network behavior or auditing its results. For more" " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." ) if safety_checker is not None and feature_extractor is None: raise ValueError( "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety" " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead." ) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) self.register_to_config(requires_safety_checker=requires_safety_checker) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt def encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, clip_skip: Optional[int] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. """ # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale # dynamically adjust the LoRA scale if not USE_PEFT_BACKEND: adjust_lora_scale_text_encoder(self.text_encoder, lora_scale) else: scale_lora_layers(self.text_encoder, lora_scale) if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None if clip_skip is None: prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask) prompt_embeds = prompt_embeds[0] else: prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True ) # Access the `hidden_states` first, that contains a tuple of # all the hidden states from the encoder layers. Then index into # the tuple to access the hidden states from the desired layer. prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)] # We also need to apply the final LayerNorm here to not mess with the # representations. The `last_hidden_states` that we typically use for # obtaining the final prompt representations passes through the LayerNorm # layer. 
prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds) if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND: # Retrieve the original scale by scaling back the LoRA layers unscale_lora_layers(self.text_encoder, lora_scale) return prompt_embeds, negative_prompt_embeds # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker def run_safety_checker(self, image, device, dtype): if self.safety_checker is None: has_nsfw_concept = None else: if torch.is_tensor(image): feature_extractor_input = self.image_processor.postprocess(image, output_type="pil") else: feature_extractor_input = self.image_processor.numpy_to_pil(image) safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device) image, has_nsfw_concept = self.safety_checker( images=image, clip_input=safety_checker_input.pixel_values.to(dtype) ) return image, has_nsfw_concept # Copied from 
diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32): """ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298 Args: timesteps (`torch.Tensor`): generate embedding vectors at these timesteps embedding_dim (`int`, *optional*, defaults to 512): dimension of the embeddings to generate dtype: data type of the generated embeddings Returns: `torch.FloatTensor`: Embedding vectors with shape `(len(timesteps), embedding_dim)` """ assert len(w.shape) == 1 w = w * 1000.0 half_dim = embedding_dim // 2 emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb) emb = w.to(dtype)[:, None] * emb[None, :] emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) if embedding_dim % 2 == 1: # zero pad emb = torch.nn.functional.pad(emb, (0, 1)) assert emb.shape == (w.shape[0], embedding_dim) return emb # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs # Currently StableDiffusionPipeline.check_inputs with negative prompt stuff removed def check_inputs( self, prompt: Union[str, List[str]], height: int, width: int, callback_steps: int, prompt_embeds: Optional[torch.FloatTensor] = None, callback_on_step_end_tensor_inputs=None, ): if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." 
) if callback_on_step_end_tensor_inputs is not None and not all( k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs ): raise ValueError( f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}" ) if prompt is not None and prompt_embeds is not None: raise ValueError( f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" " only forward one of the two." ) elif prompt is None and prompt_embeds is None: raise ValueError( "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." ) elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") @torch.no_grad() def interpolate_embedding( self, start_embedding: torch.FloatTensor, end_embedding: torch.FloatTensor, num_interpolation_steps: Union[int, List[int]], interpolation_type: str, ) -> torch.FloatTensor: if interpolation_type == "lerp": interpolation_fn = lerp elif interpolation_type == "slerp": interpolation_fn = slerp else: raise ValueError( f"embedding_interpolation_type must be one of ['lerp', 'slerp'], got {interpolation_type}." ) embedding = torch.cat([start_embedding, end_embedding]) steps = torch.linspace(0, 1, num_interpolation_steps, dtype=embedding.dtype).cpu().numpy() steps = np.expand_dims(steps, axis=tuple(range(1, embedding.ndim))) interpolations = [] # Interpolate between text embeddings # TODO(aryan): Think of a better way of doing this # See if it can be done parallelly instead for i in range(embedding.shape[0] - 1): interpolations.append(interpolation_fn(embedding[i], embedding[i + 1], steps).squeeze(dim=1)) interpolations = torch.cat(interpolations) return interpolations @torch.no_grad() def interpolate_latent( self, start_latent: torch.FloatTensor, end_latent: torch.FloatTensor, num_interpolation_steps: Union[int, List[int]], interpolation_type: str, ) -> torch.FloatTensor: if interpolation_type == "lerp": interpolation_fn = lerp elif interpolation_type == "slerp": interpolation_fn = slerp latent = torch.cat([start_latent, end_latent]) steps = torch.linspace(0, 1, num_interpolation_steps, dtype=latent.dtype).cpu().numpy() steps = np.expand_dims(steps, axis=tuple(range(1, latent.ndim))) interpolations = [] # Interpolate between latents # TODO: Think of a better way of doing this # See if it can be done parallelly instead for i in range(latent.shape[0] - 1): interpolations.append(interpolation_fn(latent[i], latent[i + 1], steps).squeeze(dim=1)) return torch.cat(interpolations) @property def guidance_scale(self): return self._guidance_scale @property def cross_attention_kwargs(self): return self._cross_attention_kwargs @property def clip_skip(self): return self._clip_skip @property def num_timesteps(self): return self._num_timesteps @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Union[str, List[str]] = None, height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 4, num_interpolation_steps: int = 8, original_inference_steps: int = None, guidance_scale: float = 8.5, num_images_per_prompt: Optional[int] = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, prompt_embeds: Optional[torch.FloatTensor] = None, output_type: 
Optional[str] = "pil", return_dict: bool = True, cross_attention_kwargs: Optional[Dict[str, Any]] = None, clip_skip: Optional[int] = None, callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, callback_on_step_end_tensor_inputs: List[str] = ["latents"], embedding_interpolation_type: str = "lerp", latent_interpolation_type: str = "slerp", process_batch_size: int = 4, **kwargs, ): r""" The call function to the pipeline for generation. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`. height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The height in pixels of the generated image. width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`): The width in pixels of the generated image. num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. original_inference_steps (`int`, *optional*): The original number of inference steps use to generate a linearly-spaced timestep schedule, from which we will draw `num_inference_steps` evenly spaced timesteps from as our final timestep schedule, following the Skipping-Step method in the paper (see Section 4.3). If not set this will default to the scheduler's `original_inference_steps` attribute. guidance_scale (`float`, *optional*, defaults to 7.5): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. Note that the original latent consistency models paper uses a different CFG formulation where the guidance scales are decreased by 1 (so in the paper formulation CFG is enabled when `guidance_scale > 0`). num_images_per_prompt (`int`, *optional*, defaults to 1): The number of images to generate per prompt. generator (`torch.Generator` or `List[torch.Generator]`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. latents (`torch.FloatTensor`, *optional*): Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image generation. Can be used to tweak the same generation with different prompts. If not provided, a latents tensor is generated by sampling using the supplied random `generator`. prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not provided, text embeddings are generated from the `prompt` input argument. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. cross_attention_kwargs (`dict`, *optional*): A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). clip_skip (`int`, *optional*): Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that the output of the pre-final layer will be used for computing the prompt embeddings. 
callback_on_step_end (`Callable`, *optional*): A function that is called at the end of each denoising step during inference. The function is called with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by `callback_on_step_end_tensor_inputs`. callback_on_step_end_tensor_inputs (`List`, *optional*): The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the `._callback_tensor_inputs` attribute of your pipeline class. embedding_interpolation_type (`str`, *optional*, defaults to `"lerp"`): The type of interpolation to use for interpolating between text embeddings. Choose between `"lerp"` and `"slerp"`. latent_interpolation_type (`str`, *optional*, defaults to `"slerp"`): The type of interpolation to use for interpolating between latents. Choose between `"lerp"` and `"slerp"`. process_batch_size (`int`, *optional*, defaults to 4): The batch size to use for processing the images. This is useful when generating a large number of images and you want to avoid running out of memory. Examples: Returns: [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ callback = kwargs.pop("callback", None) callback_steps = kwargs.pop("callback_steps", None) if callback is not None: deprecate( "callback", "1.0.0", "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) if callback_steps is not None: deprecate( "callback_steps", "1.0.0", "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`", ) # 0. Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # 1. Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps, prompt_embeds, callback_on_step_end_tensor_inputs) self._guidance_scale = guidance_scale self._clip_skip = clip_skip self._cross_attention_kwargs = cross_attention_kwargs # 2.
Define call parameters if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if batch_size < 2: raise ValueError(f"`prompt` must have length of atleast 2 but found {batch_size}") if num_images_per_prompt != 1: raise ValueError("`num_images_per_prompt` must be `1` as no other value is supported yet") if prompt_embeds is not None: raise ValueError("`prompt_embeds` must be None since it is not supported yet") if latents is not None: raise ValueError("`latents` must be None since it is not supported yet") device = self._execution_device # do_classifier_free_guidance = guidance_scale > 1.0 lora_scale = ( self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None ) self.scheduler.set_timesteps(num_inference_steps, device, original_inference_steps=original_inference_steps) timesteps = self.scheduler.timesteps num_channels_latents = self.unet.config.in_channels # bs = batch_size * num_images_per_prompt # 3. Encode initial input prompt prompt_embeds_1, _ = self.encode_prompt( prompt[:1], device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=False, negative_prompt=None, prompt_embeds=prompt_embeds, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # 4. Prepare initial latent variables latents_1 = self.prepare_latents( 1, num_channels_latents, height, width, prompt_embeds_1.dtype, device, generator, latents, ) extra_step_kwargs = self.prepare_extra_step_kwargs(generator, None) num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order self._num_timesteps = len(timesteps) images = [] # 5. Iterate over prompts and perform latent walk. Note that we do this two prompts at a time # otherwise the memory usage ends up being too high. with self.progress_bar(total=batch_size - 1) as prompt_progress_bar: for i in range(1, batch_size): # 6. Encode current prompt prompt_embeds_2, _ = self.encode_prompt( prompt[i : i + 1], device, num_images_per_prompt=num_images_per_prompt, do_classifier_free_guidance=False, negative_prompt=None, prompt_embeds=prompt_embeds, negative_prompt_embeds=None, lora_scale=lora_scale, clip_skip=self.clip_skip, ) # 7. Prepare current latent variables latents_2 = self.prepare_latents( 1, num_channels_latents, height, width, prompt_embeds_2.dtype, device, generator, latents, ) # 8. Interpolate between previous and current prompt embeddings and latents inference_embeddings = self.interpolate_embedding( start_embedding=prompt_embeds_1, end_embedding=prompt_embeds_2, num_interpolation_steps=num_interpolation_steps, interpolation_type=embedding_interpolation_type, ) inference_latents = self.interpolate_latent( start_latent=latents_1, end_latent=latents_2, num_interpolation_steps=num_interpolation_steps, interpolation_type=latent_interpolation_type, ) next_prompt_embeds = inference_embeddings[-1:].detach().clone() next_latents = inference_latents[-1:].detach().clone() bs = num_interpolation_steps # 9. Perform inference in batches. Note the use of `process_batch_size` to control the batch size # of the inference. This is useful for reducing memory usage and can be configured based on the # available GPU memory. 
with self.progress_bar( total=(bs + process_batch_size - 1) // process_batch_size ) as batch_progress_bar: for batch_index in range(0, bs, process_batch_size): batch_inference_latents = inference_latents[batch_index : batch_index + process_batch_size] batch_inference_embedddings = inference_embeddings[ batch_index : batch_index + process_batch_size ] self.scheduler.set_timesteps( num_inference_steps, device, original_inference_steps=original_inference_steps ) timesteps = self.scheduler.timesteps current_bs = batch_inference_embedddings.shape[0] w = torch.tensor(self.guidance_scale - 1).repeat(current_bs) w_embedding = self.get_guidance_scale_embedding( w, embedding_dim=self.unet.config.time_cond_proj_dim ).to(device=device, dtype=latents_1.dtype) # 10. Perform inference for current batch with self.progress_bar(total=num_inference_steps) as progress_bar: for index, t in enumerate(timesteps): batch_inference_latents = batch_inference_latents.to(batch_inference_embedddings.dtype) # model prediction (v-prediction, eps, x) model_pred = self.unet( batch_inference_latents, t, timestep_cond=w_embedding, encoder_hidden_states=batch_inference_embedddings, cross_attention_kwargs=self.cross_attention_kwargs, return_dict=False, )[0] # compute the previous noisy sample x_t -> x_t-1 batch_inference_latents, denoised = self.scheduler.step( model_pred, t, batch_inference_latents, **extra_step_kwargs, return_dict=False ) if callback_on_step_end is not None: callback_kwargs = {} for k in callback_on_step_end_tensor_inputs: callback_kwargs[k] = locals()[k] callback_outputs = callback_on_step_end(self, index, t, callback_kwargs) batch_inference_latents = callback_outputs.pop("latents", batch_inference_latents) batch_inference_embedddings = callback_outputs.pop( "prompt_embeds", batch_inference_embedddings ) w_embedding = callback_outputs.pop("w_embedding", w_embedding) denoised = callback_outputs.pop("denoised", denoised) # call the callback, if provided if index == len(timesteps) - 1 or ( (index + 1) > num_warmup_steps and (index + 1) % self.scheduler.order == 0 ): progress_bar.update() if callback is not None and index % callback_steps == 0: step_idx = index // getattr(self.scheduler, "order", 1) callback(step_idx, t, batch_inference_latents) denoised = denoised.to(batch_inference_embedddings.dtype) # Note: This is not supported because you would get black images in your latent walk if # NSFW concept is detected # if not output_type == "latent": # image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] # image, has_nsfw_concept = self.run_safety_checker(image, device, inference_embeddings.dtype) # else: # image = denoised # has_nsfw_concept = None # if has_nsfw_concept is None: # do_denormalize = [True] * image.shape[0] # else: # do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept] image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0] do_denormalize = [True] * image.shape[0] has_nsfw_concept = None image = self.image_processor.postprocess( image, output_type=output_type, do_denormalize=do_denormalize ) images.append(image) batch_progress_bar.update() prompt_embeds_1 = next_prompt_embeds latents_1 = next_latents prompt_progress_bar.update() # 11. 
Determine what should be returned if output_type == "pil": images = [image for image_list in images for image in image_list] elif output_type == "np": images = np.concatenate(images) elif output_type == "pt": images = torch.cat(images) else: raise ValueError("`output_type` must be one of 'pil', 'np' or 'pt'.") # Offload all models self.maybe_free_model_hooks() if not return_dict: return (images, has_nsfw_concept) return StableDiffusionPipelineOutput(images=images, nsfw_content_detected=has_nsfw_concept)
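# Minimal sketch, not part of the upstream pipeline: it only illustrates the shape
# conventions of the module-level lerp/slerp helpers that drive the latent walk above.
# The vectors and step count below are hypothetical, chosen purely for clarity, and the
# block is guarded so it never runs when the module is imported as a custom pipeline.
if __name__ == "__main__":
    v0 = np.array([1.0, 0.0])
    v1 = np.array([0.0, 1.0])
    steps = np.linspace(0.0, 1.0, num=5)  # five interpolation factors in [0, 1]
    linear = lerp(v0, v1, steps)          # straight-line path, shape (5, 2)
    spherical = slerp(v0, v1, steps)      # great-circle path, shape (5, 2)
    assert linear.shape == spherical.shape == (5, 2)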
diffusers/examples/community/latent_consistency_interpolate.py/0
{ "file_path": "diffusers/examples/community/latent_consistency_interpolate.py", "repo_id": "diffusers", "token_count": 21992 }
105
# Copyright 2024 FABRIC authors and the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Union import torch from packaging import version from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, UNet2DConditionModel from diffusers.configuration_utils import FrozenDict from diffusers.image_processor import VaeImageProcessor from diffusers.loaders import LoraLoaderMixin, TextualInversionLoaderMixin from diffusers.models.attention import BasicTransformerBlock from diffusers.models.attention_processor import LoRAAttnProcessor from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.schedulers import EulerAncestralDiscreteScheduler, KarrasDiffusionSchedulers from diffusers.utils import ( deprecate, logging, replace_example_docstring, ) from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name EXAMPLE_DOC_STRING = """ Examples: ```py >>> from diffusers import DiffusionPipeline >>> import torch >>> model_id = "dreamlike-art/dreamlike-photoreal-2.0" >>> pipe = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, custom_pipeline="pipeline_fabric") >>> pipe = pipe.to("cuda") >>> prompt = "a giant standing in a fantasy landscape best quality" >>> liked = [] # list of images for positive feedback >>> disliked = [] # list of images for negative feedback >>> image = pipe(prompt, num_images=4, liked=liked, disliked=disliked).images[0] ``` """ class FabricCrossAttnProcessor: def __init__(self): self.attntion_probs = None def __call__( self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, weights=None, lora_scale=1.0, ): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) if isinstance(attn.processor, LoRAAttnProcessor): query = attn.to_q(hidden_states) + lora_scale * attn.processor.to_q_lora(hidden_states) else: query = attn.to_q(hidden_states) if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) if isinstance(attn.processor, LoRAAttnProcessor): key = attn.to_k(encoder_hidden_states) + lora_scale * attn.processor.to_k_lora(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) + lora_scale * attn.processor.to_v_lora(encoder_hidden_states) else: key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) if weights is not None: if weights.shape[0] != 1: weights = weights.repeat_interleave(attn.heads, dim=0) attention_probs =
attention_probs * weights[:, None] attention_probs = attention_probs / attention_probs.sum(dim=-1, keepdim=True) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj if isinstance(attn.processor, LoRAAttnProcessor): hidden_states = attn.to_out[0](hidden_states) + lora_scale * attn.processor.to_out_lora(hidden_states) else: hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class FabricPipeline(DiffusionPipeline): r""" Pipeline for text-to-image generation using Stable Diffusion and conditioning the results using feedback images. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`~transformers.CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer ([`~transformers.CLIPTokenizer`]): A `CLIPTokenizer` to tokenize text. unet ([`UNet2DConditionModel`]): A `UNet2DConditionModel` to denoise the encoded image latents. scheduler ([`EulerAncestralDiscreteScheduler`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: KarrasDiffusionSchedulers, requires_safety_checker: bool = True, ): super().__init__() is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. 
If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( unet=unet, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None, prompt_embeds: Optional[torch.FloatTensor] = None, negative_prompt_embeds: Optional[torch.FloatTensor] = None, lora_scale: Optional[float] = None, ): r""" Encodes the prompt into text encoder hidden states. Args: prompt (`str` or `List[str]`, *optional*): prompt to be encoded device: (`torch.device`): torch device num_images_per_prompt (`int`): number of images that should be generated per prompt do_classifier_free_guidance (`bool`): whether to use classifier free guidance or not negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, text embeddings will be generated from `prompt` input argument. negative_prompt_embeds (`torch.FloatTensor`, *optional*): Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. lora_scale (`float`, *optional*): A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded. 
""" # set lora scale so that monkey patched LoRA # function of text encoder can correctly access it if lora_scale is not None and isinstance(self, LoraLoaderMixin): self._lora_scale = lora_scale if prompt is not None and isinstance(prompt, str): batch_size = 1 elif prompt is not None and isinstance(prompt, list): batch_size = len(prompt) else: batch_size = prompt_embeds.shape[0] if prompt_embeds is None: # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): prompt = self.maybe_convert_prompt(prompt, self.tokenizer) text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal( text_input_ids, untruncated_ids ): removed_text = self.tokenizer.batch_decode( untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] if self.text_encoder is not None: prompt_embeds_dtype = self.text_encoder.dtype elif self.unet is not None: prompt_embeds_dtype = self.unet.dtype else: prompt_embeds_dtype = prompt_embeds.dtype prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) bs_embed, seq_len, _ = prompt_embeds.shape # duplicate text embeddings for each generation per prompt, using mps friendly method prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance and negative_prompt_embeds is None: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif prompt is not None and type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." 
) else: uncond_tokens = negative_prompt # textual inversion: process multi-vector tokens if necessary if isinstance(self, TextualInversionLoaderMixin): uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer) max_length = prompt_embeds.shape[1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None negative_prompt_embeds = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) negative_prompt_embeds = negative_prompt_embeds[0] if do_classifier_free_guidance: # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = negative_prompt_embeds.shape[1] negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device) negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1) negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) return prompt_embeds def get_unet_hidden_states(self, z_all, t, prompt_embd): cached_hidden_states = [] for module in self.unet.modules(): if isinstance(module, BasicTransformerBlock): def new_forward(self, hidden_states, *args, **kwargs): cached_hidden_states.append(hidden_states.clone().detach().cpu()) return self.old_forward(hidden_states, *args, **kwargs) module.attn1.old_forward = module.attn1.forward module.attn1.forward = new_forward.__get__(module.attn1) # run forward pass to cache hidden states, output can be discarded _ = self.unet(z_all, t, encoder_hidden_states=prompt_embd) # restore original forward pass for module in self.unet.modules(): if isinstance(module, BasicTransformerBlock): module.attn1.forward = module.attn1.old_forward del module.attn1.old_forward return cached_hidden_states def unet_forward_with_cached_hidden_states( self, z_all, t, prompt_embd, cached_pos_hiddens: Optional[List[torch.Tensor]] = None, cached_neg_hiddens: Optional[List[torch.Tensor]] = None, pos_weights=(0.8, 0.8), neg_weights=(0.5, 0.5), ): if cached_pos_hiddens is None and cached_neg_hiddens is None: return self.unet(z_all, t, encoder_hidden_states=prompt_embd) local_pos_weights = torch.linspace(*pos_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist() local_neg_weights = torch.linspace(*neg_weights, steps=len(self.unet.down_blocks) + 1)[:-1].tolist() for block, pos_weight, neg_weight in zip( self.unet.down_blocks + [self.unet.mid_block] + self.unet.up_blocks, local_pos_weights + [pos_weights[1]] + local_pos_weights[::-1], local_neg_weights + [neg_weights[1]] + local_neg_weights[::-1], ): for module in block.modules(): if isinstance(module, BasicTransformerBlock): def new_forward( self, hidden_states, pos_weight=pos_weight, neg_weight=neg_weight, **kwargs, ): cond_hiddens, uncond_hiddens = hidden_states.chunk(2, dim=0) batch_size, d_model = cond_hiddens.shape[:2] device, dtype = hidden_states.device, hidden_states.dtype weights = torch.ones(batch_size, d_model, device=device, dtype=dtype) out_pos = self.old_forward(hidden_states) out_neg = self.old_forward(hidden_states) 
if cached_pos_hiddens is not None: cached_pos_hs = cached_pos_hiddens.pop(0).to(hidden_states.device) cond_pos_hs = torch.cat([cond_hiddens, cached_pos_hs], dim=1) pos_weights = weights.clone().repeat(1, 1 + cached_pos_hs.shape[1] // d_model) pos_weights[:, d_model:] = pos_weight attn_with_weights = FabricCrossAttnProcessor() out_pos = attn_with_weights( self, cond_hiddens, encoder_hidden_states=cond_pos_hs, weights=pos_weights, ) else: out_pos = self.old_forward(cond_hiddens) if cached_neg_hiddens is not None: cached_neg_hs = cached_neg_hiddens.pop(0).to(hidden_states.device) uncond_neg_hs = torch.cat([uncond_hiddens, cached_neg_hs], dim=1) neg_weights = weights.clone().repeat(1, 1 + cached_neg_hs.shape[1] // d_model) neg_weights[:, d_model:] = neg_weight attn_with_weights = FabricCrossAttnProcessor() out_neg = attn_with_weights( self, uncond_hiddens, encoder_hidden_states=uncond_neg_hs, weights=neg_weights, ) else: out_neg = self.old_forward(uncond_hiddens) out = torch.cat([out_pos, out_neg], dim=0) return out module.attn1.old_forward = module.attn1.forward module.attn1.forward = new_forward.__get__(module.attn1) out = self.unet(z_all, t, encoder_hidden_states=prompt_embd) # restore original forward pass for module in self.unet.modules(): if isinstance(module, BasicTransformerBlock): module.attn1.forward = module.attn1.old_forward del module.attn1.old_forward return out def preprocess_feedback_images(self, images, vae, dim, device, dtype, generator) -> torch.tensor: images_t = [self.image_to_tensor(img, dim, dtype) for img in images] images_t = torch.stack(images_t).to(device) latents = vae.config.scaling_factor * vae.encode(images_t).latent_dist.sample(generator) return torch.cat([latents], dim=0) def check_inputs( self, prompt, negative_prompt=None, liked=None, disliked=None, height=None, width=None, ): if prompt is None: raise ValueError("Provide `prompt`. 
Cannot leave both `prompt` undefined.") elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if negative_prompt is not None and ( not isinstance(negative_prompt, str) and not isinstance(negative_prompt, list) ): raise ValueError(f"`negative_prompt` has to be of type `str` or `list` but is {type(negative_prompt)}") if liked is not None and not isinstance(liked, list): raise ValueError(f"`liked` has to be of type `list` but is {type(liked)}") if disliked is not None and not isinstance(disliked, list): raise ValueError(f"`disliked` has to be of type `list` but is {type(disliked)}") if height is not None and not isinstance(height, int): raise ValueError(f"`height` has to be of type `int` but is {type(height)}") if width is not None and not isinstance(width, int): raise ValueError(f"`width` has to be of type `int` but is {type(width)}") @torch.no_grad() @replace_example_docstring(EXAMPLE_DOC_STRING) def __call__( self, prompt: Optional[Union[str, List[str]]] = "", negative_prompt: Optional[Union[str, List[str]]] = "lowres, bad anatomy, bad hands, cropped, worst quality", liked: Optional[Union[List[str], List[Image.Image]]] = [], disliked: Optional[Union[List[str], List[Image.Image]]] = [], generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, height: int = 512, width: int = 512, return_dict: bool = True, num_images: int = 4, guidance_scale: float = 7.0, num_inference_steps: int = 20, output_type: Optional[str] = "pil", feedback_start_ratio: float = 0.33, feedback_end_ratio: float = 0.66, min_weight: float = 0.05, max_weight: float = 0.8, neg_scale: float = 0.5, pos_bottleneck_scale: float = 1.0, neg_bottleneck_scale: float = 1.0, latents: Optional[torch.FloatTensor] = None, ): r""" The call function to the pipeline for generation. Generate a trajectory of images with binary feedback. The feedback can be given as a list of liked and disliked images. Args: prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds` instead. negative_prompt (`str` or `List[str]`, *optional*): The prompt or prompts to guide what to not include in image generation. If not defined, you need to pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`). liked (`List[Image.Image]` or `List[str]`, *optional*): Encourages images with liked features. disliked (`List[Image.Image]` or `List[str]`, *optional*): Discourages images with disliked features. generator (`torch.Generator` or `List[torch.Generator]` or `int`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) or an `int` to make generation deterministic. height (`int`, *optional*, defaults to 512): Height of the generated image. width (`int`, *optional*, defaults to 512): Width of the generated image. num_images (`int`, *optional*, defaults to 4): The number of images to generate per prompt. guidance_scale (`float`, *optional*, defaults to 7.0): A higher guidance scale value encourages the model to generate images closely linked to the text `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`. num_inference_steps (`int`, *optional*, defaults to 20): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. 
output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generated image. Choose between `PIL.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a plain tuple. feedback_start_ratio (`float`, *optional*, defaults to `.33`): Start point for providing feedback (between 0 and 1). feedback_end_ratio (`float`, *optional*, defaults to `.66`): End point for providing feedback (between 0 and 1). min_weight (`float`, *optional*, defaults to `.05`): Minimum weight for feedback. max_weight (`float`, *optional*, defults tp `1.0`): Maximum weight for feedback. neg_scale (`float`, *optional*, defaults to `.5`): Scale factor for negative feedback. Examples: Returns: [`~pipelines.fabric.FabricPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated images and the second element is a list of `bool`s indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content. """ self.check_inputs(prompt, negative_prompt, liked, disliked) device = self._execution_device dtype = self.unet.dtype if isinstance(prompt, str) and prompt is not None: batch_size = 1 elif isinstance(prompt, list) and prompt is not None: batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if isinstance(negative_prompt, str): negative_prompt = negative_prompt elif isinstance(negative_prompt, list): negative_prompt = negative_prompt else: assert len(negative_prompt) == batch_size shape = ( batch_size * num_images, self.unet.config.in_channels, height // self.vae_scale_factor, width // self.vae_scale_factor, ) latent_noise = randn_tensor( shape, device=device, dtype=dtype, generator=generator, ) positive_latents = ( self.preprocess_feedback_images(liked, self.vae, (height, width), device, dtype, generator) if liked and len(liked) > 0 else torch.tensor( [], device=device, dtype=dtype, ) ) negative_latents = ( self.preprocess_feedback_images(disliked, self.vae, (height, width), device, dtype, generator) if disliked and len(disliked) > 0 else torch.tensor( [], device=device, dtype=dtype, ) ) do_classifier_free_guidance = guidance_scale > 0.1 (prompt_neg_embs, prompt_pos_embs) = self._encode_prompt( prompt, device, num_images, do_classifier_free_guidance, negative_prompt, ).split([num_images * batch_size, num_images * batch_size]) batched_prompt_embd = torch.cat([prompt_pos_embs, prompt_neg_embs], dim=0) null_tokens = self.tokenizer( [""], return_tensors="pt", max_length=self.tokenizer.model_max_length, padding="max_length", truncation=True, ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = null_tokens.attention_mask.to(device) else: attention_mask = None null_prompt_emb = self.text_encoder( input_ids=null_tokens.input_ids.to(device), attention_mask=attention_mask, ).last_hidden_state null_prompt_emb = null_prompt_emb.to(device=device, dtype=dtype) self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps latent_noise = latent_noise * self.scheduler.init_noise_sigma num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order ref_start_idx = round(len(timesteps) * feedback_start_ratio) ref_end_idx = round(len(timesteps) 
* feedback_end_ratio) with self.progress_bar(total=num_inference_steps) as pbar: for i, t in enumerate(timesteps): sigma = self.scheduler.sigma_t[t] if hasattr(self.scheduler, "sigma_t") else 0 if hasattr(self.scheduler, "sigmas"): sigma = self.scheduler.sigmas[i] alpha_hat = 1 / (sigma**2 + 1) z_single = self.scheduler.scale_model_input(latent_noise, t) z_all = torch.cat([z_single] * 2, dim=0) z_ref = torch.cat([positive_latents, negative_latents], dim=0) if i >= ref_start_idx and i <= ref_end_idx: weight_factor = max_weight else: weight_factor = min_weight pos_ws = (weight_factor, weight_factor * pos_bottleneck_scale) neg_ws = (weight_factor * neg_scale, weight_factor * neg_scale * neg_bottleneck_scale) if z_ref.size(0) > 0 and weight_factor > 0: noise = torch.randn_like(z_ref) if isinstance(self.scheduler, EulerAncestralDiscreteScheduler): z_ref_noised = (alpha_hat**0.5 * z_ref + (1 - alpha_hat) ** 0.5 * noise).type(dtype) else: z_ref_noised = self.scheduler.add_noise(z_ref, noise, t) ref_prompt_embd = torch.cat( [null_prompt_emb] * (len(positive_latents) + len(negative_latents)), dim=0 ) cached_hidden_states = self.get_unet_hidden_states(z_ref_noised, t, ref_prompt_embd) n_pos, n_neg = positive_latents.shape[0], negative_latents.shape[0] cached_pos_hs, cached_neg_hs = [], [] for hs in cached_hidden_states: cached_pos, cached_neg = hs.split([n_pos, n_neg], dim=0) cached_pos = cached_pos.view(1, -1, *cached_pos.shape[2:]).expand(num_images, -1, -1) cached_neg = cached_neg.view(1, -1, *cached_neg.shape[2:]).expand(num_images, -1, -1) cached_pos_hs.append(cached_pos) cached_neg_hs.append(cached_neg) if n_pos == 0: cached_pos_hs = None if n_neg == 0: cached_neg_hs = None else: cached_pos_hs, cached_neg_hs = None, None unet_out = self.unet_forward_with_cached_hidden_states( z_all, t, prompt_embd=batched_prompt_embd, cached_pos_hiddens=cached_pos_hs, cached_neg_hiddens=cached_neg_hs, pos_weights=pos_ws, neg_weights=neg_ws, )[0] noise_cond, noise_uncond = unet_out.chunk(2) guidance = noise_cond - noise_uncond noise_pred = noise_uncond + guidance_scale * guidance latent_noise = self.scheduler.step(noise_pred, t, latent_noise)[0] if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): pbar.update() y = self.vae.decode(latent_noise / self.vae.config.scaling_factor, return_dict=False)[0] imgs = self.image_processor.postprocess( y, output_type=output_type, ) if not return_dict: return imgs return StableDiffusionPipelineOutput(imgs, False) def image_to_tensor(self, image: Union[str, Image.Image], dim: tuple, dtype): """ Convert latent PIL image to a torch tensor for further processing. """ if isinstance(image, str): image = Image.open(image) if not image.mode == "RGB": image = image.convert("RGB") image = self.image_processor.preprocess(image, height=dim[0], width=dim[1])[0] return image.type(dtype)
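For reference, a minimal usage sketch of the pipeline defined above (not part of the original file). It assumes this module is loaded as the `pipeline_fabric` community pipeline on top of a Stable Diffusion 1.5 checkpoint, and the feedback image paths are placeholders; only arguments defined in `__call__` above are used.

```python
import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Assumption: this file is resolved via `custom_pipeline="pipeline_fabric"`.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="pipeline_fabric",
    torch_dtype=torch.float16,
)
pipe.to("cuda")

# Placeholder feedback images collected from earlier generations.
liked = [Image.open("liked_sample.png")]
disliked = [Image.open("disliked_sample.png")]

output = pipe(
    prompt="photo of a house, high quality",
    liked=liked,
    disliked=disliked,
    num_images=4,
    num_inference_steps=20,
    guidance_scale=7.0,
)
output.images[0].save("fabric_output.png")
```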
diffusers/examples/community/pipeline_fabric.py/0
{ "file_path": "diffusers/examples/community/pipeline_fabric.py", "repo_id": "diffusers", "token_count": 16483 }
106
import math import tempfile from typing import List, Optional import numpy as np import PIL.Image import torch from accelerate import Accelerator from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel from diffusers.loaders import AttnProcsLayers, LoraLoaderMixin from diffusers.models.attention_processor import ( AttnAddedKVProcessor, AttnAddedKVProcessor2_0, LoRAAttnAddedKVProcessor, LoRAAttnProcessor, LoRAAttnProcessor2_0, SlicedAttnAddedKVProcessor, ) from diffusers.optimization import get_scheduler class SdeDragPipeline(DiffusionPipeline): r""" Pipeline for image drag-and-drop editing using stochastic differential equations: https://arxiv.org/abs/2311.01410. Please refer to the [official repository](https://github.com/ML-GSAI/SDE-Drag) for more information. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder. Stable Diffusion uses the text portion of [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. tokenizer (`CLIPTokenizer`): Tokenizer of class [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Please use [`DDIMScheduler`]. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: DPMSolverMultistepScheduler, ): super().__init__() self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, prompt: str, image: PIL.Image.Image, mask_image: PIL.Image.Image, source_points: List[List[int]], target_points: List[List[int]], t0: Optional[float] = 0.6, steps: Optional[int] = 200, step_size: Optional[int] = 2, image_scale: Optional[float] = 0.3, adapt_radius: Optional[int] = 5, min_lora_scale: Optional[float] = 0.5, generator: Optional[torch.Generator] = None, ): r""" Function invoked when calling the pipeline for image editing. Args: prompt (`str`, *required*): The prompt to guide the image editing. image (`PIL.Image.Image`, *required*): Which will be edited, parts of the image will be masked out with `mask_image` and edited according to `prompt`. mask_image (`PIL.Image.Image`, *required*): To mask `image`. White pixels in the mask will be edited, while black pixels will be preserved. source_points (`List[List[int]]`, *required*): Used to mark the starting positions of drag editing in the image, with each pixel represented as a `List[int]` of length 2. target_points (`List[List[int]]`, *required*): Used to mark the target positions of drag editing in the image, with each pixel represented as a `List[int]` of length 2. t0 (`float`, *optional*, defaults to 0.6): The time parameter. 
Higher t0 improves the fidelity while lowering the faithfulness of the edited images and vice versa.
            steps (`int`, *optional*, defaults to 200):
                The number of sampling iterations.
            step_size (`int`, *optional*, defaults to 2):
                The drag distance of each drag step.
            image_scale (`float`, *optional*, defaults to 0.3):
                To avoid duplicating the content, use `image_scale` to perturb the source.
            adapt_radius (`int`, *optional*, defaults to 5):
                The size of the region for copy and paste operations during each step of the drag process.
            min_lora_scale (`float`, *optional*, defaults to 0.5):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
                min_lora_scale specifies the minimum LoRA scale during the image drag-editing process.
            generator (`torch.Generator`, *optional*, defaults to None):
                To make generation deterministic (https://pytorch.org/docs/stable/generated/torch.Generator.html).

        Examples:

        ```py
        >>> import PIL
        >>> import torch
        >>> from diffusers import DDIMScheduler, DiffusionPipeline

        >>> # Load the pipeline
        >>> model_path = "runwayml/stable-diffusion-v1-5"
        >>> scheduler = DDIMScheduler.from_pretrained(model_path, subfolder="scheduler")
        >>> pipe = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler, custom_pipeline="sde_drag")
        >>> pipe.to('cuda')

        >>> # To save GPU memory, torch.float16 can be used, but it may compromise image quality.
        >>> # If not training LoRA, please avoid using torch.float16
        >>> # pipe.to(torch.float16)

        >>> # Provide prompt, image, mask image, and the starting and target points for drag editing.
        >>> prompt = "prompt of the image"
        >>> image = PIL.Image.open('/path/to/image')
        >>> mask_image = PIL.Image.open('/path/to/mask_image')
        >>> source_points = [[123, 456]]
        >>> target_points = [[234, 567]]

        >>> # train_lora is optional, and in most cases, using train_lora can better preserve consistency with the original image.
>>> pipe.train_lora(prompt, image) >>> output = pipe(prompt, image, mask_image, source_points, target_points) >>> output_image = PIL.Image.fromarray(output) >>> output_image.save("./output.png") ``` """ self.scheduler.set_timesteps(steps) noise_scale = (1 - image_scale**2) ** (0.5) text_embeddings = self._get_text_embed(prompt) uncond_embeddings = self._get_text_embed([""]) text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) latent = self._get_img_latent(image) mask = mask_image.resize((latent.shape[3], latent.shape[2])) mask = torch.tensor(np.array(mask)) mask = mask.unsqueeze(0).expand_as(latent).to(self.device) source_points = torch.tensor(source_points).div(torch.tensor([8]), rounding_mode="trunc") target_points = torch.tensor(target_points).div(torch.tensor([8]), rounding_mode="trunc") distance = target_points - source_points distance_norm_max = torch.norm(distance.float(), dim=1, keepdim=True).max() if distance_norm_max <= step_size: drag_num = 1 else: drag_num = distance_norm_max.div(torch.tensor([step_size]), rounding_mode="trunc") if (distance_norm_max / drag_num - step_size).abs() > ( distance_norm_max / (drag_num + 1) - step_size ).abs(): drag_num += 1 latents = [] for i in tqdm(range(int(drag_num)), desc="SDE Drag"): source_new = source_points + (i / drag_num * distance).to(torch.int) target_new = source_points + ((i + 1) / drag_num * distance).to(torch.int) latent, noises, hook_latents, lora_scales, cfg_scales = self._forward( latent, steps, t0, min_lora_scale, text_embeddings, generator ) latent = self._copy_and_paste( latent, source_new, target_new, adapt_radius, latent.shape[2] - 1, latent.shape[3] - 1, image_scale, noise_scale, generator, ) latent = self._backward( latent, mask, steps, t0, noises, hook_latents, lora_scales, cfg_scales, text_embeddings, generator ) latents.append(latent) result_image = 1 / 0.18215 * latents[-1] with torch.no_grad(): result_image = self.vae.decode(result_image).sample result_image = (result_image / 2 + 0.5).clamp(0, 1) result_image = result_image.cpu().permute(0, 2, 3, 1).numpy()[0] result_image = (result_image * 255).astype(np.uint8) return result_image def train_lora(self, prompt, image, lora_step=100, lora_rank=16, generator=None): accelerator = Accelerator(gradient_accumulation_steps=1, mixed_precision="fp16") self.vae.requires_grad_(False) self.text_encoder.requires_grad_(False) self.unet.requires_grad_(False) unet_lora_attn_procs = {} for name, attn_processor in self.unet.attn_processors.items(): cross_attention_dim = None if name.endswith("attn1.processor") else self.unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = self.unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(self.unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = self.unet.config.block_out_channels[block_id] else: raise NotImplementedError("name must start with up_blocks, mid_blocks, or down_blocks") if isinstance(attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)): lora_attn_processor_class = LoRAAttnAddedKVProcessor else: lora_attn_processor_class = ( LoRAAttnProcessor2_0 if hasattr(torch.nn.functional, "scaled_dot_product_attention") else LoRAAttnProcessor ) unet_lora_attn_procs[name] = lora_attn_processor_class( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, rank=lora_rank ) 
self.unet.set_attn_processor(unet_lora_attn_procs) unet_lora_layers = AttnProcsLayers(self.unet.attn_processors) params_to_optimize = unet_lora_layers.parameters() optimizer = torch.optim.AdamW( params_to_optimize, lr=2e-4, betas=(0.9, 0.999), weight_decay=1e-2, eps=1e-08, ) lr_scheduler = get_scheduler( "constant", optimizer=optimizer, num_warmup_steps=0, num_training_steps=lora_step, num_cycles=1, power=1.0, ) unet_lora_layers = accelerator.prepare_model(unet_lora_layers) optimizer = accelerator.prepare_optimizer(optimizer) lr_scheduler = accelerator.prepare_scheduler(lr_scheduler) with torch.no_grad(): text_inputs = self._tokenize_prompt(prompt, tokenizer_max_length=None) text_embedding = self._encode_prompt( text_inputs.input_ids, text_inputs.attention_mask, text_encoder_use_attention_mask=False ) image_transforms = transforms.Compose( [ transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) image = image_transforms(image).to(self.device, dtype=self.vae.dtype) image = image.unsqueeze(dim=0) latents_dist = self.vae.encode(image).latent_dist for _ in tqdm(range(lora_step), desc="Train LoRA"): self.unet.train() model_input = latents_dist.sample() * self.vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn( model_input.size(), dtype=model_input.dtype, layout=model_input.layout, device=model_input.device, generator=generator, ) bsz, channels, height, width = model_input.shape # Sample a random timestep for each image timesteps = torch.randint( 0, self.scheduler.config.num_train_timesteps, (bsz,), device=model_input.device, generator=generator ) timesteps = timesteps.long() # Add noise to the model input according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_model_input = self.scheduler.add_noise(model_input, noise, timesteps) # Predict the noise residual model_pred = self.unet(noisy_model_input, timesteps, text_embedding).sample # Get the target for loss depending on the prediction type if self.scheduler.config.prediction_type == "epsilon": target = noise elif self.scheduler.config.prediction_type == "v_prediction": target = self.scheduler.get_velocity(model_input, noise, timesteps) else: raise ValueError(f"Unknown prediction type {self.scheduler.config.prediction_type}") loss = torch.nn.functional.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() with tempfile.TemporaryDirectory() as save_lora_dir: LoraLoaderMixin.save_lora_weights( save_directory=save_lora_dir, unet_lora_layers=unet_lora_layers, text_encoder_lora_layers=None, ) self.unet.load_attn_procs(save_lora_dir) def _tokenize_prompt(self, prompt, tokenizer_max_length=None): if tokenizer_max_length is not None: max_length = tokenizer_max_length else: max_length = self.tokenizer.model_max_length text_inputs = self.tokenizer( prompt, truncation=True, padding="max_length", max_length=max_length, return_tensors="pt", ) return text_inputs def _encode_prompt(self, input_ids, attention_mask, text_encoder_use_attention_mask=False): text_input_ids = input_ids.to(self.device) if text_encoder_use_attention_mask: attention_mask = attention_mask.to(self.device) else: attention_mask = None prompt_embeds = self.text_encoder( text_input_ids, attention_mask=attention_mask, ) prompt_embeds = prompt_embeds[0] return prompt_embeds @torch.no_grad() def _get_text_embed(self, prompt): text_input = self.tokenizer( prompt, padding="max_length", 
max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0] return text_embeddings def _copy_and_paste( self, latent, source_new, target_new, adapt_radius, max_height, max_width, image_scale, noise_scale, generator ): def adaption_r(source, target, adapt_radius, max_height, max_width): r_x_lower = min(adapt_radius, source[0], target[0]) r_x_upper = min(adapt_radius, max_width - source[0], max_width - target[0]) r_y_lower = min(adapt_radius, source[1], target[1]) r_y_upper = min(adapt_radius, max_height - source[1], max_height - target[1]) return r_x_lower, r_x_upper, r_y_lower, r_y_upper for source_, target_ in zip(source_new, target_new): r_x_lower, r_x_upper, r_y_lower, r_y_upper = adaption_r( source_, target_, adapt_radius, max_height, max_width ) source_feature = latent[ :, :, source_[1] - r_y_lower : source_[1] + r_y_upper, source_[0] - r_x_lower : source_[0] + r_x_upper ].clone() latent[ :, :, source_[1] - r_y_lower : source_[1] + r_y_upper, source_[0] - r_x_lower : source_[0] + r_x_upper ] = image_scale * source_feature + noise_scale * torch.randn( latent.shape[0], 4, r_y_lower + r_y_upper, r_x_lower + r_x_upper, device=self.device, generator=generator, ) latent[ :, :, target_[1] - r_y_lower : target_[1] + r_y_upper, target_[0] - r_x_lower : target_[0] + r_x_upper ] = source_feature * 1.1 return latent @torch.no_grad() def _get_img_latent(self, image, height=None, weight=None): data = image.convert("RGB") if height is not None: data = data.resize((weight, height)) transform = transforms.ToTensor() data = transform(data).unsqueeze(0) data = (data * 2.0) - 1.0 data = data.to(self.device, dtype=self.vae.dtype) latent = self.vae.encode(data).latent_dist.sample() latent = 0.18215 * latent return latent @torch.no_grad() def _get_eps(self, latent, timestep, guidance_scale, text_embeddings, lora_scale=None): latent_model_input = torch.cat([latent] * 2) if guidance_scale > 1.0 else latent text_embeddings = text_embeddings if guidance_scale > 1.0 else text_embeddings.chunk(2)[1] cross_attention_kwargs = None if lora_scale is None else {"scale": lora_scale} with torch.no_grad(): noise_pred = self.unet( latent_model_input, timestep, encoder_hidden_states=text_embeddings, cross_attention_kwargs=cross_attention_kwargs, ).sample if guidance_scale > 1.0: noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) elif guidance_scale == 1.0: noise_pred_text = noise_pred noise_pred_uncond = 0.0 else: raise NotImplementedError(guidance_scale) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) return noise_pred def _forward_sde( self, timestep, sample, guidance_scale, text_embeddings, steps, eta=1.0, lora_scale=None, generator=None ): num_train_timesteps = len(self.scheduler) alphas_cumprod = self.scheduler.alphas_cumprod initial_alpha_cumprod = torch.tensor(1.0) prev_timestep = timestep + num_train_timesteps // steps alpha_prod_t = alphas_cumprod[timestep] if timestep >= 0 else initial_alpha_cumprod alpha_prod_t_prev = alphas_cumprod[prev_timestep] beta_prod_t_prev = 1 - alpha_prod_t_prev x_prev = (alpha_prod_t_prev / alpha_prod_t) ** (0.5) * sample + (1 - alpha_prod_t_prev / alpha_prod_t) ** ( 0.5 ) * torch.randn( sample.size(), dtype=sample.dtype, layout=sample.layout, device=self.device, generator=generator ) eps = self._get_eps(x_prev, prev_timestep, guidance_scale, text_embeddings, lora_scale) sigma_t_prev = ( eta * (1 - alpha_prod_t) ** (0.5) * (1 - 
alpha_prod_t_prev / (1 - alpha_prod_t_prev) * (1 - alpha_prod_t) / alpha_prod_t) ** (0.5) ) pred_original_sample = (x_prev - beta_prod_t_prev ** (0.5) * eps) / alpha_prod_t_prev ** (0.5) pred_sample_direction_coeff = (1 - alpha_prod_t - sigma_t_prev**2) ** (0.5) noise = ( sample - alpha_prod_t ** (0.5) * pred_original_sample - pred_sample_direction_coeff * eps ) / sigma_t_prev return x_prev, noise def _sample( self, timestep, sample, guidance_scale, text_embeddings, steps, sde=False, noise=None, eta=1.0, lora_scale=None, generator=None, ): num_train_timesteps = len(self.scheduler) alphas_cumprod = self.scheduler.alphas_cumprod final_alpha_cumprod = torch.tensor(1.0) eps = self._get_eps(sample, timestep, guidance_scale, text_embeddings, lora_scale) prev_timestep = timestep - num_train_timesteps // steps alpha_prod_t = alphas_cumprod[timestep] alpha_prod_t_prev = alphas_cumprod[prev_timestep] if prev_timestep >= 0 else final_alpha_cumprod beta_prod_t = 1 - alpha_prod_t sigma_t = ( eta * ((1 - alpha_prod_t_prev) / (1 - alpha_prod_t)) ** (0.5) * (1 - alpha_prod_t / alpha_prod_t_prev) ** (0.5) if sde else 0 ) pred_original_sample = (sample - beta_prod_t ** (0.5) * eps) / alpha_prod_t ** (0.5) pred_sample_direction_coeff = (1 - alpha_prod_t_prev - sigma_t**2) ** (0.5) noise = ( torch.randn( sample.size(), dtype=sample.dtype, layout=sample.layout, device=self.device, generator=generator ) if noise is None else noise ) latent = ( alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction_coeff * eps + sigma_t * noise ) return latent def _forward(self, latent, steps, t0, lora_scale_min, text_embeddings, generator): def scale_schedule(begin, end, n, length, type="linear"): if type == "constant": return end elif type == "linear": return begin + (end - begin) * n / length elif type == "cos": factor = (1 - math.cos(n * math.pi / length)) / 2 return (1 - factor) * begin + factor * end else: raise NotImplementedError(type) noises = [] latents = [] lora_scales = [] cfg_scales = [] latents.append(latent) t0 = int(t0 * steps) t_begin = steps - t0 length = len(self.scheduler.timesteps[t_begin - 1 : -1]) - 1 index = 1 for t in self.scheduler.timesteps[t_begin:].flip(dims=[0]): lora_scale = scale_schedule(1, lora_scale_min, index, length, type="cos") cfg_scale = scale_schedule(1, 3.0, index, length, type="linear") latent, noise = self._forward_sde( t, latent, cfg_scale, text_embeddings, steps, lora_scale=lora_scale, generator=generator ) noises.append(noise) latents.append(latent) lora_scales.append(lora_scale) cfg_scales.append(cfg_scale) index += 1 return latent, noises, latents, lora_scales, cfg_scales def _backward( self, latent, mask, steps, t0, noises, hook_latents, lora_scales, cfg_scales, text_embeddings, generator ): t0 = int(t0 * steps) t_begin = steps - t0 hook_latent = hook_latents.pop() latent = torch.where(mask > 128, latent, hook_latent) for t in self.scheduler.timesteps[t_begin - 1 : -1]: latent = self._sample( t, latent, cfg_scales.pop(), text_embeddings, steps, sde=True, noise=noises.pop(), lora_scale=lora_scales.pop(), generator=generator, ) hook_latent = hook_latents.pop() latent = torch.where(mask > 128, latent, hook_latent) return latent
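For reference, a reading of the update implemented by `_sample` above (not part of the original file), writing $\bar\alpha$ for `alphas_cumprod`, $\epsilon_\theta$ for the predicted noise, and $\eta$ for `eta`; it is the standard DDIM step with optional stochasticity:

$$
\hat{x}_0 = \frac{x_t - \sqrt{1-\bar\alpha_t}\,\epsilon_\theta(x_t)}{\sqrt{\bar\alpha_t}},\qquad
x_{t'} = \sqrt{\bar\alpha_{t'}}\,\hat{x}_0 + \sqrt{1-\bar\alpha_{t'}-\sigma_t^2}\,\epsilon_\theta(x_t) + \sigma_t z,\quad z\sim\mathcal{N}(0,I),
$$

with

$$
\sigma_t = \eta\,\sqrt{\frac{1-\bar\alpha_{t'}}{1-\bar\alpha_t}}\,\sqrt{1-\frac{\bar\alpha_t}{\bar\alpha_{t'}}}
$$

when `sde=True` and $\sigma_t = 0$ otherwise, where $t'$ is the previous timestep `t - num_train_timesteps // steps`. `_forward_sde` solves the same relation for the noise term, which `_backward` later feeds back into `_sample`.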
diffusers/examples/community/sde_drag.py/0
{ "file_path": "diffusers/examples/community/sde_drag.py", "repo_id": "diffusers", "token_count": 11664 }
107
import types from typing import List, Optional, Tuple, Union import torch from transformers import CLIPTextModelWithProjection, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPTextModelOutput from diffusers.models import PriorTransformer from diffusers.pipelines import DiffusionPipeline, StableDiffusionImageVariationPipeline from diffusers.schedulers import UnCLIPScheduler from diffusers.utils import logging from diffusers.utils.torch_utils import randn_tensor logger = logging.get_logger(__name__) # pylint: disable=invalid-name def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance): image = image.to(device=device) image_embeddings = image # take image as image_embeddings image_embeddings = image_embeddings.unsqueeze(1) # duplicate image embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = image_embeddings.shape image_embeddings = image_embeddings.repeat(1, num_images_per_prompt, 1) image_embeddings = image_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1) if do_classifier_free_guidance: uncond_embeddings = torch.zeros_like(image_embeddings) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes image_embeddings = torch.cat([uncond_embeddings, image_embeddings]) return image_embeddings class StableUnCLIPPipeline(DiffusionPipeline): def __init__( self, prior: PriorTransformer, tokenizer: CLIPTokenizer, text_encoder: CLIPTextModelWithProjection, prior_scheduler: UnCLIPScheduler, decoder_pipe_kwargs: Optional[dict] = None, ): super().__init__() decoder_pipe_kwargs = {"image_encoder": None} if decoder_pipe_kwargs is None else decoder_pipe_kwargs decoder_pipe_kwargs["torch_dtype"] = decoder_pipe_kwargs.get("torch_dtype", None) or prior.dtype self.decoder_pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", **decoder_pipe_kwargs ) # replace `_encode_image` method self.decoder_pipe._encode_image = types.MethodType(_encode_image, self.decoder_pipe) self.register_modules( prior=prior, tokenizer=tokenizer, text_encoder=text_encoder, prior_scheduler=prior_scheduler, ) def _encode_prompt( self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, ): if text_model_output is None: batch_size = len(prompt) if isinstance(prompt, list) else 1 # get prompt text embeddings text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ) text_input_ids = text_inputs.input_ids text_mask = text_inputs.attention_mask.bool().to(device) if text_input_ids.shape[-1] > self.tokenizer.model_max_length: removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length] text_encoder_output = self.text_encoder(text_input_ids.to(device)) text_embeddings = text_encoder_output.text_embeds text_encoder_hidden_states = text_encoder_output.last_hidden_state else: batch_size = text_model_output[0].shape[0] text_embeddings, text_encoder_hidden_states = text_model_output[0], 
text_model_output[1] text_mask = text_attention_mask text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0) text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0) text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0) if do_classifier_free_guidance: uncond_tokens = [""] * batch_size uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) uncond_text_mask = uncond_input.attention_mask.bool().to(device) uncond_embeddings_text_encoder_output = self.text_encoder(uncond_input.input_ids.to(device)) uncond_embeddings = uncond_embeddings_text_encoder_output.text_embeds uncond_text_encoder_hidden_states = uncond_embeddings_text_encoder_output.last_hidden_state # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt) uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len) seq_len = uncond_text_encoder_hidden_states.shape[1] uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1) uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt, seq_len, -1 ) uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0) # done duplicates # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states]) text_mask = torch.cat([uncond_text_mask, text_mask]) return text_embeddings, text_encoder_hidden_states, text_mask @property def _execution_device(self): r""" Returns the device on which the pipeline's models will be executed. After calling `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module hooks. 
""" if self.device != torch.device("meta") or not hasattr(self.prior, "_hf_hook"): return self.device for module in self.prior.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def prepare_latents(self, shape, dtype, device, generator, latents, scheduler): if latents is None: latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) latents = latents * scheduler.init_noise_sigma return latents def to(self, torch_device: Optional[Union[str, torch.device]] = None): self.decoder_pipe.to(torch_device) super().to(torch_device) @torch.no_grad() def __call__( self, prompt: Optional[Union[str, List[str]]] = None, height: Optional[int] = None, width: Optional[int] = None, num_images_per_prompt: int = 1, prior_num_inference_steps: int = 25, generator: Optional[torch.Generator] = None, prior_latents: Optional[torch.FloatTensor] = None, text_model_output: Optional[Union[CLIPTextModelOutput, Tuple]] = None, text_attention_mask: Optional[torch.Tensor] = None, prior_guidance_scale: float = 4.0, decoder_guidance_scale: float = 8.0, decoder_num_inference_steps: int = 50, decoder_num_images_per_prompt: Optional[int] = 1, decoder_eta: float = 0.0, output_type: Optional[str] = "pil", return_dict: bool = True, ): if prompt is not None: if isinstance(prompt, str): batch_size = 1 elif isinstance(prompt, list): batch_size = len(prompt) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") else: batch_size = text_model_output[0].shape[0] device = self._execution_device batch_size = batch_size * num_images_per_prompt do_classifier_free_guidance = prior_guidance_scale > 1.0 or decoder_guidance_scale > 1.0 text_embeddings, text_encoder_hidden_states, text_mask = self._encode_prompt( prompt, device, num_images_per_prompt, do_classifier_free_guidance, text_model_output, text_attention_mask ) # prior self.prior_scheduler.set_timesteps(prior_num_inference_steps, device=device) prior_timesteps_tensor = self.prior_scheduler.timesteps embedding_dim = self.prior.config.embedding_dim prior_latents = self.prepare_latents( (batch_size, embedding_dim), text_embeddings.dtype, device, generator, prior_latents, self.prior_scheduler, ) for i, t in enumerate(self.progress_bar(prior_timesteps_tensor)): # expand the latents if we are doing classifier free guidance latent_model_input = torch.cat([prior_latents] * 2) if do_classifier_free_guidance else prior_latents predicted_image_embedding = self.prior( latent_model_input, timestep=t, proj_embedding=text_embeddings, encoder_hidden_states=text_encoder_hidden_states, attention_mask=text_mask, ).predicted_image_embedding if do_classifier_free_guidance: predicted_image_embedding_uncond, predicted_image_embedding_text = predicted_image_embedding.chunk(2) predicted_image_embedding = predicted_image_embedding_uncond + prior_guidance_scale * ( predicted_image_embedding_text - predicted_image_embedding_uncond ) if i + 1 == prior_timesteps_tensor.shape[0]: prev_timestep = None else: prev_timestep = prior_timesteps_tensor[i + 1] prior_latents = self.prior_scheduler.step( predicted_image_embedding, timestep=t, sample=prior_latents, generator=generator, prev_timestep=prev_timestep, ).prev_sample prior_latents = 
self.prior.post_process_latents(prior_latents) image_embeddings = prior_latents output = self.decoder_pipe( image=image_embeddings, height=height, width=width, num_inference_steps=decoder_num_inference_steps, guidance_scale=decoder_guidance_scale, generator=generator, output_type=output_type, return_dict=return_dict, num_images_per_prompt=decoder_num_images_per_prompt, eta=decoder_eta, ) return output
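For reference, a minimal construction sketch for this pipeline (not part of the original file). It assumes the `kakaobrain/karlo-v1-alpha` unCLIP checkpoint provides compatible `prior`, `tokenizer`, `text_encoder`, and `prior_scheduler` subfolders; any unCLIP-style checkpoint with the same components could be substituted.

```python
import torch
from transformers import CLIPTextModelWithProjection, CLIPTokenizer
from diffusers.models import PriorTransformer
from diffusers.schedulers import UnCLIPScheduler

# Assumption: an unCLIP checkpoint exposing the required subfolders.
prior_model_id = "kakaobrain/karlo-v1-alpha"

prior = PriorTransformer.from_pretrained(prior_model_id, subfolder="prior")
tokenizer = CLIPTokenizer.from_pretrained(prior_model_id, subfolder="tokenizer")
text_encoder = CLIPTextModelWithProjection.from_pretrained(prior_model_id, subfolder="text_encoder")
prior_scheduler = UnCLIPScheduler.from_pretrained(prior_model_id, subfolder="prior_scheduler")

pipe = StableUnCLIPPipeline(
    prior=prior,
    tokenizer=tokenizer,
    text_encoder=text_encoder,
    prior_scheduler=prior_scheduler,
)
pipe.to("cuda")  # the custom `to()` above returns None, so call it as a statement

output = pipe(
    prompt="a shiba inu wearing a beret and black turtleneck",
    prior_guidance_scale=4.0,
    decoder_guidance_scale=8.0,
)
output.images[0].save("stable_unclip_output.png")
```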
diffusers/examples/community/stable_unclip.py/0
{ "file_path": "diffusers/examples/community/stable_unclip.py", "repo_id": "diffusers", "token_count": 5489 }
108
# ControlNet training example [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) by Lvmin Zhang and Maneesh Agrawala. This example is based on the [training example in the original ControlNet repository](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md). It trains a ControlNet to fill circles using a [small synthetic dataset](https://huggingface.co/datasets/fusing/fill50k). ## Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install -e . ``` Then cd in the example folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` Or for a default accelerate configuration without answering questions about your environment ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell e.g. a notebook ```python from accelerate.utils import write_basic_config write_basic_config() ``` ## Circle filling dataset The original dataset is hosted in the [ControlNet repo](https://huggingface.co/lllyasviel/ControlNet/blob/main/training/fill50k.zip). We re-uploaded it to be compatible with `datasets` [here](https://huggingface.co/datasets/fusing/fill50k). Note that `datasets` handles dataloading within the training script. Our training examples use [Stable Diffusion 1.5](https://huggingface.co/runwayml/stable-diffusion-v1-5) as the original set of ControlNet models were trained from it. However, ControlNet can be trained to augment any Stable Diffusion compatible model (such as [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4)) or [stabilityai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1). ## Training Our training examples use two test conditioning images. They can be downloaded by running ```sh wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png ``` ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=4 ``` This default configuration requires ~38GB VRAM. By default, the training script logs outputs to tensorboard. Pass `--report_to wandb` to use weights and biases. Gradient accumulation with a smaller batch size can be used to reduce training requirements to ~20 GB VRAM. 
```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 ``` ## Training with multiple GPUs `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command: ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch --mixed_precision="fp16" --multi_gpu train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=4 \ --mixed_precision="fp16" \ --tracker_project_name="controlnet-demo" \ --report_to=wandb ``` ## Example results #### After 300 steps with batch size 8 | | | |-------------------|:-------------------------:| | | red circle with blue background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_300_steps.png) | | | cyan circle with brown floral background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_300_steps.png) | #### After 6000 steps with batch size 8: | | | |-------------------|:-------------------------:| | | red circle with blue background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png) | ![red circle with blue background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/red_circle_with_blue_background_6000_steps.png) | | | cyan circle with brown floral background | ![conditioning image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png) | ![cyan circle with brown floral background](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/cyan_circle_with_brown_floral_background_6000_steps.png) | ## Training on a 16 GB GPU Optimizations: - Gradient checkpointing - bitsandbyte's 8-bit optimizer [bitandbytes install instructions](https://github.com/TimDettmers/bitsandbytes#requirements--installation). 
```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam ``` ## Training on a 12 GB GPU Optimizations: - Gradient checkpointing - bitsandbyte's 8-bit optimizer - xformers - set grads to none ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --learning_rate=1e-5 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --use_8bit_adam \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none ``` When using `enable_xformers_memory_efficient_attention`, please make sure to install `xformers` by `pip install xformers`. ## Training on an 8 GB GPU We have not exhaustively tested DeepSpeed support for ControlNet. While the configuration does save memory, we have not confirmed the configuration to train successfully. You will very likely have to make changes to the config to have a successful training run. Optimizations: - Gradient checkpointing - xformers - set grads to none - DeepSpeed stage 2 with parameter and optimizer offloading - fp16 mixed precision [DeepSpeed](https://www.deepspeed.ai/) can offload tensors from VRAM to either CPU or NVME. This requires significantly more RAM (about 25 GB). Use `accelerate config` to enable DeepSpeed stage 2. The relevant parts of the resulting accelerate config file are ```yaml compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 4 offload_optimizer_device: cpu offload_param_device: cpu zero3_init_flag: false zero_stage: 2 distributed_type: DEEPSPEED ``` See [documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more DeepSpeed configuration options. Changing the default Adam optimizer to DeepSpeed's Adam `deepspeed.ops.adam.DeepSpeedCPUAdam` gives a substantial speedup but it requires CUDA toolchain with the same version as pytorch. 8-bit optimizer does not seem to be compatible with DeepSpeed at the moment. ```bash export MODEL_DIR="runwayml/stable-diffusion-v1-5" export OUTPUT_DIR="path to save model" accelerate launch train_controlnet.py \ --pretrained_model_name_or_path=$MODEL_DIR \ --output_dir=$OUTPUT_DIR \ --dataset_name=fusing/fill50k \ --resolution=512 \ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \ --train_batch_size=1 \ --gradient_accumulation_steps=4 \ --gradient_checkpointing \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none \ --mixed_precision fp16 ``` ## Performing inference with the trained ControlNet The trained model can be run the same as the original ControlNet pipeline with the newly trained ControlNet. 
Set `base_model_path` and `controlnet_path` to the values `--pretrained_model_name_or_path` and `--output_dir` were respectively set to in the training script.

```py
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from diffusers.utils import load_image
import torch

base_model_path = "path to model"
controlnet_path = "path to controlnet"

controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    base_model_path, controlnet=controlnet, torch_dtype=torch.float16
)

# speed up diffusion process with faster scheduler and memory optimization
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# remove following line if xformers is not installed or when using Torch 2.0.
pipe.enable_xformers_memory_efficient_attention()
# memory optimization.
pipe.enable_model_cpu_offload()

control_image = load_image("./conditioning_image_1.png")
prompt = "pale golden rod circle with old lace background"

# generate image
generator = torch.manual_seed(0)
image = pipe(
    prompt, num_inference_steps=20, generator=generator, image=control_image
).images[0]
image.save("./output.png")
```

## Training with Flax/JAX

For faster training on TPUs and GPUs you can leverage the flax training example. Follow the instructions above to get the model and dataset before running the script.

### Running on Google Cloud TPU

See below for commands to set up a TPU VM (`--accelerator-type v4-8`). For more details about how to set up and use TPUs, refer to [Cloud docs for single VM setup](https://cloud.google.com/tpu/docs/run-calculation-jax).

First create a single TPUv4-8 VM and connect to it:

```
ZONE=us-central2-b
TPU_TYPE=v4-8
VM_NAME=hg_flax

gcloud alpha compute tpus tpu-vm create $VM_NAME \
 --zone $ZONE \
 --accelerator-type $TPU_TYPE \
 --version tpu-vm-v4-base

gcloud alpha compute tpus tpu-vm ssh $VM_NAME --zone $ZONE -- \
```

When connected install JAX `0.4.5`:

```
pip install "jax[tpu]==0.4.5" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
```

To verify that JAX was correctly installed, you can run the following command:

```
import jax
jax.device_count()
```

This should display the number of TPU cores, which should be 4 on a TPUv4-8 VM.

Then install Diffusers and the library's training dependencies:

```bash
git clone https://github.com/huggingface/diffusers
cd diffusers
pip install .
```

Then cd into the example folder and run

```bash
pip install -U -r requirements_flax.txt
```

If you want to use Weights and Biases logging, you should also install `wandb` now

```bash
pip install wandb
```

Now let's download two conditioning images that we will use to run validation during training in order to track our progress

```
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
```

We encourage you to store or share your model with the community. To use the Hugging Face Hub, please log in to your Hugging Face account, or [create one](https://huggingface.co/join) if you don't have one already:

```
huggingface-cli login
```

Make sure you have the `MODEL_DIR`, `OUTPUT_DIR` and `HUB_MODEL_ID` environment variables set.
The `OUTPUT_DIR` and `HUB_MODEL_ID` variables specify where to save the model to on the Hub:

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="runs/fill-circle-{timestamp}"
export HUB_MODEL_ID="controlnet-fill-circle"
```

And finally start the training

```bash
python3 train_controlnet_flax.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
 --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
 --validation_steps=1000 \
 --train_batch_size=2 \
 --revision="non-ema" \
 --from_pt \
 --report_to="wandb" \
 --tracker_project_name=$HUB_MODEL_ID \
 --num_train_epochs=11 \
 --push_to_hub \
 --hub_model_id=$HUB_MODEL_ID
```

Since we passed the `--push_to_hub` flag, it will automatically create a model repo under your Hugging Face account based on `$HUB_MODEL_ID`. By the end of training, the final checkpoint will be automatically stored on the hub. You can find an example model repo [here](https://huggingface.co/YiYiXu/fill-circle-controlnet).

Our training script also provides limited support for streaming large datasets from the Hugging Face Hub. In order to enable streaming, one must also set `--max_train_samples`. Here is an example command (from [this blog article](https://huggingface.co/blog/train-your-controlnet)):

```bash
export MODEL_DIR="runwayml/stable-diffusion-v1-5"
export OUTPUT_DIR="runs/uncanny-faces-{timestamp}"
export HUB_MODEL_ID="controlnet-uncanny-faces"

python3 train_controlnet_flax.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=multimodalart/facesyntheticsspigacaptioned \
 --streaming \
 --conditioning_image_column=spiga_seg \
 --image_column=image \
 --caption_column=image_caption \
 --resolution=512 \
 --max_train_samples 100000 \
 --learning_rate=1e-5 \
 --train_batch_size=1 \
 --revision="flax" \
 --report_to="wandb" \
 --tracker_project_name=$HUB_MODEL_ID
```

Note, however, that the performance of the TPUs might get bottlenecked as streaming with `datasets` is not optimized for images. To ensure maximum throughput, we encourage you to explore the following options:

* [Webdataset](https://webdataset.github.io/webdataset/)
* [TorchData](https://github.com/pytorch/data)
* [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds)

When working with a larger dataset, you may need to run the training process for a long time, and it is useful to save regular checkpoints during the process. You can use the following argument to enable intermediate checkpointing:

```bash
 --checkpointing_steps=500
```

This will save the trained model in subfolders of your `output_dir`. Each subfolder name is the number of steps performed so far; for example, a checkpoint saved after 500 training steps would be saved in a subfolder named `500`.

You can then start your training from this saved checkpoint with

```bash
 --controlnet_model_name_or_path="./control_out/500"
```

We support training with the Min-SNR weighting strategy proposed in [Efficient Diffusion Training via Min-SNR Weighting Strategy](https://arxiv.org/abs/2303.09556) which helps to achieve faster convergence by rebalancing the loss. To use it, one needs to set the `--snr_gamma` argument. The recommended value when using it is `5.0`.
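For example, enabling it only amounts to appending the flag to the training command shown above (a sketch; all other flags unchanged from the earlier example):

```bash
python3 train_controlnet_flax.py \
 --pretrained_model_name_or_path=$MODEL_DIR \
 --output_dir=$OUTPUT_DIR \
 --dataset_name=fusing/fill50k \
 --resolution=512 \
 --learning_rate=1e-5 \
 --train_batch_size=2 \
 --snr_gamma=5.0
```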
We also support gradient accumulation, a technique that lets you use an effective batch size bigger than what your machine would normally be able to fit into memory. You can use the `--gradient_accumulation_steps` argument to set the number of gradient accumulation steps. The ControlNet author recommends using gradient accumulation to achieve better convergence. Read more [here](https://github.com/lllyasviel/ControlNet/blob/main/docs/train.md#more-consideration-sudden-converge-phenomenon-and-gradient-accumulation).

You can **profile your code** with:

```bash
 --profile_steps=5
```

Refer to the [JAX documentation on profiling](https://jax.readthedocs.io/en/latest/profiling.html). To inspect the profile trace, you'll have to install and start TensorBoard with the profile plugin:

```bash
pip install tensorflow tensorboard-plugin-profile
tensorboard --logdir runs/fill-circle-100steps-20230411_165612/
```

The profile can then be inspected at http://localhost:6006/#profile

Sometimes you'll get version conflicts (error messages like `Duplicate plugins for name projector`), which means that you have to uninstall and reinstall all versions of TensorFlow/TensorBoard (e.g. with `pip uninstall tensorflow tf-nightly tensorboard tb-nightly tensorboard-plugin-profile && pip install tf-nightly tbp-nightly tensorboard-plugin-profile`).

Note that the debugging functionality of the TensorBoard `profile` plugin is still under active development. Not all views are fully functional, and, for example, the `trace_viewer` cuts off events after 1M (which can result in all your device traces getting lost if you accidentally profile the compilation step).

## Support for Stable Diffusion XL

We provide a training script for training a ControlNet with [Stable Diffusion XL](https://huggingface.co/papers/2307.01952). Please refer to [README_sdxl.md](./README_sdxl.md) for more details.
diffusers/examples/controlnet/README.md/0
{ "file_path": "diffusers/examples/controlnet/README.md", "repo_id": "diffusers", "token_count": 6041 }
109
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import sys import tempfile sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class InstructPix2Pix(ExamplesTestsAccelerate): def test_instruct_pix2pix_checkpointing_checkpoints_total_limit(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=6 --checkpointing_steps=2 --checkpoints_total_limit=2 --output_dir {tmpdir} --seed=0 """.split() run_command(self._launch_args + test_args) self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}, ) def test_instruct_pix2pix_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=4 --checkpointing_steps=2 --output_dir {tmpdir} --seed=0 """.split() run_command(self._launch_args + test_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) resume_run_args = f""" examples/instruct_pix2pix/train_instruct_pix2pix.py --pretrained_model_name_or_path=hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name=hf-internal-testing/instructpix2pix-10-samples --resolution=64 --random_flip --train_batch_size=1 --max_train_steps=8 --checkpointing_steps=2 --output_dir {tmpdir} --seed=0 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 """.split() run_command(self._launch_args + resume_run_args) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}, )
diffusers/examples/instruct_pix2pix/test_instruct_pix2pix.py/0
{ "file_path": "diffusers/examples/instruct_pix2pix/test_instruct_pix2pix.py", "repo_id": "diffusers", "token_count": 1823 }
110
# Consistency Training

`train_cm_ct_unconditional.py` trains a consistency model (CM) from scratch following the consistency training (CT) algorithm introduced in [Consistency Models](https://arxiv.org/abs/2303.01469) and refined in [Improved Techniques for Training Consistency Models](https://arxiv.org/abs/2310.14189). Both unconditional and class-conditional training are supported.

A usage example is as follows:

```bash
accelerate launch examples/research_projects/consistency_training/train_cm_ct_unconditional.py \
    --dataset_name="cifar10" \
    --dataset_image_column_name="img" \
    --output_dir="/path/to/output/dir" \
    --mixed_precision=fp16 \
    --resolution=32 \
    --max_train_steps=1000 --max_train_samples=10000 \
    --dataloader_num_workers=8 \
    --noise_precond_type="cm" --input_precond_type="cm" \
    --train_batch_size=4 \
    --learning_rate=1e-04 --lr_scheduler="constant" --lr_warmup_steps=0 \
    --use_8bit_adam \
    --use_ema \
    --validation_steps=100 --eval_batch_size=4 \
    --checkpointing_steps=100 --checkpoints_total_limit=10 \
    --class_conditional --num_classes=10
```
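For orientation, below is a minimal PyTorch sketch of the core discrete consistency training objective: two points on the same noise trajectory at adjacent noise levels are pushed to produce the same output, with the target branch evaluated by an EMA copy of the model. This is a conceptual illustration under a simplified parameterization and a plain squared-L2 metric; the actual script follows the schedules, preconditioning, and loss options from the papers, configured through the flags shown above.

```python
import torch
import torch.nn.functional as F

def consistency_training_loss(online_model, ema_model, x0, sigmas, i):
    # Share one Gaussian noise sample across two adjacent noise levels sigma_{i+1} > sigma_i.
    noise = torch.randn_like(x0)
    x_next = x0 + sigmas[i + 1] * noise  # sample at the higher noise level
    x_curr = x0 + sigmas[i] * noise      # same trajectory, adjacent lower level

    pred_online = online_model(x_next, sigmas[i + 1])
    with torch.no_grad():
        # The target branch uses the EMA ("teacher") copy and receives no gradients.
        pred_target = ema_model(x_curr, sigmas[i])

    # Penalize disagreement between the two consistency-function outputs.
    return F.mse_loss(pred_online, pred_target)
```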
diffusers/examples/research_projects/consistency_training/README.md/0
{ "file_path": "diffusers/examples/research_projects/consistency_training/README.md", "repo_id": "diffusers", "token_count": 413 }
111
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fine-tuning script for Stable Diffusion for text2image with support for LoRA.""" import argparse import itertools import json import logging import math import os import random from pathlib import Path import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from packaging import version from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer import diffusers from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel from diffusers.loaders import AttnProcsLayers from diffusers.models.attention_processor import LoRAAttnProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, is_wandb_available from diffusers.utils.import_utils import is_xformers_available # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.14.0.dev0") logger = get_logger(__name__, log_level="INFO") def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None): img_str = "" for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {base_model} tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - diffusers-training - lora inference: true --- """ model_card = f""" # LoRA text2image fine-tuning - {repo_id} These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n {img_str} """ with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." 
), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is sampled during training for inference." ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_epochs", type=int, default=1, help=( "Run fine-tuning validation every X epochs. The validation process consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--output_dir", type=str, default="sd-model-finetuned-lora", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") # lora args parser.add_argument("--use_peft", action="store_true", help="Whether to use peft to support lora") parser.add_argument("--lora_r", type=int, default=4, help="Lora rank, only used if use_lora is True") parser.add_argument("--lora_alpha", type=int, default=32, help="Lora alpha, only used if lora is True") parser.add_argument("--lora_dropout", type=float, default=0.0, help="Lora dropout, only used if use_lora is True") parser.add_argument( "--lora_bias", type=str, default="none", help="Bias type for Lora. 
Can be 'none', 'all' or 'lora_only', only used if use_lora is True", ) parser.add_argument( "--lora_text_encoder_r", type=int, default=4, help="Lora rank for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_alpha", type=int, default=32, help="Lora alpha for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_dropout", type=float, default=0.0, help="Lora dropout for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_bias", type=str, default="none", help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora and `train_text_encoder` are True", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." 
), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") return args DATASET_NAME_MAPPING = { "lambdalabs/pokemon-blip-captions": ("image", "text"), } def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." 
" Please use `huggingface-cli login` to authenticate with the Hub." ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) if args.report_to == "wandb": if not is_wandb_available(): raise ImportError("Make sure to install wandb if you want to use it for logging during training.") import wandb # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load scheduler, tokenizer and models. noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 if args.use_peft: from peft import LoraConfig, LoraModel, get_peft_model_state_dict, set_peft_model_state_dict UNET_TARGET_MODULES = ["to_q", "to_v", "query", "value"] TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"] config = LoraConfig( r=args.lora_r, lora_alpha=args.lora_alpha, target_modules=UNET_TARGET_MODULES, lora_dropout=args.lora_dropout, bias=args.lora_bias, ) unet = LoraModel(config, unet) vae.requires_grad_(False) if args.train_text_encoder: config = LoraConfig( r=args.lora_text_encoder_r, lora_alpha=args.lora_text_encoder_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, lora_dropout=args.lora_text_encoder_dropout, bias=args.lora_text_encoder_bias, ) text_encoder = LoraModel(config, text_encoder) else: # freeze parameters of models to save more memory unet.requires_grad_(False) vae.requires_grad_(False) text_encoder.requires_grad_(False) # now we will add new LoRA weights to the attention layers # It's important to realize here how many attention weights will be added and of which sizes # The sizes of the attention layers consist only of two different variables: # 1) - the "hidden_size", which is increased according to `unet.config.block_out_channels`. # 2) - the "cross attention size", which is set to `unet.config.cross_attention_dim`. # Let's first see how many attention processors we will have to set. # For Stable Diffusion, it should be equal to: # - down blocks (2x attention layers) * (2x transformer layers) * (3x down blocks) = 12 # - mid blocks (2x attention layers) * (1x transformer layers) * (1x mid blocks) = 2 # - up blocks (2x attention layers) * (3x transformer layers) * (3x down blocks) = 18 # => 32 layers # Set correct lora layers lora_attn_procs = {} for name in unet.attn_processors.keys(): cross_attention_dim = None if name.endswith("attn1.processor") else unet.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = unet.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(unet.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = unet.config.block_out_channels[block_id] lora_attn_procs[name] = LoRAAttnProcessor(hidden_size=hidden_size, cross_attention_dim=cross_attention_dim) unet.set_attn_processor(lora_attn_procs) lora_layers = AttnProcsLayers(unet.attn_processors) # Move unet, vae and text_encoder to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW if args.use_peft: # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_cls( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) else: optimizer = optimizer_cls( lora_layers.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." 
) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) return {"pixel_values": pixel_values, "input_ids": input_ids} # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. if args.use_peft: if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) else: lora_layers, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( lora_layers, optimizer, train_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("text2image-fine-tune", config=vars(args)) # Train! 
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() train_loss = 0.0 for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Gather the losses across all processes for logging (if we use 
distributed training). avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: if args.use_peft: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) else: params_to_clip = lora_layers.parameters() accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompt is not None and epoch % args.validation_epochs == 0: logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for _ in range(args.num_validation_images): images.append( pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0] ) if accelerator.is_main_process: for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() # Save the lora layers accelerator.wait_for_everyone() if accelerator.is_main_process: if args.use_peft: lora_config = {} unwarpped_unet = accelerator.unwrap_model(unet) state_dict = get_peft_model_state_dict(unwarpped_unet, state_dict=accelerator.get_state_dict(unet)) lora_config["peft_config"] = unwarpped_unet.get_peft_config_as_dict(inference=True) if args.train_text_encoder: unwarpped_text_encoder = accelerator.unwrap_model(text_encoder) text_encoder_state_dict = get_peft_model_state_dict( unwarpped_text_encoder, state_dict=accelerator.get_state_dict(text_encoder) ) text_encoder_state_dict = {f"text_encoder_{k}": v for k, v in text_encoder_state_dict.items()} state_dict.update(text_encoder_state_dict) lora_config["text_encoder_peft_config"] = unwarpped_text_encoder.get_peft_config_as_dict( inference=True ) accelerator.save(state_dict, os.path.join(args.output_dir, f"{global_step}_lora.pt")) with open(os.path.join(args.output_dir, f"{global_step}_lora_config.json"), "w") as f: json.dump(lora_config, f) else: unet = unet.to(torch.float32) unet.save_attn_procs(args.output_dir) if 
args.push_to_hub: save_model_card( repo_id, images=images, base_model=args.pretrained_model_name_or_path, dataset_name=args.dataset_name, repo_folder=args.output_dir, ) upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) # Final inference # Load previous pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, revision=args.revision, torch_dtype=weight_dtype ) if args.use_peft: def load_and_set_lora_ckpt(pipe, ckpt_dir, global_step, device, dtype): with open(os.path.join(args.output_dir, f"{global_step}_lora_config.json"), "r") as f: lora_config = json.load(f) print(lora_config) checkpoint = os.path.join(args.output_dir, f"{global_step}_lora.pt") lora_checkpoint_sd = torch.load(checkpoint) unet_lora_ds = {k: v for k, v in lora_checkpoint_sd.items() if "text_encoder_" not in k} text_encoder_lora_ds = { k.replace("text_encoder_", ""): v for k, v in lora_checkpoint_sd.items() if "text_encoder_" in k } unet_config = LoraConfig(**lora_config["peft_config"]) pipe.unet = LoraModel(unet_config, pipe.unet) set_peft_model_state_dict(pipe.unet, unet_lora_ds) if "text_encoder_peft_config" in lora_config: text_encoder_config = LoraConfig(**lora_config["text_encoder_peft_config"]) pipe.text_encoder = LoraModel(text_encoder_config, pipe.text_encoder) set_peft_model_state_dict(pipe.text_encoder, text_encoder_lora_ds) if dtype in (torch.float16, torch.bfloat16): pipe.unet.half() pipe.text_encoder.half() pipe.to(device) return pipe pipeline = load_and_set_lora_ckpt(pipeline, args.output_dir, global_step, accelerator.device, weight_dtype) else: pipeline = pipeline.to(accelerator.device) # load attention processors pipeline.unet.load_attn_procs(args.output_dir) # run inference if args.seed is not None: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) else: generator = None images = [] for _ in range(args.num_validation_images): images.append(pipeline(args.validation_prompt, num_inference_steps=30, generator=generator).images[0]) if accelerator.is_main_process: for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": tracker.log( { "test": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/research_projects/lora/train_text_to_image_lora.py/0
{ "file_path": "diffusers/examples/research_projects/lora/train_text_to_image_lora.py", "repo_id": "diffusers", "token_count": 19064 }
112
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import argparse import logging import math import os import random from pathlib import Path import accelerate import datasets import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.state import AcceleratorState from accelerate.utils import ProjectConfiguration, set_seed from datasets import load_dataset from huggingface_hub import create_repo, upload_folder from onnxruntime.training.optim.fp16_optimizer import FP16_Optimizer as ORT_FP16_Optimizer from onnxruntime.training.ortmodule import ORTModule from packaging import version from torchvision import transforms from tqdm.auto import tqdm from transformers import CLIPTextModel, CLIPTokenizer from transformers.utils import ContextManagers import diffusers from diffusers import AutoencoderKL, DDPMScheduler, StableDiffusionPipeline, UNet2DConditionModel from diffusers.optimization import get_scheduler from diffusers.training_utils import EMAModel, compute_snr from diffusers.utils import check_min_version, deprecate, is_wandb_available from diffusers.utils.import_utils import is_xformers_available if is_wandb_available(): import wandb # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.17.0.dev0") logger = get_logger(__name__, log_level="INFO") DATASET_NAME_MAPPING = { "lambdalabs/pokemon-blip-captions": ("image", "text"), } def log_validation(vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, epoch): logger.info("Running validation... 
") pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, vae=accelerator.unwrap_model(vae), text_encoder=accelerator.unwrap_model(text_encoder), tokenizer=tokenizer, unet=accelerator.unwrap_model(unet), safety_checker=None, revision=args.revision, torch_dtype=weight_dtype, ) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) if args.enable_xformers_memory_efficient_attention: pipeline.enable_xformers_memory_efficient_attention() if args.seed is None: generator = None else: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) images = [] for i in range(len(args.validation_prompts)): with torch.autocast("cuda"): image = pipeline(args.validation_prompts[i], num_inference_steps=20, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") elif tracker.name == "wandb": tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompts[i]}") for i, image in enumerate(images) ] } ) else: logger.warning(f"image logging not implemented for {tracker.name}") del pipeline torch.cuda.empty_cache() def parse_args(): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--input_pertubation", type=float, default=0, help="The scale of input pretubation. Recommended 0.1." ) parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--dataset_name", type=str, default=None, help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem," " or to a folder containing files that 🤗 Datasets can understand." ), ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The config of the Dataset, leave as None if there's only one config.", ) parser.add_argument( "--train_data_dir", type=str, default=None, help=( "A folder containing the training data. Folder contents must follow the structure described in" " https://huggingface.co/docs/datasets/image_dataset#imagefolder. In particular, a `metadata.jsonl` file" " must exist to provide the captions for the images. Ignored if `dataset_name` is specified." ), ) parser.add_argument( "--image_column", type=str, default="image", help="The column of the dataset containing an image." ) parser.add_argument( "--caption_column", type=str, default="text", help="The column of the dataset containing a caption or a list of captions.", ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
), ) parser.add_argument( "--validation_prompts", type=str, default=None, nargs="+", help=("A set of prompts evaluated every `--validation_epochs` and logged to `--report_to`."), ) parser.add_argument( "--output_dir", type=str, default="sd-model-finetuned", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--cache_dir", type=str, default=None, help="The directory where the downloaded models and datasets will be stored.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", default=False, action="store_true", help=( "Whether to center crop the input images to the resolution. If not set, the images will be randomly" " cropped. The images will be resized to the resolution first before cropping." ), ) parser.add_argument( "--random_flip", action="store_true", help="whether to randomly flip images horizontally", ) parser.add_argument( "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader." ) parser.add_argument("--num_train_epochs", type=int, default=100) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=1e-4, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--snr_gamma", type=float, default=None, help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. " "More details here: https://arxiv.org/abs/2303.09556.", ) parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.") parser.add_argument( "--non_ema_revision", type=str, default=None, required=False, help=( "Revision of pretrained non-ema model identifier. Must be a branch, tag or git identifier of the local or" " remote repository specified with --pretrained_model_name_or_path." 
), ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help=( "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process." ), ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints are only suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--checkpoints_total_limit", type=int, default=None, help=( "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`." " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state" " for more docs" ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." 
) parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.") parser.add_argument( "--validation_epochs", type=int, default=5, help="Run validation every X epochs.", ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank # Sanity checks if args.dataset_name is None and args.train_data_dir is None: raise ValueError("Need either a dataset name or a training folder.") # default to using the same revision for the non-ema model if not specified if args.non_ema_revision is None: args.non_ema_revision = args.revision return args def main(): args = parse_args() if args.report_to == "wandb" and args.hub_token is not None: raise ValueError( "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token." " Please use `huggingface-cli login` to authenticate with the Hub." ) if args.non_ema_revision is not None: deprecate( "non_ema_revision!=None", "0.15.0", message=( "Downloading 'non_ema' weights from revision branches of the Hub is deprecated. Please make sure to" " use `--variant=non_ema` instead." ), ) logging_dir = os.path.join(args.output_dir, args.logging_dir) accelerator_project_config = ProjectConfiguration( total_limit=args.checkpoints_total_limit, project_dir=args.output_dir, logging_dir=logging_dir ) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_config=accelerator_project_config, ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) if args.push_to_hub: repo_id = create_repo( repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token ).repo_id # Load scheduler, tokenizer and models. 
noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision ) def deepspeed_zero_init_disabled_context_manager(): """ returns either a context list that includes one that will disable zero.Init or an empty context list """ deepspeed_plugin = AcceleratorState().deepspeed_plugin if accelerate.state.is_initialized() else None if deepspeed_plugin is None: return [] return [deepspeed_plugin.zero3_init_context_manager(enable=False)] # Currently Accelerate doesn't know how to handle multiple models under Deepspeed ZeRO stage 3. # For this to work properly all models must be run through `accelerate.prepare`. But accelerate # will try to assign the same optimizer with the same weights to all models during # `deepspeed.initialize`, which of course doesn't work. # # For now the following workaround will partially support Deepspeed ZeRO-3, by excluding the 2 # frozen models from being partitioned during `zero.Init` which gets called during # `from_pretrained` So CLIPTextModel and AutoencoderKL will not enjoy the parameter sharding # across multiple gpus and only UNet2DConditionModel will get ZeRO sharded. with ContextManagers(deepspeed_zero_init_disabled_context_manager()): text_encoder = CLIPTextModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision ) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.non_ema_revision ) # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) # Create EMA for the unet. if args.use_ema: ema_unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) ema_unet = EMAModel(ema_unet.parameters(), model_cls=UNet2DConditionModel, model_config=ema_unet.config) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): import xformers xformers_version = version.parse(xformers.__version__) if xformers_version == version.parse("0.0.16"): logger.warning( "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details." ) unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. 
Make sure it is installed correctly") # `accelerate` 0.16.0 will have better support for customized saving if version.parse(accelerate.__version__) >= version.parse("0.16.0"): # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format def save_model_hook(models, weights, output_dir): if accelerator.is_main_process: if args.use_ema: ema_unet.save_pretrained(os.path.join(output_dir, "unet_ema")) for i, model in enumerate(models): model.save_pretrained(os.path.join(output_dir, "unet")) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): if args.use_ema: load_model = EMAModel.from_pretrained(os.path.join(input_dir, "unet_ema"), UNet2DConditionModel) ema_unet.load_state_dict(load_model.state_dict()) ema_unet.to(accelerator.device) del load_model for i in range(len(models)): # pop models so that they are not loaded again model = models.pop() # load diffusers style into model load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet") model.register_to_config(**load_model.config) model.load_state_dict(load_model.state_dict()) del load_model accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Initialize the optimizer if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) optimizer_cls = bnb.optim.AdamW8bit else: optimizer_cls = torch.optim.AdamW optimizer = optimizer_cls( unet.parameters(), lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) optimizer = ORT_FP16_Optimizer(optimizer) # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( args.dataset_name, args.dataset_config_name, cache_dir=args.cache_dir, ) else: data_files = {} if args.train_data_dir is not None: data_files["train"] = os.path.join(args.train_data_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.4.0/en/image_load#imagefolder # Preprocessing the datasets. # We need to tokenize inputs and targets. column_names = dataset["train"].column_names # 6. Get the column names for input/target. 
dataset_columns = DATASET_NAME_MAPPING.get(args.dataset_name, None) if args.image_column is None: image_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: image_column = args.image_column if image_column not in column_names: raise ValueError( f"--image_column' value '{args.image_column}' needs to be one of: {', '.join(column_names)}" ) if args.caption_column is None: caption_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: caption_column = args.caption_column if caption_column not in column_names: raise ValueError( f"--caption_column' value '{args.caption_column}' needs to be one of: {', '.join(column_names)}" ) # Preprocessing the datasets. # We need to tokenize input captions and transform the images. def tokenize_captions(examples, is_train=True): captions = [] for caption in examples[caption_column]: if isinstance(caption, str): captions.append(caption) elif isinstance(caption, (list, np.ndarray)): # take a random caption if there are multiple captions.append(random.choice(caption) if is_train else caption[0]) else: raise ValueError( f"Caption column `{caption_column}` should contain either strings or lists of strings." ) inputs = tokenizer( captions, max_length=tokenizer.model_max_length, padding="max_length", truncation=True, return_tensors="pt" ) return inputs.input_ids # Preprocessing the datasets. train_transforms = transforms.Compose( [ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(args.resolution) if args.center_crop else transforms.RandomCrop(args.resolution), transforms.RandomHorizontalFlip() if args.random_flip else transforms.Lambda(lambda x: x), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def preprocess_train(examples): images = [image.convert("RGB") for image in examples[image_column]] examples["pixel_values"] = [train_transforms(image) for image in images] examples["input_ids"] = tokenize_captions(examples) return examples with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.stack([example["input_ids"] for example in examples]) return {"pixel_values": pixel_values, "input_ids": input_ids} # DataLoaders creation: train_dataloader = torch.utils.data.DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.train_batch_size, num_workers=args.dataloader_num_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. 
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) if args.use_ema: ema_unet.to(accelerator.device) unet = ORTModule(unet) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: tracker_config = dict(vars(args)) tracker_config.pop("validation_prompts") accelerator.init_trackers(args.tracker_project_name, tracker_config) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] if len(dirs) > 0 else None if path is None: accelerator.print( f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run." ) args.resume_from_checkpoint = None else: accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = global_step // num_update_steps_per_epoch resume_step = resume_global_step % (num_update_steps_per_epoch * args.gradient_accumulation_steps) # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() train_loss = 0.0 for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(weight_dtype)).latent_dist.sample() latents = latents * vae.config.scaling_factor # Sample noise that we'll add to the latents noise = torch.randn_like(latents) if args.noise_offset: # https://www.crosslabs.org//blog/diffusion-with-offset-noise noise += args.noise_offset * torch.randn( (latents.shape[0], latents.shape[1], 1, 1), device=latents.device ) if args.input_pertubation: new_noise = noise + args.input_pertubation * torch.randn_like(noise) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) if args.input_pertubation: noisy_latents = noise_scheduler.add_noise(latents, new_noise, timesteps) else: noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") # Predict the noise residual and compute loss model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample if args.snr_gamma is None: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") else: # Compute loss-weights as per Section 3.4 of https://arxiv.org/abs/2303.09556. # Since we predict the noise instead of x_0, the original formulation is slightly changed. # This is discussed in Section 4.2 of the same paper. snr = compute_snr(noise_scheduler, timesteps) mse_loss_weights = torch.stack([snr, args.snr_gamma * torch.ones_like(timesteps)], dim=1).min( dim=1 )[0] if noise_scheduler.config.prediction_type == "epsilon": mse_loss_weights = mse_loss_weights / snr elif noise_scheduler.config.prediction_type == "v_prediction": mse_loss_weights = mse_loss_weights / (snr + 1) loss = F.mse_loss(model_pred.float(), target.float(), reduction="none") loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights loss = loss.mean() # Gather the losses across all processes for logging (if we use distributed training). 
avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean() train_loss += avg_loss.item() / args.gradient_accumulation_steps # Backpropagate accelerator.backward(loss) if accelerator.sync_gradients: accelerator.clip_grad_norm_(unet.parameters(), args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: if args.use_ema: ema_unet.step(unet.parameters()) progress_bar.update(1) global_step += 1 accelerator.log({"train_loss": train_loss}, step=global_step) train_loss = 0.0 if global_step % args.checkpointing_steps == 0: if accelerator.is_main_process: save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") accelerator.save_state(save_path) logger.info(f"Saved state to {save_path}") logs = {"step_loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) if global_step >= args.max_train_steps: break if accelerator.is_main_process: if args.validation_prompts is not None and epoch % args.validation_epochs == 0: if args.use_ema: # Store the UNet parameters temporarily and load the EMA parameters to perform inference. ema_unet.store(unet.parameters()) ema_unet.copy_to(unet.parameters()) log_validation( vae, text_encoder, tokenizer, unet, args, accelerator, weight_dtype, global_step, ) if args.use_ema: # Switch back to the original UNet parameters. ema_unet.restore(unet.parameters()) # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: unet = accelerator.unwrap_model(unet) if args.use_ema: ema_unet.copy_to(unet.parameters()) pipeline = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, revision=args.revision, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: upload_folder( repo_id=repo_id, folder_path=args.output_dir, commit_message="End of training", ignore_patterns=["step_*", "epoch_*"], ) accelerator.end_training() if __name__ == "__main__": main()
diffusers/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py/0
{ "file_path": "diffusers/examples/research_projects/onnxruntime/text_to_image/train_text_to_image.py", "repo_id": "diffusers", "token_count": 17009 }
113
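The `snr_gamma` branch in the training loop above implements the Min-SNR weighting strategy from https://arxiv.org/abs/2303.09556. A minimal, self-contained sketch of that weighting is shown below, assuming a DDPM-style noise schedule that exposes `alphas_cumprod`; the helper names are illustrative and are not the `diffusers` `compute_snr` API itself.

```python
import torch


def snr_from_alphas_cumprod(alphas_cumprod: torch.Tensor, timesteps: torch.Tensor) -> torch.Tensor:
    # SNR(t) = alpha_bar_t / (1 - alpha_bar_t) for a variance-preserving diffusion process.
    alpha_bar = alphas_cumprod[timesteps]
    return alpha_bar / (1.0 - alpha_bar)


def min_snr_loss_weights(snr: torch.Tensor, snr_gamma: float, prediction_type: str) -> torch.Tensor:
    # Clamp the per-timestep weight at snr_gamma, then rescale depending on the prediction
    # target, mirroring the epsilon / v_prediction cases in the training loop above.
    weights = torch.minimum(snr, torch.full_like(snr, snr_gamma))
    if prediction_type == "epsilon":
        return weights / snr
    if prediction_type == "v_prediction":
        return weights / (snr + 1.0)
    raise ValueError(f"Unknown prediction type {prediction_type}")


# Toy linear-beta schedule with 1000 steps, just to make the sketch runnable.
betas = torch.linspace(1e-4, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
timesteps = torch.randint(0, 1000, (4,))
snr = snr_from_alphas_cumprod(alphas_cumprod, timesteps)
print(min_snr_loss_weights(snr, snr_gamma=5.0, prediction_type="epsilon"))
```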
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os import shutil import sys import tempfile from diffusers import DiffusionPipeline, UNet2DConditionModel # noqa: E402 sys.path.append("..") from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402 logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class TextToImage(ExamplesTestsAccelerate): def test_text_to_image(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} """.split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json"))) def test_text_to_image_checkpointing(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # check can run an intermediate checkpoint unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) pipe(prompt, num_inference_steps=1) # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) # Run training script for 2 total steps resuming from checkpoint 4 resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} 
--dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --resume_from_checkpoint=checkpoint-4 --seed=0 """.split() run_command(self._launch_args + resume_run_args) # check can run new fully trained pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # no checkpoint-2 -> check old checkpoints do not exist # check new checkpoints exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-5"}, ) def test_text_to_image_checkpointing_use_ema(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --use_ema --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=2) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # check can run an intermediate checkpoint unet = UNet2DConditionModel.from_pretrained(tmpdir, subfolder="checkpoint-2/unet") pipe = DiffusionPipeline.from_pretrained(pretrained_model_name_or_path, unet=unet, safety_checker=None) pipe(prompt, num_inference_steps=1) # Remove checkpoint 2 so that we can check only later checkpoints exist after resuming shutil.rmtree(os.path.join(tmpdir, "checkpoint-2")) # Run training script for 2 total steps resuming from checkpoint 4 resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=1 --resume_from_checkpoint=checkpoint-4 --use_ema --seed=0 """.split() run_command(self._launch_args + resume_run_args) # check can run new fully trained pipeline pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # no checkpoint-2 -> check old checkpoints do not exist # check new checkpoints exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-5"}, ) def test_text_to_image_checkpointing_checkpoints_total_limit(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 6, checkpointing_steps == 2, checkpoints_total_limit == 2 # Should 
create checkpoints at steps 2, 4, 6 # with checkpoint at step 2 deleted initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 6 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --checkpoints_total_limit=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist # checkpoint-2 should have been deleted self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-4", "checkpoint-6"}) def test_text_to_image_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self): pretrained_model_name_or_path = "hf-internal-testing/tiny-stable-diffusion-pipe" prompt = "a prompt" with tempfile.TemporaryDirectory() as tmpdir: # Run training script with checkpointing # max_train_steps == 4, checkpointing_steps == 2 # Should create checkpoints at steps 2, 4 initial_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 4 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --seed=0 """.split() run_command(self._launch_args + initial_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"}, ) # resume and we should try to checkpoint at 6, where we'll have to remove # checkpoint-2 and checkpoint-4 instead of just a single previous checkpoint resume_run_args = f""" examples/text_to_image/train_text_to_image.py --pretrained_model_name_or_path {pretrained_model_name_or_path} --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 8 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} --checkpointing_steps=2 --resume_from_checkpoint=checkpoint-4 --checkpoints_total_limit=2 --seed=0 """.split() run_command(self._launch_args + resume_run_args) pipe = DiffusionPipeline.from_pretrained(tmpdir, safety_checker=None) pipe(prompt, num_inference_steps=1) # check checkpoint directories exist self.assertEqual( {x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"}, ) class TextToImageSDXL(ExamplesTestsAccelerate): def test_text_to_image_sdxl(self): with tempfile.TemporaryDirectory() as tmpdir: test_args = f""" examples/text_to_image/train_text_to_image_sdxl.py --pretrained_model_name_or_path hf-internal-testing/tiny-stable-diffusion-xl-pipe --dataset_name hf-internal-testing/dummy_image_text_data --resolution 64 --center_crop --random_flip --train_batch_size 1 --gradient_accumulation_steps 1 --max_train_steps 2 --learning_rate 5.0e-04 --scale_lr --lr_scheduler constant --lr_warmup_steps 0 --output_dir {tmpdir} 
""".split() run_command(self._launch_args + test_args) # save_pretrained smoke test self.assertTrue(os.path.isfile(os.path.join(tmpdir, "unet", "diffusion_pytorch_model.safetensors"))) self.assertTrue(os.path.isfile(os.path.join(tmpdir, "scheduler", "scheduler_config.json")))
diffusers/examples/text_to_image/test_text_to_image.py/0
{ "file_path": "diffusers/examples/text_to_image/test_text_to_image.py", "repo_id": "diffusers", "token_count": 7427 }
114
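The `checkpoints_total_limit` assertions in the tests above rely on the pruning behaviour of the training scripts: before a new `checkpoint-{step}` directory is written, the oldest existing checkpoints are deleted so that at most the configured number remains. A minimal sketch of that pruning step, with illustrative directory names:

```python
import os
import shutil


def prune_checkpoints(output_dir: str, checkpoints_total_limit: int) -> None:
    # Sort existing checkpoints by step and delete the oldest ones,
    # leaving room for the checkpoint that is about to be saved.
    checkpoints = [d for d in os.listdir(output_dir) if d.startswith("checkpoint")]
    checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
    if len(checkpoints) >= checkpoints_total_limit:
        num_to_remove = len(checkpoints) - checkpoints_total_limit + 1
        for removing_checkpoint in checkpoints[:num_to_remove]:
            shutil.rmtree(os.path.join(output_dir, removing_checkpoint))
```

With `checkpoints_total_limit=2` and checkpoints saved at steps 2, 4, and 6, only `checkpoint-4` and `checkpoint-6` survive, which is exactly what the first `checkpoints_total_limit` test asserts.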
## Training an unconditional diffusion model Creating a training image set is [described in a different document](https://huggingface.co/docs/datasets/image_process#image-datasets). ### Installing the dependencies Before running the scripts, make sure to install the library's training dependencies: **Important** To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Then cd in the example folder and run ```bash pip install -r requirements.txt ``` And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with: ```bash accelerate config ``` ### Unconditional Flowers The command to train a DDPM UNet model on the Oxford Flowers dataset: ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/flowers-102-categories" \ --resolution=64 --center_crop --random_flip \ --output_dir="ddpm-ema-flowers-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --use_ema \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision=no \ --push_to_hub ``` An example trained model: https://huggingface.co/anton-l/ddpm-ema-flowers-64 A full training run takes 2 hours on 4xV100 GPUs. <img src="https://user-images.githubusercontent.com/26864830/180248660-a0b143d0-b89a-42c5-8656-2ebf6ece7e52.png" width="700" /> ### Unconditional Pokemon The command to train a DDPM UNet model on the Pokemon dataset: ```bash accelerate launch train_unconditional.py \ --dataset_name="huggan/pokemon" \ --resolution=64 --center_crop --random_flip \ --output_dir="ddpm-ema-pokemon-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --use_ema \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision=no \ --push_to_hub ``` An example trained model: https://huggingface.co/anton-l/ddpm-ema-pokemon-64 A full training run takes 2 hours on 4xV100 GPUs. <img src="https://user-images.githubusercontent.com/26864830/180248200-928953b4-db38-48db-b0c6-8b740fe6786f.png" width="700" /> ### Training with multiple GPUs `accelerate` allows for seamless multi-GPU training. Follow the instructions [here](https://huggingface.co/docs/accelerate/basic_tutorials/launch) for running distributed training with `accelerate`. Here is an example command: ```bash accelerate launch --mixed_precision="fp16" --multi_gpu train_unconditional.py \ --dataset_name="huggan/pokemon" \ --resolution=64 --center_crop --random_flip \ --output_dir="ddpm-ema-pokemon-64" \ --train_batch_size=16 \ --num_epochs=100 \ --gradient_accumulation_steps=1 \ --use_ema \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision="fp16" \ --logger="wandb" ``` To be able to use Weights and Biases (`wandb`) as a logger you need to install the library: `pip install wandb`. ### Using your own data To use your own dataset, there are 2 ways: - you can either provide your own folder as `--train_data_dir` - or you can upload your dataset to the hub (possibly as a private repo, if you prefer so), and simply pass the `--dataset_name` argument. Below, we explain both in more detail. 
#### Provide the dataset as a folder If you provide your own folders with images, the script expects the following directory structure: ```bash data_dir/xxx.png data_dir/xxy.png data_dir/[...]/xxz.png ``` In other words, the script will take care of gathering all images inside the folder. You can then run the script like this: ```bash accelerate launch train_unconditional.py \ --train_data_dir <path-to-train-directory> \ <other-arguments> ``` Internally, the script will use the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature which will automatically turn the folders into 🤗 Dataset objects. #### Upload your data to the hub, as a (possibly private) repo It's very easy (and convenient) to upload your image dataset to the hub using the [`ImageFolder`](https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder) feature available in 🤗 Datasets. Simply do the following: ```python from datasets import load_dataset # example 1: local folder dataset = load_dataset("imagefolder", data_dir="path_to_your_folder") # example 2: local files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="path_to_zip_file") # example 3: remote files (supported formats are tar, gzip, zip, xz, rar, zstd) dataset = load_dataset("imagefolder", data_files="https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip") # example 4: providing several splits dataset = load_dataset("imagefolder", data_files={"train": ["path/to/file1", "path/to/file2"], "test": ["path/to/file3", "path/to/file4"]}) ``` `ImageFolder` will create an `image` column containing the PIL-encoded images. Next, push it to the hub! ```python # assuming you have ran the huggingface-cli login command in a terminal dataset.push_to_hub("name_of_your_dataset") # if you want to push to a private repo, simply pass private=True: dataset.push_to_hub("name_of_your_dataset", private=True) ``` and that's it! You can now train your model by simply setting the `--dataset_name` argument to the name of your dataset on the hub. More on this can also be found in [this blog post](https://huggingface.co/blog/image-search-datasets).
diffusers/examples/unconditional_image_generation/README.md/0
{ "file_path": "diffusers/examples/unconditional_image_generation/README.md", "repo_id": "diffusers", "token_count": 1939 }
115
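The README above covers training only; sampling from a finished run is a short script with `DDPMPipeline`. A hedged sketch, using the example flowers checkpoint linked above:

```python
import torch
from diffusers import DDPMPipeline

pipeline = DDPMPipeline.from_pretrained("anton-l/ddpm-ema-flowers-64")
pipeline = pipeline.to("cuda" if torch.cuda.is_available() else "cpu")

# Generate a single 64x64 sample; fewer inference steps trade quality for speed.
image = pipeline(num_inference_steps=1000).images[0]
image.save("flower_sample.png")
```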
import argparse import torch from diffusers import MotionAdapter def convert_motion_module(original_state_dict): converted_state_dict = {} for k, v in original_state_dict.items(): if "pos_encoder" in k: continue else: converted_state_dict[ k.replace(".norms.0", ".norm1") .replace(".norms.1", ".norm2") .replace(".ff_norm", ".norm3") .replace(".attention_blocks.0", ".attn1") .replace(".attention_blocks.1", ".attn2") .replace(".temporal_transformer", "") ] = v return converted_state_dict def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, required=True) parser.add_argument("--output_path", type=str, required=True) parser.add_argument("--use_motion_mid_block", action="store_true") parser.add_argument("--motion_max_seq_length", type=int, default=32) parser.add_argument("--save_fp16", action="store_true") return parser.parse_args() if __name__ == "__main__": args = get_args() state_dict = torch.load(args.ckpt_path, map_location="cpu") if "state_dict" in state_dict.keys(): state_dict = state_dict["state_dict"] conv_state_dict = convert_motion_module(state_dict) adapter = MotionAdapter( use_motion_mid_block=args.use_motion_mid_block, motion_max_seq_length=args.motion_max_seq_length ) # skip loading position embeddings adapter.load_state_dict(conv_state_dict, strict=False) adapter.save_pretrained(args.output_path) if args.save_fp16: adapter.to(torch.float16).save_pretrained(args.output_path, variant="fp16")
diffusers/scripts/convert_animatediff_motion_module_to_diffusers.py/0
{ "file_path": "diffusers/scripts/convert_animatediff_motion_module_to_diffusers.py", "repo_id": "diffusers", "token_count": 731 }
116
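Once the converter above has written the adapter directory, it can be loaded back as a `MotionAdapter` and plugged into an AnimateDiff pipeline. The sketch below is illustrative: the base model id and scheduler settings are assumptions, and any Stable Diffusion v1.5-style checkpoint should work in their place.

```python
import torch
from diffusers import AnimateDiffPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

adapter = MotionAdapter.from_pretrained("path/to/converted/adapter", torch_dtype=torch.float16)
pipe = AnimateDiffPipeline.from_pretrained(
    "emilianJR/epiCRealism",  # illustrative SD 1.5-style base model
    motion_adapter=adapter,
    torch_dtype=torch.float16,
).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, beta_schedule="linear")

frames = pipe(
    "a rocket launching into space",
    num_frames=16,
    num_inference_steps=25,
).frames[0]
export_to_gif(frames, "rocket.gif")
```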
# Run this script to convert the Stable Cascade model weights to a diffusers pipeline. import argparse from contextlib import nullcontext import torch from safetensors.torch import load_file from transformers import ( AutoTokenizer, CLIPConfig, CLIPImageProcessor, CLIPTextModelWithProjection, CLIPVisionModelWithProjection, ) from diffusers import ( DDPMWuerstchenScheduler, StableCascadeCombinedPipeline, StableCascadeDecoderPipeline, StableCascadePriorPipeline, ) from diffusers.loaders.single_file_utils import convert_stable_cascade_unet_single_file_to_diffusers from diffusers.models import StableCascadeUNet from diffusers.models.modeling_utils import load_model_dict_into_meta from diffusers.pipelines.wuerstchen import PaellaVQModel from diffusers.utils import is_accelerate_available if is_accelerate_available(): from accelerate import init_empty_weights parser = argparse.ArgumentParser(description="Convert Stable Cascade model weights to a diffusers pipeline") parser.add_argument("--model_path", type=str, help="Location of Stable Cascade weights") parser.add_argument("--stage_c_name", type=str, default="stage_c.safetensors", help="Name of stage c checkpoint file") parser.add_argument("--stage_b_name", type=str, default="stage_b.safetensors", help="Name of stage b checkpoint file") parser.add_argument("--skip_stage_c", action="store_true", help="Skip converting stage c") parser.add_argument("--skip_stage_b", action="store_true", help="Skip converting stage b") parser.add_argument("--use_safetensors", action="store_true", help="Use SafeTensors for conversion") parser.add_argument( "--prior_output_path", default="stable-cascade-prior", type=str, help="Hub organization to save the pipelines to" ) parser.add_argument( "--decoder_output_path", type=str, default="stable-cascade-decoder", help="Hub organization to save the pipelines to", ) parser.add_argument( "--combined_output_path", type=str, default="stable-cascade-combined", help="Hub organization to save the pipelines to", ) parser.add_argument("--save_combined", action="store_true") parser.add_argument("--push_to_hub", action="store_true", help="Push to hub") parser.add_argument("--variant", type=str, help="Set to bf16 to save bfloat16 weights") args = parser.parse_args() if args.skip_stage_b and args.skip_stage_c: raise ValueError("At least one stage should be converted") if (args.skip_stage_b or args.skip_stage_c) and args.save_combined: raise ValueError("Cannot skip stages when creating a combined pipeline") model_path = args.model_path device = "cpu" if args.variant == "bf16": dtype = torch.bfloat16 else: dtype = torch.float32 # set paths to model weights prior_checkpoint_path = f"{model_path}/{args.stage_c_name}" decoder_checkpoint_path = f"{model_path}/{args.stage_b_name}" # Clip Text encoder and tokenizer config = CLIPConfig.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") config.text_config.projection_dim = config.projection_dim text_encoder = CLIPTextModelWithProjection.from_pretrained( "laion/CLIP-ViT-bigG-14-laion2B-39B-b160k", config=config.text_config ) tokenizer = AutoTokenizer.from_pretrained("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") # image processor feature_extractor = CLIPImageProcessor() image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") # scheduler for prior and decoder scheduler = DDPMWuerstchenScheduler() ctx = init_empty_weights if is_accelerate_available() else nullcontext if not args.skip_stage_c: # Prior if args.use_safetensors: prior_orig_state_dict = 
load_file(prior_checkpoint_path, device=device) else: prior_orig_state_dict = torch.load(prior_checkpoint_path, map_location=device) prior_state_dict = convert_stable_cascade_unet_single_file_to_diffusers(prior_orig_state_dict) with ctx(): prior_model = StableCascadeUNet( in_channels=16, out_channels=16, timestep_ratio_embedding_dim=64, patch_size=1, conditioning_dim=2048, block_out_channels=[2048, 2048], num_attention_heads=[32, 32], down_num_layers_per_block=[8, 24], up_num_layers_per_block=[24, 8], down_blocks_repeat_mappers=[1, 1], up_blocks_repeat_mappers=[1, 1], block_types_per_layer=[ ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ], clip_text_in_channels=1280, clip_text_pooled_in_channels=1280, clip_image_in_channels=768, clip_seq=4, kernel_size=3, dropout=[0.1, 0.1], self_attn=True, timestep_conditioning_type=["sca", "crp"], switch_level=[False], ) if is_accelerate_available(): load_model_dict_into_meta(prior_model, prior_state_dict) else: prior_model.load_state_dict(prior_state_dict) # Prior pipeline prior_pipeline = StableCascadePriorPipeline( prior=prior_model, tokenizer=tokenizer, text_encoder=text_encoder, image_encoder=image_encoder, scheduler=scheduler, feature_extractor=feature_extractor, ) prior_pipeline.to(dtype).save_pretrained( args.prior_output_path, push_to_hub=args.push_to_hub, variant=args.variant ) if not args.skip_stage_b: # Decoder if args.use_safetensors: decoder_orig_state_dict = load_file(decoder_checkpoint_path, device=device) else: decoder_orig_state_dict = torch.load(decoder_checkpoint_path, map_location=device) decoder_state_dict = convert_stable_cascade_unet_single_file_to_diffusers(decoder_orig_state_dict) with ctx(): decoder = StableCascadeUNet( in_channels=4, out_channels=4, timestep_ratio_embedding_dim=64, patch_size=2, conditioning_dim=1280, block_out_channels=[320, 640, 1280, 1280], down_num_layers_per_block=[2, 6, 28, 6], up_num_layers_per_block=[6, 28, 6, 2], down_blocks_repeat_mappers=[1, 1, 1, 1], up_blocks_repeat_mappers=[3, 3, 2, 2], num_attention_heads=[0, 0, 20, 20], block_types_per_layer=[ ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ["SDCascadeResBlock", "SDCascadeTimestepBlock", "SDCascadeAttnBlock"], ], clip_text_pooled_in_channels=1280, clip_seq=4, effnet_in_channels=16, pixel_mapper_in_channels=3, kernel_size=3, dropout=[0, 0, 0.1, 0.1], self_attn=True, timestep_conditioning_type=["sca"], ) if is_accelerate_available(): load_model_dict_into_meta(decoder, decoder_state_dict) else: decoder.load_state_dict(decoder_state_dict) # VQGAN from Wuerstchen-V2 vqmodel = PaellaVQModel.from_pretrained("warp-ai/wuerstchen", subfolder="vqgan") # Decoder pipeline decoder_pipeline = StableCascadeDecoderPipeline( decoder=decoder, text_encoder=text_encoder, tokenizer=tokenizer, vqgan=vqmodel, scheduler=scheduler ) decoder_pipeline.to(dtype).save_pretrained( args.decoder_output_path, push_to_hub=args.push_to_hub, variant=args.variant ) if args.save_combined: # Stable Cascade combined pipeline stable_cascade_pipeline = StableCascadeCombinedPipeline( # Decoder text_encoder=text_encoder, tokenizer=tokenizer, decoder=decoder, scheduler=scheduler, vqgan=vqmodel, # Prior prior_text_encoder=text_encoder, prior_tokenizer=tokenizer, prior_prior=prior_model, prior_scheduler=scheduler, prior_image_encoder=image_encoder, 
prior_feature_extractor=feature_extractor, ) stable_cascade_pipeline.to(dtype).save_pretrained( args.combined_output_path, push_to_hub=args.push_to_hub, variant=args.variant )
diffusers/scripts/convert_stable_cascade.py/0
{ "file_path": "diffusers/scripts/convert_stable_cascade.py", "repo_id": "diffusers", "token_count": 3605 }
117
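The two directories written by the script above (`--prior_output_path` and `--decoder_output_path`) form a two-stage pipeline: the prior turns the prompt into image embeddings and the decoder turns those embeddings into pixels. A hedged usage sketch, assuming the default output paths and bfloat16 weights:

```python
import torch
from diffusers import StableCascadeDecoderPipeline, StableCascadePriorPipeline

prompt = "an astronaut riding a horse, photorealistic"

prior = StableCascadePriorPipeline.from_pretrained("stable-cascade-prior", torch_dtype=torch.bfloat16).to("cuda")
decoder = StableCascadeDecoderPipeline.from_pretrained("stable-cascade-decoder", torch_dtype=torch.bfloat16).to("cuda")

prior_output = prior(prompt=prompt, num_inference_steps=20, guidance_scale=4.0)
image = decoder(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    num_inference_steps=10,
    guidance_scale=0.0,
).images[0]
image.save("astronaut.png")
```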
import argparse import json import os from datetime import date from pathlib import Path from slack_sdk import WebClient from tabulate import tabulate MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters parser = argparse.ArgumentParser() parser.add_argument("--slack_channel_name", default="diffusers-ci-nightly") def main(slack_channel_name=None): failed = [] passed = [] group_info = [] total_num_failed = 0 empty_file = False or len(list(Path().glob("*.log"))) == 0 total_empty_files = [] for log in Path().glob("*.log"): section_num_failed = 0 i = 0 with open(log) as f: for line in f: line = json.loads(line) i += 1 if line.get("nodeid", "") != "": test = line["nodeid"] if line.get("duration", None) is not None: duration = f'{line["duration"]:.4f}' if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split("_")[0]]) total_num_failed += 1 else: passed.append([test, duration, log.name.split("_")[0]]) empty_file = i == 0 group_info.append([str(log), section_num_failed, failed]) total_empty_files.append(empty_file) os.remove(log) failed = [] text = ( "🌞 There were no failures!" if not any(total_empty_files) else "Something went wrong there is at least one empty file - please check GH action results." ) no_error_payload = { "type": "section", "text": { "type": "plain_text", "text": text, "emoji": True, }, } message = "" payload = [ { "type": "header", "text": { "type": "plain_text", "text": "🤗 Results of the Diffusers scheduled nightly tests.", }, }, ] if total_num_failed > 0: for i, (name, num_failed, failed_tests) in enumerate(group_info): if num_failed > 0: if num_failed == 1: message += f"*{name}: {num_failed} failed test*\n" else: message += f"*{name}: {num_failed} failed tests*\n" failed_table = [] for test in failed_tests: failed_table.append(test[0].split("::")) failed_table = tabulate( failed_table, headers=["Test Location", "Test Case", "Test Name"], showindex="always", tablefmt="grid", maxcolwidths=[12, 12, 12], ) message += "\n```\n" + failed_table + "\n```" if total_empty_files[i]: message += f"\n*{name}: Warning! Empty file - please check the GitHub action job *\n" print(f"### {message}") else: payload.append(no_error_payload) if len(message) > MAX_LEN_MESSAGE: print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}") message = message[:MAX_LEN_MESSAGE] + "..." if len(message) != 0: md_report = { "type": "section", "text": {"type": "mrkdwn", "text": message}, } payload.append(md_report) action_button = { "type": "section", "text": {"type": "mrkdwn", "text": "*For more details:*"}, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/diffusers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) date_report = { "type": "context", "elements": [ { "type": "plain_text", "text": f"Nightly test results for {date.today()}", }, ], } payload.append(date_report) print(payload) client = WebClient(token=os.environ.get("SLACK_API_TOKEN")) client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload) if __name__ == "__main__": args = parser.parse_args() main(args.slack_channel_name)
diffusers/scripts/log_reports.py/0
{ "file_path": "diffusers/scripts/log_reports.py", "repo_id": "diffusers", "token_count": 2322 }
118
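The reporting script above expects each `*.log` file to contain one JSON object per line with at least `nodeid`, `duration`, and `outcome` keys (the shape produced by pytest report-log plugins). A minimal sketch of writing and re-reading such a file; the file name and test ids are illustrative:

```python
import json

records = [
    {"nodeid": "tests/test_pipeline.py::test_forward", "duration": 0.0421, "outcome": "passed"},
    {"nodeid": "tests/test_pipeline.py::test_backward", "duration": 1.3370, "outcome": "failed"},
]

with open("tests_torch_cuda_stats.log", "w") as f:
    for record in records:
        f.write(json.dumps(record) + "\n")

with open("tests_torch_cuda_stats.log") as f:
    parsed = [json.loads(line) for line in f]

failed = [r["nodeid"] for r in parsed if r.get("outcome") == "failed"]
print(failed)  # ['tests/test_pipeline.py::test_backward']
```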
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub.utils import validate_hf_hub_args from .single_file_utils import ( create_diffusers_vae_model_from_ldm, fetch_ldm_config_and_checkpoint, ) class FromOriginalVAEMixin: """ Load pretrained AutoencoderKL weights saved in the `.ckpt` or `.safetensors` format into a [`AutoencoderKL`]. """ @classmethod @validate_hf_hub_args def from_single_file(cls, pretrained_model_link_or_path, **kwargs): r""" Instantiate a [`AutoencoderKL`] from pretrained ControlNet weights saved in the original `.ckpt` or `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default. Parameters: pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A link to the `.ckpt` file (for example `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub. - A path to a *file* containing all pipeline weights. config_file (`str`, *optional*): Filepath to the configuration YAML file associated with the model. If not provided it will default to: https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml torch_dtype (`str` or `torch.dtype`, *optional*): Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the dtype is automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. cache_dir (`Union[str, os.PathLike]`, *optional*): Path to a directory where a downloaded pretrained model configuration is cached if the standard cache is not used. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to resume downloading the model weights and configuration files. If set to `False`, any incompletely downloaded files are deleted. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. local_files_only (`bool`, *optional*, defaults to `False`): Whether to only load local model weights and configuration files or not. If set to True, the model won't be downloaded from the Hub. token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from `diffusers-cli login` (stored in `~/.huggingface`) is used. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier allowed by Git. image_size (`int`, *optional*, defaults to 512): The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable Diffusion v2 base model. Use 768 for Stable Diffusion v2. 
scaling_factor (`float`, *optional*, defaults to 0.18215): The component-wise standard deviation of the trained latent space computed using the first batch of the training set. This is used to scale the latent space to have unit variance when training the diffusion model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load and saveable variables (for example the pipeline components of the specific pipeline class). The overwritten components are directly passed to the pipelines `__init__` method. See example below for more information. <Tip warning={true}> Make sure to pass both `image_size` and `scaling_factor` to `from_single_file()` if you're loading a VAE from SDXL or a Stable Diffusion v2 model or higher. </Tip> Examples: ```py from diffusers import AutoencoderKL url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" # can also be local file model = AutoencoderKL.from_single_file(url) ``` """ original_config_file = kwargs.pop("original_config_file", None) config_file = kwargs.pop("config_file", None) resume_download = kwargs.pop("resume_download", False) force_download = kwargs.pop("force_download", False) proxies = kwargs.pop("proxies", None) token = kwargs.pop("token", None) cache_dir = kwargs.pop("cache_dir", None) local_files_only = kwargs.pop("local_files_only", None) revision = kwargs.pop("revision", None) torch_dtype = kwargs.pop("torch_dtype", None) class_name = cls.__name__ if (config_file is not None) and (original_config_file is not None): raise ValueError( "You cannot pass both `config_file` and `original_config_file` to `from_single_file`. Please use only one of these arguments." ) original_config_file = original_config_file or config_file original_config, checkpoint = fetch_ldm_config_and_checkpoint( pretrained_model_link_or_path=pretrained_model_link_or_path, class_name=class_name, original_config_file=original_config_file, resume_download=resume_download, force_download=force_download, proxies=proxies, token=token, revision=revision, local_files_only=local_files_only, cache_dir=cache_dir, ) image_size = kwargs.pop("image_size", None) scaling_factor = kwargs.pop("scaling_factor", None) component = create_diffusers_vae_model_from_ldm( class_name, original_config, checkpoint, image_size=image_size, scaling_factor=scaling_factor, torch_dtype=torch_dtype, ) vae = component["vae"] if torch_dtype is not None: vae = vae.to(torch_dtype) return vae
diffusers/src/diffusers/loaders/autoencoder.py/0
{ "file_path": "diffusers/src/diffusers/loaders/autoencoder.py", "repo_id": "diffusers", "token_count": 3112 }
119
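As the tip in the docstring above notes, a VAE taken from an SDXL or Stable Diffusion 2.x checkpoint should be loaded with explicit `image_size` and `scaling_factor` values. A hedged sketch; the URL and the numeric values are illustrative assumptions, not guaranteed defaults:

```python
from diffusers import AutoencoderKL

# Illustrative: an SDXL-style VAE checkpoint with its resolution and latent
# scaling factor passed explicitly, as recommended in the docstring above.
url = "https://huggingface.co/stabilityai/sdxl-vae/blob/main/sdxl_vae.safetensors"
vae = AutoencoderKL.from_single_file(url, image_size=1024, scaling_factor=0.13025)
```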
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import math import flax.linen as nn import jax import jax.numpy as jnp def _query_chunk_attention(query, key, value, precision, key_chunk_size: int = 4096): """Multi-head dot product attention with a limited number of queries.""" num_kv, num_heads, k_features = key.shape[-3:] v_features = value.shape[-1] key_chunk_size = min(key_chunk_size, num_kv) query = query / jnp.sqrt(k_features) @functools.partial(jax.checkpoint, prevent_cse=False) def summarize_chunk(query, key, value): attn_weights = jnp.einsum("...qhd,...khd->...qhk", query, key, precision=precision) max_score = jnp.max(attn_weights, axis=-1, keepdims=True) max_score = jax.lax.stop_gradient(max_score) exp_weights = jnp.exp(attn_weights - max_score) exp_values = jnp.einsum("...vhf,...qhv->...qhf", value, exp_weights, precision=precision) max_score = jnp.einsum("...qhk->...qh", max_score) return (exp_values, exp_weights.sum(axis=-1), max_score) def chunk_scanner(chunk_idx): # julienne key array key_chunk = jax.lax.dynamic_slice( operand=key, start_indices=[0] * (key.ndim - 3) + [chunk_idx, 0, 0], # [...,k,h,d] slice_sizes=list(key.shape[:-3]) + [key_chunk_size, num_heads, k_features], # [...,k,h,d] ) # julienne value array value_chunk = jax.lax.dynamic_slice( operand=value, start_indices=[0] * (value.ndim - 3) + [chunk_idx, 0, 0], # [...,v,h,d] slice_sizes=list(value.shape[:-3]) + [key_chunk_size, num_heads, v_features], # [...,v,h,d] ) return summarize_chunk(query, key_chunk, value_chunk) chunk_values, chunk_weights, chunk_max = jax.lax.map(f=chunk_scanner, xs=jnp.arange(0, num_kv, key_chunk_size)) global_max = jnp.max(chunk_max, axis=0, keepdims=True) max_diffs = jnp.exp(chunk_max - global_max) chunk_values *= jnp.expand_dims(max_diffs, axis=-1) chunk_weights *= max_diffs all_values = chunk_values.sum(axis=0) all_weights = jnp.expand_dims(chunk_weights, -1).sum(axis=0) return all_values / all_weights def jax_memory_efficient_attention( query, key, value, precision=jax.lax.Precision.HIGHEST, query_chunk_size: int = 1024, key_chunk_size: int = 4096 ): r""" Flax Memory-efficient multi-head dot product attention. 
https://arxiv.org/abs/2112.05682v2 https://github.com/AminRezaei0x443/memory-efficient-attention Args: query (`jnp.ndarray`): (batch..., query_length, head, query_key_depth_per_head) key (`jnp.ndarray`): (batch..., key_value_length, head, query_key_depth_per_head) value (`jnp.ndarray`): (batch..., key_value_length, head, value_depth_per_head) precision (`jax.lax.Precision`, *optional*, defaults to `jax.lax.Precision.HIGHEST`): numerical precision for computation query_chunk_size (`int`, *optional*, defaults to 1024): chunk size to divide query array value must divide query_length equally without remainder key_chunk_size (`int`, *optional*, defaults to 4096): chunk size to divide key and value array value must divide key_value_length equally without remainder Returns: (`jnp.ndarray`) with shape of (batch..., query_length, head, value_depth_per_head) """ num_q, num_heads, q_features = query.shape[-3:] def chunk_scanner(chunk_idx, _): # julienne query array query_chunk = jax.lax.dynamic_slice( operand=query, start_indices=([0] * (query.ndim - 3)) + [chunk_idx, 0, 0], # [...,q,h,d] slice_sizes=list(query.shape[:-3]) + [min(query_chunk_size, num_q), num_heads, q_features], # [...,q,h,d] ) return ( chunk_idx + query_chunk_size, # unused ignore it _query_chunk_attention( query=query_chunk, key=key, value=value, precision=precision, key_chunk_size=key_chunk_size ), ) _, res = jax.lax.scan( f=chunk_scanner, init=0, xs=None, length=math.ceil(num_q / query_chunk_size), # start counter # stop counter ) return jnp.concatenate(res, axis=-3) # fuse the chunked result back class FlaxAttention(nn.Module): r""" A Flax multi-head attention module as described in: https://arxiv.org/abs/1706.03762 Parameters: query_dim (:obj:`int`): Input hidden states dimension heads (:obj:`int`, *optional*, defaults to 8): Number of heads dim_head (:obj:`int`, *optional*, defaults to 64): Hidden states dimension inside each head dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://arxiv.org/abs/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. 
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ query_dim: int heads: int = 8 dim_head: int = 64 dropout: float = 0.0 use_memory_efficient_attention: bool = False split_head_dim: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): inner_dim = self.dim_head * self.heads self.scale = self.dim_head**-0.5 # Weights were exported with old names {to_q, to_k, to_v, to_out} self.query = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_q") self.key = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_k") self.value = nn.Dense(inner_dim, use_bias=False, dtype=self.dtype, name="to_v") self.proj_attn = nn.Dense(self.query_dim, dtype=self.dtype, name="to_out_0") self.dropout_layer = nn.Dropout(rate=self.dropout) def reshape_heads_to_batch_dim(self, tensor): batch_size, seq_len, dim = tensor.shape head_size = self.heads tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size) tensor = jnp.transpose(tensor, (0, 2, 1, 3)) tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size) return tensor def reshape_batch_dim_to_heads(self, tensor): batch_size, seq_len, dim = tensor.shape head_size = self.heads tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) tensor = jnp.transpose(tensor, (0, 2, 1, 3)) tensor = tensor.reshape(batch_size // head_size, seq_len, dim * head_size) return tensor def __call__(self, hidden_states, context=None, deterministic=True): context = hidden_states if context is None else context query_proj = self.query(hidden_states) key_proj = self.key(context) value_proj = self.value(context) if self.split_head_dim: b = hidden_states.shape[0] query_states = jnp.reshape(query_proj, (b, -1, self.heads, self.dim_head)) key_states = jnp.reshape(key_proj, (b, -1, self.heads, self.dim_head)) value_states = jnp.reshape(value_proj, (b, -1, self.heads, self.dim_head)) else: query_states = self.reshape_heads_to_batch_dim(query_proj) key_states = self.reshape_heads_to_batch_dim(key_proj) value_states = self.reshape_heads_to_batch_dim(value_proj) if self.use_memory_efficient_attention: query_states = query_states.transpose(1, 0, 2) key_states = key_states.transpose(1, 0, 2) value_states = value_states.transpose(1, 0, 2) # this if statement create a chunk size for each layer of the unet # the chunk size is equal to the query_length dimension of the deepest layer of the unet flatten_latent_dim = query_states.shape[-3] if flatten_latent_dim % 64 == 0: query_chunk_size = int(flatten_latent_dim / 64) elif flatten_latent_dim % 16 == 0: query_chunk_size = int(flatten_latent_dim / 16) elif flatten_latent_dim % 4 == 0: query_chunk_size = int(flatten_latent_dim / 4) else: query_chunk_size = int(flatten_latent_dim) hidden_states = jax_memory_efficient_attention( query_states, key_states, value_states, query_chunk_size=query_chunk_size, key_chunk_size=4096 * 4 ) hidden_states = hidden_states.transpose(1, 0, 2) else: # compute attentions if self.split_head_dim: attention_scores = jnp.einsum("b t n h, b f n h -> b n f t", key_states, query_states) else: attention_scores = jnp.einsum("b i d, b j d->b i j", query_states, key_states) attention_scores = attention_scores * self.scale attention_probs = nn.softmax(attention_scores, axis=-1 if self.split_head_dim else 2) # attend to values if self.split_head_dim: hidden_states = jnp.einsum("b n f t, b t n h -> b f n h", attention_probs, value_states) b = hidden_states.shape[0] hidden_states = jnp.reshape(hidden_states, (b, -1, self.heads * 
self.dim_head)) else: hidden_states = jnp.einsum("b i j, b j d -> b i d", attention_probs, value_states) hidden_states = self.reshape_batch_dim_to_heads(hidden_states) hidden_states = self.proj_attn(hidden_states) return self.dropout_layer(hidden_states, deterministic=deterministic) class FlaxBasicTransformerBlock(nn.Module): r""" A Flax transformer block layer with `GLU` (Gated Linear Unit) activation function as described in: https://arxiv.org/abs/1706.03762 Parameters: dim (:obj:`int`): Inner hidden states dimension n_heads (:obj:`int`): Number of heads d_head (:obj:`int`): Hidden states dimension inside each head dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate only_cross_attention (`bool`, defaults to `False`): Whether to only apply cross attention. dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://arxiv.org/abs/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. """ dim: int n_heads: int d_head: int dropout: float = 0.0 only_cross_attention: bool = False dtype: jnp.dtype = jnp.float32 use_memory_efficient_attention: bool = False split_head_dim: bool = False def setup(self): # self attention (or cross_attention if only_cross_attention is True) self.attn1 = FlaxAttention( self.dim, self.n_heads, self.d_head, self.dropout, self.use_memory_efficient_attention, self.split_head_dim, dtype=self.dtype, ) # cross attention self.attn2 = FlaxAttention( self.dim, self.n_heads, self.d_head, self.dropout, self.use_memory_efficient_attention, self.split_head_dim, dtype=self.dtype, ) self.ff = FlaxFeedForward(dim=self.dim, dropout=self.dropout, dtype=self.dtype) self.norm1 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype) self.norm2 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype) self.norm3 = nn.LayerNorm(epsilon=1e-5, dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.dropout) def __call__(self, hidden_states, context, deterministic=True): # self attention residual = hidden_states if self.only_cross_attention: hidden_states = self.attn1(self.norm1(hidden_states), context, deterministic=deterministic) else: hidden_states = self.attn1(self.norm1(hidden_states), deterministic=deterministic) hidden_states = hidden_states + residual # cross attention residual = hidden_states hidden_states = self.attn2(self.norm2(hidden_states), context, deterministic=deterministic) hidden_states = hidden_states + residual # feed forward residual = hidden_states hidden_states = self.ff(self.norm3(hidden_states), deterministic=deterministic) hidden_states = hidden_states + residual return self.dropout_layer(hidden_states, deterministic=deterministic) class FlaxTransformer2DModel(nn.Module): r""" A Spatial Transformer layer with Gated Linear Unit (GLU) activation function as described in: https://arxiv.org/pdf/1506.02025.pdf Parameters: in_channels (:obj:`int`): Input number of channels n_heads (:obj:`int`): Number of heads d_head (:obj:`int`): Hidden states dimension inside each head depth (:obj:`int`, *optional*, defaults to 1): Number of transformers block dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate use_linear_projection (`bool`, defaults to `False`): tbd only_cross_attention (`bool`, defaults to `False`): tbd 
dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` use_memory_efficient_attention (`bool`, *optional*, defaults to `False`): enable memory efficient attention https://arxiv.org/abs/2112.05682 split_head_dim (`bool`, *optional*, defaults to `False`): Whether to split the head dimension into a new axis for the self-attention computation. In most cases, enabling this flag should speed up the computation for Stable Diffusion 2.x and Stable Diffusion XL. """ in_channels: int n_heads: int d_head: int depth: int = 1 dropout: float = 0.0 use_linear_projection: bool = False only_cross_attention: bool = False dtype: jnp.dtype = jnp.float32 use_memory_efficient_attention: bool = False split_head_dim: bool = False def setup(self): self.norm = nn.GroupNorm(num_groups=32, epsilon=1e-5) inner_dim = self.n_heads * self.d_head if self.use_linear_projection: self.proj_in = nn.Dense(inner_dim, dtype=self.dtype) else: self.proj_in = nn.Conv( inner_dim, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, ) self.transformer_blocks = [ FlaxBasicTransformerBlock( inner_dim, self.n_heads, self.d_head, dropout=self.dropout, only_cross_attention=self.only_cross_attention, dtype=self.dtype, use_memory_efficient_attention=self.use_memory_efficient_attention, split_head_dim=self.split_head_dim, ) for _ in range(self.depth) ] if self.use_linear_projection: self.proj_out = nn.Dense(inner_dim, dtype=self.dtype) else: self.proj_out = nn.Conv( inner_dim, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, ) self.dropout_layer = nn.Dropout(rate=self.dropout) def __call__(self, hidden_states, context, deterministic=True): batch, height, width, channels = hidden_states.shape residual = hidden_states hidden_states = self.norm(hidden_states) if self.use_linear_projection: hidden_states = hidden_states.reshape(batch, height * width, channels) hidden_states = self.proj_in(hidden_states) else: hidden_states = self.proj_in(hidden_states) hidden_states = hidden_states.reshape(batch, height * width, channels) for transformer_block in self.transformer_blocks: hidden_states = transformer_block(hidden_states, context, deterministic=deterministic) if self.use_linear_projection: hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch, height, width, channels) else: hidden_states = hidden_states.reshape(batch, height, width, channels) hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states + residual return self.dropout_layer(hidden_states, deterministic=deterministic) class FlaxFeedForward(nn.Module): r""" Flax module that encapsulates two Linear layers separated by a non-linearity. It is the counterpart of PyTorch's [`FeedForward`] class, with the following simplifications: - The activation function is currently hardcoded to a gated linear unit from: https://arxiv.org/abs/2002.05202 - `dim_out` is equal to `dim`. - The number of hidden dimensions is hardcoded to `dim * 4` in [`FlaxGELU`]. 
Parameters: dim (:obj:`int`): Inner hidden states dimension dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ dim: int dropout: float = 0.0 dtype: jnp.dtype = jnp.float32 def setup(self): # The second linear layer needs to be called # net_2 for now to match the index of the Sequential layer self.net_0 = FlaxGEGLU(self.dim, self.dropout, self.dtype) self.net_2 = nn.Dense(self.dim, dtype=self.dtype) def __call__(self, hidden_states, deterministic=True): hidden_states = self.net_0(hidden_states, deterministic=deterministic) hidden_states = self.net_2(hidden_states) return hidden_states class FlaxGEGLU(nn.Module): r""" Flax implementation of a Linear layer followed by the variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202. Parameters: dim (:obj:`int`): Input hidden states dimension dropout (:obj:`float`, *optional*, defaults to 0.0): Dropout rate dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32): Parameters `dtype` """ dim: int dropout: float = 0.0 dtype: jnp.dtype = jnp.float32 def setup(self): inner_dim = self.dim * 4 self.proj = nn.Dense(inner_dim * 2, dtype=self.dtype) self.dropout_layer = nn.Dropout(rate=self.dropout) def __call__(self, hidden_states, deterministic=True): hidden_states = self.proj(hidden_states) hidden_linear, hidden_gelu = jnp.split(hidden_states, 2, axis=2) return self.dropout_layer(hidden_linear * nn.gelu(hidden_gelu), deterministic=deterministic)
diffusers/src/diffusers/models/attention_flax.py/0
{ "file_path": "diffusers/src/diffusers/models/attention_flax.py", "repo_id": "diffusers", "token_count": 9031 }
120
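The attention modules above are ordinary Flax linen modules, so they follow the usual init/apply pattern. Below is a minimal self-attention sketch, assuming jax and flax are installed; the batch size, sequence length, and hidden size are arbitrary illustrative values, not taken from any real pipeline.

import jax
import jax.numpy as jnp

from diffusers.models.attention_flax import FlaxAttention

# inner_dim = heads * dim_head = 320 here, projected back to query_dim by to_out_0.
attn = FlaxAttention(query_dim=320, heads=8, dim_head=40)

batch, seq_len = 2, 64
hidden_states = jnp.ones((batch, seq_len, 320), dtype=jnp.float32)

# Flax modules are stateless: `init` builds the parameter tree, `apply` runs the forward pass.
params = attn.init(jax.random.PRNGKey(0), hidden_states)
out = attn.apply(params, hidden_states)  # self-attention, since no `context` is passed
assert out.shape == (batch, seq_len, 320)

Passing a `context` array of shape (batch, context_length, context_dim) instead would exercise the cross-attention path, and setting use_memory_efficient_attention=True routes the computation through jax_memory_efficient_attention.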
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch - Flax general utilities."""

import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


#####################
# PyTorch => Flax #
#####################

# Adapted from https://github.com/huggingface/transformers/blob/c603c80f46881ae18b2ca50770ef65fa4033eacd/src/transformers/modeling_flax_pytorch_utils.py#L69
# and https://github.com/patil-suraj/stable-diffusion-jax/blob/main/stable_diffusion_jax/convert_diffusers_to_jax.py
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)

    # rename attention layers
    if len(pt_tuple_key) > 1:
        for rename_from, rename_to in (
            ("to_out_0", "proj_attn"),
            ("to_k", "key"),
            ("to_v", "value"),
            ("to_q", "query"),
        ):
            if pt_tuple_key[-2] == rename_from:
                weight_name = pt_tuple_key[-1]
                weight_name = "kernel" if weight_name == "weight" else weight_name
                renamed_pt_tuple_key = pt_tuple_key[:-2] + (rename_to, weight_name)
                if renamed_pt_tuple_key in random_flax_state_dict:
                    assert random_flax_state_dict[renamed_pt_tuple_key].shape == pt_tensor.T.shape
                    return renamed_pt_tuple_key, pt_tensor.T

    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
    if pt_tuple_key[-1] == "weight" and renamed_pt_tuple_key in random_flax_state_dict:
        # return the renamed embedding key; returning the stale key from above would mis-name the weight
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameter names to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weights so that a warning is raised
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py/0
{ "file_path": "diffusers/src/diffusers/models/modeling_flax_pytorch_utils.py", "repo_id": "diffusers", "token_count": 2325 }
121
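A conversion sketch for the utilities above, under stated assumptions: the tiny UNet configuration is arbitrary and chosen only so the PyTorch and Flax models share an architecture and the parameter shapes line up; in practice the PyTorch weights would usually come from a real checkpoint via from_pretrained rather than a freshly initialized model.

from diffusers import FlaxUNet2DConditionModel, UNet2DConditionModel
from diffusers.models.modeling_flax_pytorch_utils import convert_pytorch_state_dict_to_flax

# Illustrative tiny configuration shared by both frameworks (values are arbitrary).
config = dict(
    sample_size=8,
    in_channels=4,
    out_channels=4,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    block_out_channels=(32, 64),
    layers_per_block=1,
    cross_attention_dim=32,
    attention_head_dim=8,
)

pt_model = UNet2DConditionModel(**config)        # PyTorch weights to convert
flax_model = FlaxUNet2DConditionModel(**config)  # same architecture, used only for its random param tree

flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
# `flax_params` is a nested dict of jnp arrays whose layout matches the Flax parameter tree.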
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...utils import BaseOutput, deprecate, is_torch_version, logging from ..attention import BasicTransformerBlock from ..embeddings import ImagePositionalEmbeddings, PatchEmbed, PixArtAlphaTextProjection from ..modeling_utils import ModelMixin from ..normalization import AdaLayerNormSingle logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class Transformer2DModelOutput(BaseOutput): """ The output of [`Transformer2DModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete): The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability distributions for the unnoised latent pixels. """ sample: torch.FloatTensor class Transformer2DModel(ModelMixin, ConfigMixin): """ A 2D Transformer model for image-like data. Parameters: num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. in_channels (`int`, *optional*): The number of channels in the input and output (specify if the input is **continuous**). num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use. sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**). This is fixed during training since it is used to learn a number of position embeddings. num_vector_embeds (`int`, *optional*): The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**). Includes the class for the masked latent pixel. activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward. num_embeds_ada_norm ( `int`, *optional*): The number of diffusion steps used during training. Pass if at least one of the norm_layers is `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`. attention_bias (`bool`, *optional*): Configure if the `TransformerBlocks` attention should contain a bias parameter. 
""" _supports_gradient_checkpointing = True @register_to_config def __init__( self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, patch_size: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, use_linear_projection: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_type: str = "layer_norm", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen' norm_elementwise_affine: bool = True, norm_eps: float = 1e-5, attention_type: str = "default", caption_channels: int = None, interpolation_scale: float = None, ): super().__init__() if patch_size is not None: if norm_type not in ["ada_norm", "ada_norm_zero", "ada_norm_single"]: raise NotImplementedError( f"Forward pass is not implemented when `patch_size` is not None and `norm_type` is '{norm_type}'." ) elif norm_type in ["ada_norm", "ada_norm_zero"] and num_embeds_ada_norm is None: raise ValueError( f"When using a `patch_size` and this `norm_type` ({norm_type}), `num_embeds_ada_norm` cannot be None." ) self.use_linear_projection = use_linear_projection self.num_attention_heads = num_attention_heads self.attention_head_dim = attention_head_dim inner_dim = num_attention_heads * attention_head_dim conv_cls = nn.Conv2d linear_cls = nn.Linear # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` # Define whether input is continuous or discrete depending on configuration self.is_input_continuous = (in_channels is not None) and (patch_size is None) self.is_input_vectorized = num_vector_embeds is not None self.is_input_patches = in_channels is not None and patch_size is not None if norm_type == "layer_norm" and num_embeds_ada_norm is not None: deprecation_message = ( f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or" " incorrectly set to `'layer_norm'`. Make sure to set `norm_type` to `'ada_norm'` in the config." " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect" " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it" " would be very nice if you could open a Pull request for the `transformer/config.json` file" ) deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False) norm_type = "ada_norm" if self.is_input_continuous and self.is_input_vectorized: raise ValueError( f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make" " sure that either `in_channels` or `num_vector_embeds` is None." ) elif self.is_input_vectorized and self.is_input_patches: raise ValueError( f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make" " sure that either `num_vector_embeds` or `num_patches` is None." 
) elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches: raise ValueError( f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:" f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None." ) # 2. Define input layers if self.is_input_continuous: self.in_channels = in_channels self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) if use_linear_projection: self.proj_in = linear_cls(in_channels, inner_dim) else: self.proj_in = conv_cls(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) elif self.is_input_vectorized: assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed" self.height = sample_size self.width = sample_size self.num_vector_embeds = num_vector_embeds self.num_latent_pixels = self.height * self.width self.latent_image_embedding = ImagePositionalEmbeddings( num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width ) elif self.is_input_patches: assert sample_size is not None, "Transformer2DModel over patched input must provide sample_size" self.height = sample_size self.width = sample_size self.patch_size = patch_size interpolation_scale = ( interpolation_scale if interpolation_scale is not None else max(self.config.sample_size // 64, 1) ) self.pos_embed = PatchEmbed( height=sample_size, width=sample_size, patch_size=patch_size, in_channels=in_channels, embed_dim=inner_dim, interpolation_scale=interpolation_scale, ) # 3. Define transformers blocks self.transformer_blocks = nn.ModuleList( [ BasicTransformerBlock( inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm, attention_bias=attention_bias, only_cross_attention=only_cross_attention, double_self_attention=double_self_attention, upcast_attention=upcast_attention, norm_type=norm_type, norm_elementwise_affine=norm_elementwise_affine, norm_eps=norm_eps, attention_type=attention_type, ) for d in range(num_layers) ] ) # 4. Define output layers self.out_channels = in_channels if out_channels is None else out_channels if self.is_input_continuous: # TODO: should use out_channels for continuous projections if use_linear_projection: self.proj_out = linear_cls(inner_dim, in_channels) else: self.proj_out = conv_cls(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) elif self.is_input_vectorized: self.norm_out = nn.LayerNorm(inner_dim) self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1) elif self.is_input_patches and norm_type != "ada_norm_single": self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6) self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim) self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels) elif self.is_input_patches and norm_type == "ada_norm_single": self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6) self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5) self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels) # 5. PixArt-Alpha blocks. 
self.adaln_single = None self.use_additional_conditions = False if norm_type == "ada_norm_single": self.use_additional_conditions = self.config.sample_size == 128 # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use # additional conditions until we find better name self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=self.use_additional_conditions) self.caption_projection = None if caption_channels is not None: self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim) self.gradient_checkpointing = False def _set_gradient_checkpointing(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value def forward( self, hidden_states: torch.Tensor, encoder_hidden_states: Optional[torch.Tensor] = None, timestep: Optional[torch.LongTensor] = None, added_cond_kwargs: Dict[str, torch.Tensor] = None, class_labels: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, attention_mask: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, return_dict: bool = True, ): """ The [`Transformer2DModel`] forward method. Args: hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous): Input `hidden_states`. encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*): Conditional embeddings for cross attention layer. If not given, cross-attention defaults to self-attention. timestep ( `torch.LongTensor`, *optional*): Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in `AdaLayerZeroNorm`. cross_attention_kwargs ( `Dict[str, Any]`, *optional*): A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under `self.processor` in [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). attention_mask ( `torch.Tensor`, *optional*): An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large negative values to the attention scores corresponding to "discard" tokens. encoder_attention_mask ( `torch.Tensor`, *optional*): Cross-attention mask applied to `encoder_hidden_states`. Two formats supported: * Mask `(batch, sequence_length)` True = keep, False = discard. * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard. If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format above. This bias will be added to the cross-attention scores. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. Returns: If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a `tuple` where the first element is the sample tensor. 
""" if cross_attention_kwargs is not None: if cross_attention_kwargs.get("scale", None) is not None: logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.") # ensure attention_mask is a bias, and give it a singleton query_tokens dimension. # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward. # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias. # expects mask of shape: # [batch, key_tokens] # adds singleton query_tokens dimension: # [batch, 1, key_tokens] # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) if attention_mask is not None and attention_mask.ndim == 2: # assume that mask is expressed as: # (1 = keep, 0 = discard) # convert mask into a bias that can be added to attention scores: # (keep = +0, discard = -10000.0) attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 attention_mask = attention_mask.unsqueeze(1) # convert encoder_attention_mask to a bias the same way we do for attention_mask if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0 encoder_attention_mask = encoder_attention_mask.unsqueeze(1) # 1. Input if self.is_input_continuous: batch, _, height, width = hidden_states.shape residual = hidden_states hidden_states = self.norm(hidden_states) if not self.use_linear_projection: hidden_states = self.proj_in(hidden_states) inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) else: inner_dim = hidden_states.shape[1] hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) hidden_states = self.proj_in(hidden_states) elif self.is_input_vectorized: hidden_states = self.latent_image_embedding(hidden_states) elif self.is_input_patches: height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size hidden_states = self.pos_embed(hidden_states) if self.adaln_single is not None: if self.use_additional_conditions and added_cond_kwargs is None: raise ValueError( "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`." ) batch_size = hidden_states.shape[0] timestep, embedded_timestep = self.adaln_single( timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype ) # 2. 
Blocks if self.caption_projection is not None: batch_size = hidden_states.shape[0] encoder_hidden_states = self.caption_projection(encoder_hidden_states) encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1]) for block in self.transformer_blocks: if self.training and self.gradient_checkpointing: def create_custom_forward(module, return_dict=None): def custom_forward(*inputs): if return_dict is not None: return module(*inputs, return_dict=return_dict) else: return module(*inputs) return custom_forward ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, timestep, cross_attention_kwargs, class_labels, **ckpt_kwargs, ) else: hidden_states = block( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, ) # 3. Output if self.is_input_continuous: if not self.use_linear_projection: hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() hidden_states = self.proj_out(hidden_states) else: hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() output = hidden_states + residual elif self.is_input_vectorized: hidden_states = self.norm_out(hidden_states) logits = self.out(hidden_states) # (batch, self.num_vector_embeds - 1, self.num_latent_pixels) logits = logits.permute(0, 2, 1) # log(p(x_0)) output = F.log_softmax(logits.double(), dim=1).float() if self.is_input_patches: if self.config.norm_type != "ada_norm_single": conditioning = self.transformer_blocks[0].norm1.emb( timestep, class_labels, hidden_dtype=hidden_states.dtype ) shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] hidden_states = self.proj_out_2(hidden_states) elif self.config.norm_type == "ada_norm_single": shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1) hidden_states = self.norm_out(hidden_states) # Modulation hidden_states = hidden_states * (1 + scale) + shift hidden_states = self.proj_out(hidden_states) hidden_states = hidden_states.squeeze(1) # unpatchify if self.adaln_single is None: height = width = int(hidden_states.shape[1] ** 0.5) hidden_states = hidden_states.reshape( shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) ) hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) output = hidden_states.reshape( shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) ) if not return_dict: return (output,) return Transformer2DModelOutput(sample=output)
diffusers/src/diffusers/models/transformers/transformer_2d.py/0
{ "file_path": "diffusers/src/diffusers/models/transformers/transformer_2d.py", "repo_id": "diffusers", "token_count": 10660 }
122
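An illustrative forward pass through the continuous-input path of the model above (the path taken inside UNet blocks, where `in_channels` is the number of feature-map channels); all dimensions below are arbitrary and only need to satisfy the group-norm and head-dimension constraints.

import torch

from diffusers.models.transformers.transformer_2d import Transformer2DModel

model = Transformer2DModel(
    num_attention_heads=2,
    attention_head_dim=8,  # inner_dim = 2 * 8 = 16
    in_channels=16,
    num_layers=1,
    cross_attention_dim=32,
    norm_num_groups=8,     # must divide in_channels
)

hidden_states = torch.randn(1, 16, 8, 8)       # (batch, channels, height, width)
encoder_hidden_states = torch.randn(1, 4, 32)  # (batch, sequence_length, cross_attention_dim)

with torch.no_grad():
    out = model(hidden_states, encoder_hidden_states=encoder_hidden_states).sample

# The continuous path projects in, runs the transformer blocks on flattened tokens,
# projects out, and adds the residual, so the output shape matches the input.
assert out.shape == hidden_states.shape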