Configures and builds TVM.
def build_tvm(llvm_config_path): """Configures and builds TVM.""" os.chdir("3rdparty/tvm") if not os.path.exists("build"): os.makedirs("build") os.chdir("build") # Copy the config.cmake as a baseline if not os.path.exists("config.cmake"): shutil.copy("../cmake/config.cmake", "config.cmake") # Set LLVM path and enable CUDA in config.cmake with open("config.cmake", "a") as config_file: config_file.write(f"set(USE_LLVM {llvm_config_path})\n") config_file.write("set(USE_CUDA ON)\n") # Run CMake and make try: subprocess.check_call(["cmake", ".."]) subprocess.check_call(["make", "-j"]) except subprocess.CalledProcessError as error: raise RuntimeError("Failed to build TVM") from error finally: # Go back to the original directory os.chdir("../../..")
Downloads and extracts LLVM, then configures TVM to use it.
def setup_llvm_for_tvm(): """Downloads and extracts LLVM, then configures TVM to use it.""" # Assume the download_and_extract_llvm function and its dependencies are defined elsewhere in this script extract_path = download_and_extract_llvm(LLVM_VERSION, IS_AARCH64, EXTRACT_PATH) llvm_config_path = os.path.join(extract_path, "bin", "llvm-config") return extract_path, llvm_config_path
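A hypothetical end-to-end invocation of the two helpers above (it assumes the module-level constants LLVM_VERSION, IS_AARCH64, and EXTRACT_PATH and the download_and_extract_llvm helper exist, as the docstrings note):

# Sketch only: fetch LLVM, then point the TVM build at its llvm-config.
extract_path, llvm_config_path = setup_llvm_for_tvm()
build_tvm(llvm_config_path)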
Rotates half the hidden dims of the input.
def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1)
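A quick, self-contained check of what this rotation does to a 4-element vector; the function body is inlined so the snippet runs on its own:

import torch

x = torch.tensor([1.0, 2.0, 3.0, 4.0])
x1 = x[..., : x.shape[-1] // 2]   # first half: [1., 2.]
x2 = x[..., x.shape[-1] // 2:]    # second half: [3., 4.]
print(torch.cat((-x2, x1), dim=-1))  # tensor([-3., -4.,  1.,  2.])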
Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. position_ids (`torch.Tensor`, *optional*): Deprecated and unused. unsqueeze_dim (`int`, *optional*, defaults to 1): The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. Returns: `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. """ cos = cos.unsqueeze(unsqueeze_dim) sin = sin.unsqueeze(unsqueeze_dim) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed
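A shape-level sketch of the default unsqueeze_dim=1 case, assuming rotate_half and apply_rotary_pos_emb above are in scope; the values are random and only the broadcasting is being illustrated:

import torch

batch, heads, seq_len, head_dim = 2, 8, 16, 64
q = torch.randn(batch, heads, seq_len, head_dim)
k = torch.randn(batch, heads, seq_len, head_dim)
cos = torch.randn(batch, seq_len, head_dim)  # becomes [batch, 1, seq_len, head_dim] after unsqueeze(1)
sin = torch.randn(batch, seq_len, head_dim)
q_embed, k_embed = apply_rotary_pos_emb(q, k, cos, sin)
print(q_embed.shape, k_embed.shape)  # torch.Size([2, 8, 16, 64]) twice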
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_heads, slen, head_dim = hidden_states.shape if n_rep == 1: return hidden_states hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
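A small sanity check of the claimed equivalence, with the expand/reshape from repeat_kv inlined so the snippet runs standalone:

import torch

batch, num_kv_heads, seqlen, head_dim, n_rep = 2, 4, 5, 8, 3
hidden = torch.randn(batch, num_kv_heads, seqlen, head_dim)
expanded = hidden[:, :, None, :, :].expand(batch, num_kv_heads, n_rep, seqlen, head_dim)
out = expanded.reshape(batch, num_kv_heads * n_rep, seqlen, head_dim)
assert torch.equal(out, torch.repeat_interleave(hidden, repeats=n_rep, dim=1))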
Set the logging level for the module's logger. Args: level (str or int): Can be the string name of the level (e.g., 'INFO') or the actual level (e.g., logging.INFO).
def set_log_level(level): """ Set the logging level for the module's logger. Args: level (str or int): Can be the string name of the level (e.g., 'INFO') or the actual level (e.g., logging.INFO). """ if isinstance(level, str): level = getattr(logging, level.upper(), logging.INFO) logger = logging.getLogger(__name__) logger.setLevel(level)
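For reference, the name-to-level resolution the helper relies on is a plain getattr on the logging module; a standalone sketch (the logger name here is illustrative):

import logging

level = getattr(logging, "info".upper(), logging.INFO)  # case-insensitive name lookup
logging.getLogger("bitblas").setLevel(level)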
Initialize the logger specific for this module with custom settings and a Tqdm-based handler.
def _init_logger(): """ Initialize the logger specific for this module with custom settings and a Tqdm-based handler. """ logger = logging.getLogger(__name__) handler = TqdmLoggingHandler() formatter = logging.Formatter( fmt="%(asctime)s [BitBLAS:%(levelname)s]: %(message)s", datefmt="%Y-%m-%d %H:%M:%S") handler.setFormatter(formatter) logger.addHandler(handler) logger.propagate = False set_log_level('WARNING')
Normalize the primfunc to normal form
def normalize_prim_func(sch: tir.Schedule) -> Optional[List[BlockInfo]]: """Normalize the primfunc to normal form""" try: result = _normalize_prim_func(sch) if result is None: return None except Exception: # pylint: disable=broad-except return None def _iter_kind(i: tir.IterVar) -> str: return { tir.IterVar.DataPar: "S", tir.IterVar.CommReduce: "R", }.get(i.iter_type, "O") blocks: List[BlockInfo] = [] for block, loops, iters, is_reduction in zip(*result): blocks.append( BlockInfo( name=sch.get(block).name_hint, iters=[ IterInfo( kind=_iter_kind(iter), # type: ignore var=iter.var, dom=iter.dom, loop_rv=loop, ) for loop, iter in zip(loops, iters) ], block_rv=block, reduction_block=is_reduction, )) return blocks
Collect the block iter variables used in the access region of a buffer region.
def collect_block_iter_vars_used_in_access_region(block: tir.Block, region: List[ir.Range]) -> Set[tir.Var]: """Collect the block iter variables used in the access region of a buffer region.""" tir_vars = set() for expr in region: if expr.extent != 1: continue tir_vars |= collect_vars_used_in_prim_expr(expr.min) tir_vars &= set(iter_var.var for iter_var in block.iter_vars) return tir_vars
Collect the variables used in the PrimExpr.
def collect_vars_used_in_prim_expr(expr: tir.PrimExpr) -> Set[tir.Var]: """Collect the variables used in the PrimExpr.""" tir_vars = set() def _collect_tir_var(expr): if isinstance(expr, tir.Var): tir_vars.add(expr) tir.stmt_functor.post_order_visit(expr, _collect_tir_var) return tir_vars
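A minimal illustration of the visitor used above, assuming a local TVM installation; it collects the two Vars appearing in i * 8 + j:

import tvm
from tvm import tir

i = tir.Var("i", "int32")
j = tir.Var("j", "int32")
expr = i * 8 + j

found = set()

def _visit(node):
    if isinstance(node, tir.Var):
        found.add(node)

tir.stmt_functor.post_order_visit(expr, _visit)
print(found)  # {i, j}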
Detect the dominant read indices in the block.
def detect_dominant_read(block: tir.Block) -> tir.PrimExpr: """Detect the dominant read indices in the block.""" dominant_read = None num_read_iters = -1 for buffer_region in block.reads: tir_vars = collect_block_iter_vars_used_in_access_region(block, buffer_region.region) if num_read_iters < len(tir_vars): num_read_iters = len(tir_vars) dominant_read = buffer_region assert dominant_read is not None (result,) = dominant_read.buffer.offset_of([e.min for e in dominant_read.region]) return result
Check if the epilogue block is a broadcast pattern
def is_broadcast_epilogue( sch: tir.Schedule, block: tir.schedule.BlockRV, epilogue: tir.schedule.BlockRV, ) -> bool: """Check if the epilogue block is a broadcast pattern""" write_buffers = {r.buffer for r in sch.get(block).writes} epilogue_iters = {i.var: i for i in sch.get(epilogue).iter_vars if i.dom != 1} for buffer_region in sch.get(epilogue).reads: if buffer_region.buffer not in write_buffers: continue tir_vars = collect_block_iter_vars_used_in_access_region( sch.get(epilogue), buffer_region.region) if len(tir_vars) < len(epilogue_iters): return True return False
Get the target block from a schedule. Parameters ---------- sch : tir.Schedule The TIR schedule used to get target block. name : str The name of the target block. Returns ------- target_block : BlockRV The target block.
def get_block( sch: tir.Schedule, blocks: List[BlockInfo], name: str, ): """Get the target block from a schedule. Parameters ---------- sch : tir.Schedule The TIR schedule used to get target block. name : str The name of the target block. Returns ------- target_block : BlockRV The target block. """ target_block: tir.BlockRV = None for block_info in blocks: block = block_info.block_rv if sch.get(block).name_hint == name: target_block = block return target_block
Get the output blocks of a schedule. Parameters ---------- sch : tir.Schedule The TIR schedule used to get output blocks. blocks : List[BlockInfo] The blocks to be analyzed. Returns ------- output_blocks : List[BlockInfo] The output blocks.
def get_output_blocks( sch: tir.Schedule, blocks: List[BlockInfo], ): """Get the output blocks of a schedule. Parameters ---------- sch : tir.Schedule The TIR schedule used to get output blocks. blocks : List[BlockInfo] The blocks to be analyzed. Returns ------- output_blocks : List[BlockInfo] The output blocks. """ # collect arguments buffer func = sch.mod["main"] args = list(func.buffer_map.values()) output_blocks = [] for block_info in blocks: block = block_info.block_rv for write in sch.get(block).writes: if write.buffer in args: output_blocks.append(block) return output_blocks
Try to inline as many blocks as possible, and return the remaining blocks. Parameters ---------- sch : tir.Schedule The TIR schedule used to inline blocks. blocks : List[BlockInfo] The blocks to be inlined. Returns ------- remaining : List[BlockInfo] The remaining blocks that cannot be inlined.
def try_inline( sch: tir.Schedule, blocks: List[BlockInfo], ) -> List[BlockInfo]: """Try to inline as many blocks as possible, and return the remaining blocks. Parameters ---------- sch : tir.Schedule The TIR schedule used to inline blocks. blocks : List[BlockInfo] The blocks to be inlined. Returns ------- remaining : List[BlockInfo] The remaining blocks that cannot be inlined. """ def _trial(func: Callable): for i, block in enumerate(blocks): try: func(block.block_rv) except Exception: # pylint: disable=bare-except continue return i return None while True: i = _trial(sch.compute_inline) if i is None: i = _trial(sch.reverse_compute_inline) if i is None: break blocks.pop(i) return blocks
Try to inline contiguous spatial blocks in a schedule. Parameters ---------- sch : tir.Schedule The TIR schedule used to inline blocks. block_infos : List[BlockInfo] The blocks to try to inline. Returns ------- remaining : List[BlockInfo] The remaining blocks that cannot be inlined.
def try_inline_contiguous_spatial( sch: tir.Schedule, block_infos: List[BlockInfo], ) -> List[BlockInfo]: """Try to inline contiguous spatial blocks in a schedule. Parameters ---------- sch : tir.Schedule The TIR schedule used to inline blocks. block_infos : List[BlockInfo] The blocks to try to inline. Returns ------- remaining : List[BlockInfo] The remaining blocks that cannot be inlined. """ if block_infos is None: return None results = [] spatial_blocks = [] block: BlockInfo for block in block_infos: if block.is_injective(): spatial_blocks.append(block) elif spatial_blocks: results.extend(try_inline(sch, spatial_blocks)) results.append(block) spatial_blocks = [] else: results.append(block) if spatial_blocks: results.extend(try_inline(sch, spatial_blocks)) return results
Select a scheduling rule: case 1. if the main block has no reduce op, use the Elementwise rule. case 2. if the config enables tensorcore, use the TensorCore rule. case 3. if any([t > 1 for t in config.reduce_thread]), use the InnerThread Reduction rule. case 4. otherwise, use the general reduction rule.
def _apply_config( func: tir.PrimFunc, config=None, # todo(lei): update typing ) -> Optional[tir.Schedule]: """ Select a scheduling rule: case 1. if the main block has no reduce op, use the Elementwise rule. case 2. if the config enables tensorcore, use the TensorCore rule. case 3. if any([t > 1 for t in config.reduce_thread]), use the InnerThread Reduction rule. case 4. otherwise, use the general reduction rule. """ logger.debug("Apply config {}".format(config)) sch = tir.Schedule(func) root_block = get_root_block(sch) blocks = sch.get_child_blocks(root_block) reduction_blocks = get_reduction_blocks(sch, blocks) if not reduction_blocks: return bitblas.gpu.ElementWise().apply_config(func, config) elif config.use_tc: if config.arch.sm_version >= 80: # For A100(sm_80) or more advanced gpu, use MMA tensorization. return bitblas.gpu.MatmulTensorizationMMA().apply_config(func, config) else: # For other GPUs, use WMMA tensorization. return bitblas.gpu.MatmulTensorizationWMMA().apply_config(func, config) else: _reduction_rules = [] _reduction_rules.append(bitblas.gpu.GEMV()) if not any([t > 1 for t in config.reduce_thread]): # Matrix multiplication template doesn't support inner thread reduction _reduction_rules.append(bitblas.gpu.Matmul()) _reduction_rules.append(bitblas.gpu.GeneralReduction()) for rule in _reduction_rules: try: sch = rule.apply_config(func, config) except Exception as e_msg: logger.debug("Apply config failed: %s", e_msg) continue if sch is not None: return sch return None
Check if the block is a GEMV. Parameters ---------- sch : tir.Schedule The schedule block_info : BlockInfo The block info to be checked Returns ------- ret : Optional[List[tir.Buffer]] The vector buffers used in the GEMV if it is a GEMV, otherwise None.
def is_gemv(sch: tir.Schedule, block_info: BlockInfo) -> Optional[List[tir.Buffer]]: """Check if the block is a GEMV. Parameters ---------- sch : tir.Schedule The schedule block_info : BlockInfo The block info to be checked Returns ------- ret : Optional[List[tir.Buffer]] The vector buffers used in the GEMV if it is a GEMV, otherwise None. """ block = block_info.block_rv block_stmt = sch.get(block) conditions = [] conditions.append(block_info.is_reduction()) conditions.append(len(block_stmt.reads) >= 2) conditions.append(len(block_stmt.writes) == 1) conditions.append(_get_reduction_expr(block_stmt) is not None) conditions.append( len(collect_block_iter_vars_used_in_access_region(block_stmt, block_stmt.writes[0].region)) > 0) if not all(conditions): return None iter_num = len(block_stmt.iter_vars) ret = [ read.buffer for read in block_stmt.reads if len(collect_block_iter_vars_used_in_access_region(block_stmt, read.region)) < iter_num and len(collect_block_iter_vars_used_in_access_region(block_stmt, read.region)) > 0 ] if len(ret) == len(block_stmt.reads): func = sch.mod["main"] opt_shapes: Dict = {} if "opt_shapes" in func.attrs: opt_shapes = func.attrs["opt_shapes"] # check with dynamic symbolic and at least one is unit if not all([opt_shapes.get(buf.name, (1,))[0] == 1 for buf in ret]): return None elif len(ret) == 0: return None return ret
Normalize the main block.
def normalize( sch: tir.Schedule, block_info: BlockInfo, ) -> Optional[bool]: """Normalize the main block.""" block_stmt: tir.Block = sch.get(block_info.block_rv) access = arith.normalize_to_iter_sum( detect_dominant_read(block_stmt), input_iters={i.var: i.dom for i in block_stmt.iter_vars}, ) buffers_use_vars = [ collect_block_iter_vars_used_in_access_region(block_stmt, buf.region) for buf in block_stmt.writes ] buffers_use_vars.extend([ collect_block_iter_vars_used_in_access_region(block_stmt, buf.region) for buf in block_stmt.reads ]) if collect_vars_used_in_prim_expr(access.base) & set( iter_var.var for iter_var in block_stmt.iter_vars): return None iter_to_info = {i.var: i for i in block_info.iters} batch_loops, s_loops, r_loops, c_loops = [], [], [], [] inner_axis = access.args[-1].source.source is_inner_reduction = iter_to_info[inner_axis].kind == "R" for split_expr in access.args: var = split_expr.source.source info = iter_to_info.get(var) loop = info.loop_rv is_reduction = info.kind == "R" if split_expr.lower_factor > 1: if c_loops: return None loop, c_loop = sch.split(loop, factors=[None, split_expr.lower_factor]) # we only support the reduction dim being grouped atm if not is_reduction: return None c_loops.append(c_loop) if is_reduction: r_loops.append(loop) elif all([var in buf_vars for buf_vars in buffers_use_vars]): batch_loops.append(loop) else: s_loops.append(loop) assert s_loops assert r_loops if not c_loops: c_loops = [sch.add_unit_loop(block_info.block_rv)] if not batch_loops: batch_loops = [sch.add_unit_loop(block_info.block_rv)] sch.reorder(*batch_loops, *s_loops, *r_loops, *c_loops) sch.fuse(*batch_loops) sch.fuse(*s_loops) sch.fuse(*r_loops) return is_inner_reduction
Traverse the producer chain to find the argument index corresponding to the buffer.
def find_arg_idx_from_buffer_chain(sch: tir.Schedule, main_block: tir.schedule.BlockRV, buffer: tir.Buffer) -> int: """traverse to find the arg index from the buffer""" producers = sch.get_producers(main_block) # a head buffer has no producer blocks def find_args_index(sch: tir.Schedule, buffer: tir.Buffer): for i, param in enumerate(sch.mod["main"].params): if sch.mod["main"].buffer_map[param] == buffer: return i return None is_head_buffer = len(producers) == 0 if is_head_buffer: return find_args_index(sch, buffer) for block in sch.get_producers(main_block): if len(sch.get(block).reads) != 1 or len(sch.get(block).writes) != 1: continue for write in sch.get(block).writes: if write.buffer == buffer: return find_arg_idx_from_buffer_chain(sch, block, buffer) # if no buffer producer block found, it means the buffer is an input buffer return find_args_index(sch, buffer)
Detect iter traits based on the pattern C[S, I, J] += A[S, I, K] * B[S, J, K] Parameters ---------- block : tir.Block The block to be analyzed Returns ------- traits : Optional[Tuple[List[IterTrait]]] The detected iter traits for axes in A, B and C. None if the block does not match the pattern.
def detect_iter_traits(block: tir.Block) -> Optional[Tuple[List[IterTrait]]]: """Detect iter traits based on the pattern C[S, I, J] += A[S, I, K] * B[S, J, K] Parameters ---------- block : tir.Block The block to be analyzed Returns ------- traits : Optional[Tuple[List[IterTrait]]] The detected iter traits for axes in A, B and C. None if the block does not match the pattern. """ if len(block.reads) != 2 or len(block.writes) != 1: return None def get_access_axes(region: List[Range]) -> Set[Var]: axes: Set[Var] = set() for r in region: if not _is_one(r.extent): raise ValueError("Expect elemwise block access") axes = axes.union(set(undefined_vars(r.min))) return axes try: A_axes = get_access_axes(block.reads[0].region) B_axes = get_access_axes(block.reads[1].region) C_axes = get_access_axes(block.writes[0].region) except ValueError: return None traits: Dict[Var, IterTrait] = {} for iter_var in block.iter_vars: var = iter_var.var kind: IterKind if _is_one(iter_var.dom.extent): if iter_var.iter_type == tir.IterVar.CommReduce: # for simplified case (e.g. 1x1 conv kernel) kind = IterKind.kIter_K else: kind = IterKind.kIter_T elif iter_var.iter_type == iter_var.DataPar: if var in A_axes and var in B_axes and var in C_axes: kind = IterKind.kIter_S elif var in A_axes and var in C_axes: kind = IterKind.kIter_I elif var in B_axes and var in C_axes: kind = IterKind.kIter_J else: return None elif iter_var.iter_type == tir.IterVar.CommReduce: if var in A_axes and var in B_axes and var not in C_axes: kind = IterKind.kIter_K else: return None else: return None traits[var] = IterTrait(kind, iter_var.dom.extent) # A Gemm-kernel requires have I, J and K axes gemm_traits = {IterKind.kIter_I, IterKind.kIter_J, IterKind.kIter_K} if {x.kind for x in traits.values()}.intersection(gemm_traits) != gemm_traits: return None A_traits = [traits[iter_var.var] for iter_var in block.iter_vars if iter_var.var in A_axes] B_traits = [traits[iter_var.var] for iter_var in block.iter_vars if iter_var.var in B_axes] C_traits = [traits[iter_var.var] for iter_var in block.iter_vars if iter_var.var in C_axes] block_traits = [traits[i.var] for i in block.iter_vars] return A_traits, B_traits, C_traits, block_traits
Get index maps for the block Parameters ---------- block : tir.Block The block to be analyzed layout : List[str] the target layout index map to be used. 'n' for [i, k] layout 't' for [k, j] layout 'a' for auto inference based on whether the last axis is reduction. Returns ------- index_maps : Optional[Tuple[tir.IndexMap]] The index maps for the block, or None if the block is not a gemm-liked kernel
def get_index_map(block: tir.Block, layout: Optional[List[str]] = None) -> Optional[Tuple[tir.IndexMap, ...]]: """Get index maps for the block Parameters ---------- block : tir.Block The block to be analyzed layout : List[str] the target layout index map to be used. 'n' for [i, k] layout 't' for [k, j] layout 'a' for auto inference based on whether the last axis is reduction. Returns ------- index_maps : Optional[Tuple[tir.IndexMap]] The index maps for the block, or None if the block is not a gemm-liked kernel """ if layout is None: layout = ["n", "t", "n"] traits = detect_iter_traits(block) if traits is None: return None A_traits, B_traits, C_traits, block_traits = traits def get_ordered_axes(region: List[Range]) -> Set[Var]: axes: List[Var] = [] for r in region: if not _is_one(r.extent): raise ValueError("Expect elemwise block access") axes.append(r.min) return axes def is_common_reduce(var: Var) -> bool: for iter_var in block.iter_vars: if iter_var.var == var and iter_var.iter_type == IterVar.CommReduce: return True return False def check_last_trait(region: List[Range]): axes = get_ordered_axes(region) return is_common_reduce(axes[-1]) def infer_layout(layout: str, region: List[Range], kind: str = "A"): """ Infer the layout based on the region and the kind of buffer kind: "A", "B", "C" """ primary_iter, secondary_iter, reduction_iter = { "A": (IterKind.kIter_I, IterKind.kIter_K, IterKind.kIter_K), "B": (IterKind.kIter_K, IterKind.kIter_J, IterKind.kIter_K), "C": (IterKind.kIter_I, IterKind.kIter_J, None), }[kind] spatial_iter = { "A": IterKind.kIter_I, "B": IterKind.kIter_J, "C": None, }[kind] if layout == "n": return [IterKind.kIter_S, primary_iter, secondary_iter] elif layout == "t": return [IterKind.kIter_S, secondary_iter, primary_iter] elif layout == "a": # auto inference layout # for buffer with reduction axis, we put it as the last axis # otherwise, we put it as the first axis if kind == "C": return [IterKind.kIter_S, primary_iter, secondary_iter] else: return ([IterKind.kIter_S, spatial_iter, reduction_iter] if check_last_trait(region) else [IterKind.kIter_S, reduction_iter, spatial_iter]) else: raise ValueError(f"Unknown layout {layout}") A_index_map = make_iter_fusion_index_map( A_traits, infer_layout(layout[0], block.reads[0].region, kind="A")) B_index_map = make_iter_fusion_index_map( B_traits, infer_layout(layout[1], block.reads[1].region, kind="B")) C_index_map = make_iter_fusion_index_map( C_traits, infer_layout(layout[2], block.writes[0].region, kind="C")) matmul_index_map = make_iter_fusion_index_map( block_traits, [IterKind.kIter_S, IterKind.kIter_I, IterKind.kIter_J, IterKind.kIter_K], ) return ( matmul_index_map, A_index_map, B_index_map, C_index_map, )
Detect In/Out data types for the given block based on the analysis of read/write buffers.
def get_in_out_dtypes(block: tir.Block) -> Tuple[str, str]: """ Detect In/Out data types for the given block based on the analysis of read/write buffers. """ assert len(block.reads) > 0 and len(block.writes) > 0 in_dtype = block.reads[0].buffer.dtype out_dtype = block.writes[0].buffer.dtype return (in_dtype, out_dtype)
for layout transformed gemm, the index map should be 5d
def get_index_map_5d(index_map): """ for layout transformed gemm, the index map should be 5d """ def index_map_5d(b, i, j, ii, jj): return ( b, i, j, *index_map(ii, jj), ) return index_map_5d
Get the maximum number of threads per block for a given target. Parameters ---------- target : Target The target to get the maximum number of threads per block for. Returns ------- max_threads_per_block : int The maximum number of threads per block for the given target.
def max_threads_per_block(target: Target) -> int: """Get the maximum number of threads per block for a given target. Parameters ---------- target : Target The target to get the maximum number of threads per block for. Returns ------- max_threads_per_block : int The maximum number of threads per block for the given target. """ for name in ["max_threads_per_block", "max_num_threads"]: result = target.attrs.get(name, None) if result is not None: return result if target.kind.name == "cuda": return 1024 return 256
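Illustrative use, assuming TVM is installed and the function above is in scope (the value read from target.attrs may be an IntImm rather than a plain int):

from tvm.target import Target

print(max_threads_per_block(Target("cuda")))  # typically 1024
print(max_threads_per_block(Target("llvm")))  # no GPU attrs, falls back to 256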
loops_extent is the number of elements to be decoded in one stage. For memory-friendly processing, loops_extent should be a multiple of (sizeof(int) // 8). However, for int1b it is not possible to decode 8 elements in one stage, so 16 is used instead.
def get_fast_decode_intrin( source_bit=4, storage_dtype="int8", source_format="uint", target_dtype="float16", loops_extent=8, with_scale=False, with_zeros=False, zeros_mode="original", ): """ loops extent is the number of elements to be decoded in one stage for memory friendly process, the loops_extent should be a multiple of (sizeof(int) // 8). However, for the case of int1b, it is not possible to decode 8 elements in one stage, so we have to use 16. """ if target_dtype == "float16": d4f = "f16" elif target_dtype == "int8": d4f = "i8s" else: raise ValueError("Unsupported target dtype: {}".format(target_dtype)) source_symbol = "u" if source_format == "uint" else "s" func_name = "decode_i{}{}_to_{}".format(source_bit, source_symbol, d4f) if with_scale: func_name += "_scale" if with_zeros: func_name += f"_zeros_{zeros_mode}" assert storage_dtype in ["int8", "int32", "uint32"] storage_nbit = int("".join(c for c in storage_dtype if c.isdigit())) storage_type = str("".join(c for c in storage_dtype if not c.isdigit())) elem_per_unit = storage_nbit // source_bit n_storage_elems = loops_extent // elem_per_unit if with_zeros and zeros_mode == "quantized": decode_func = _tir_packed_to_unsigned_convert_with_zeros(storage_type, storage_nbit) elif source_format == "int": if source_bit == 1: decode_func = _tir_packed_int_to_int_convert(storage_type, storage_nbit) else: decode_func = _tir_packed_to_signed_convert(storage_type, storage_nbit) elif source_format == "uint": decode_func = _tir_packed_to_unsigned_convert(storage_type, storage_nbit) else: raise ValueError("Unsupported source_format: {}".format(source_format)) if with_scale is False: @T.prim_func def fast_decode_desc(compressed: T.handle, decompressed: T.handle) -> None: Compressed = T.match_buffer( compressed, [ n_storage_elems, ], dtype=storage_dtype, scope="local", ) Decompressed = T.match_buffer( decompressed, [ loops_extent, ], dtype=target_dtype, scope="local", ) with T.block("root"): T.reads(Compressed[0:n_storage_elems]) T.writes(Decompressed[0:loops_extent]) for i in T.grid(loops_extent): with T.block("decode"): vi = T.axis.remap("S", [i]) Decompressed[vi] = decode_func( source_bit, Compressed[vi // elem_per_unit], vi % elem_per_unit, dtype=target_dtype, ) @T.prim_func def fast_decode_impl(compressed: T.handle, decompressed: T.handle) -> None: Compressed = T.match_buffer( compressed, [ n_storage_elems, ], dtype=storage_dtype, scope="local", ) Decompressed = T.match_buffer( decompressed, [ loops_extent, ], dtype=target_dtype, scope="local", ) with T.block("root"): T.reads(Compressed[0:n_storage_elems]) T.writes(Decompressed[0:loops_extent]) T.call_extern( "handle", func_name, Compressed.data, Decompressed.data, loops_extent, ) elif with_zeros is False: @T.prim_func def fast_decode_desc(compressed: T.handle, decompressed: T.handle, scale: T.handle) -> None: Compressed = T.match_buffer( compressed, [ n_storage_elems, ], dtype=storage_dtype, scope="local", ) Decompressed = T.match_buffer( decompressed, [ loops_extent, ], dtype=target_dtype, scope="local", ) Scale = T.match_buffer( scale, [ 1, ], dtype=target_dtype, scope="global", ) with T.block("root"): T.reads(Compressed[0:n_storage_elems], Scale[0:1]) T.writes(Decompressed[0:loops_extent]) for i in T.grid(loops_extent): with T.block("decode"): vi = T.axis.remap("S", [i]) Decompressed[vi] = ( decode_func( source_bit, Compressed[vi // elem_per_unit], vi % elem_per_unit, dtype=target_dtype, ) * Scale[0]) @T.prim_func def fast_decode_impl(compressed: T.handle, decompressed: T.handle, scale: 
T.handle) -> None: s0 = T.int32() Compressed = T.match_buffer( compressed, [ n_storage_elems, ], dtype=storage_dtype, scope="local", ) Decompressed = T.match_buffer( decompressed, [ loops_extent, ], dtype=target_dtype, scope="local", ) Scale = T.match_buffer( scale, [ 1, ], dtype=target_dtype, offset_factor=1, strides=[s0], scope="global", ) with T.block("root"): T.reads(Compressed[0:n_storage_elems], Scale[0:1]) T.writes(Decompressed[0:loops_extent]) T.call_extern( "handle", func_name, Compressed.data, Decompressed.data, Scale.access_ptr("r"), loops_extent, ) elif zeros_mode == "quantized": def get_dequantize_buffers_list(weight, scale, zeros, zeros_mode="original"): if zeros_mode == "original": return [weight, zeros, scale] elif zeros_mode == "rescale": return [weight, scale, zeros] elif zeros_mode == "quantized": return [weight, zeros, scale] else: raise ValueError(f"Unsupported zeros_mode: {zeros_mode}") def get_dequantize_func(weight, scale, zeros, zeros_mode="original"): if zeros_mode == "original": return (weight - zeros) * scale elif zeros_mode == "rescale": return weight * scale - zeros elif zeros_mode == "quantized": return weight * scale else: raise ValueError(f"Unsupported zeros_mode: {zeros_mode}") # Scale with Zeros @T.prim_func def fast_decode_desc( compressed: T.handle, decompressed: T.handle, scale: T.handle, zeros: T.handle, ) -> None: Compressed = T.match_buffer( compressed, [ n_storage_elems, ], dtype=storage_dtype, scope="local", ) Decompressed = T.match_buffer( decompressed, [ loops_extent, ], dtype=target_dtype, scope="local", ) Scale = T.match_buffer( scale, [ 1, ], dtype=target_dtype, scope="local", ) Zeros = T.match_buffer( zeros, [ 1, ], dtype=storage_dtype, scope="local", ) with T.block("root"): T.reads(*get_dequantize_buffers_list( Compressed[0:n_storage_elems], Scale[0:1], Zeros[0:1], zeros_mode=zeros_mode, )) T.writes(Decompressed[0:loops_extent]) for i in T.grid(loops_extent): with T.block("decode"): vi = T.axis.remap("S", [i]) Decompressed[vi] = get_dequantize_func( decode_func( source_bit, Compressed[vi // elem_per_unit], vi % elem_per_unit, Zeros[0], dtype=target_dtype, ), Scale[0], Zeros[0], zeros_mode, ) @T.prim_func def fast_decode_impl( compressed: T.handle, decompressed: T.handle, scale: T.handle, zeros: T.handle, ) -> None: s0 = T.int32() s1 = T.int32() Compressed = T.match_buffer( compressed, [ n_storage_elems, ], dtype=storage_dtype, scope="local", ) Decompressed = T.match_buffer( decompressed, [ loops_extent, ], dtype=target_dtype, scope="local", ) Scale = T.match_buffer( scale, [ 1, ], dtype=target_dtype, offset_factor=1, strides=[s0], scope="local", ) Zeros = T.match_buffer( zeros, [ 1, ], dtype=storage_dtype, offset_factor=1, strides=[s1], scope="local", ) with T.block("root"): T.reads(Compressed[0:n_storage_elems], Scale[0:1], Zeros[0:1]) T.writes(Decompressed[0:loops_extent]) T.call_extern( "handle", func_name, Compressed.data, Decompressed.data, Scale.access_ptr("r"), Zeros.access_ptr("r"), loops_extent, ) else: def get_dequantize_buffers_list(weight, scale, zeros, zeros_mode="original"): if zeros_mode == "original": return [weight, zeros, scale] elif zeros_mode == "rescale": return [weight, scale, zeros] else: raise ValueError(f"Unsupported zeros_mode: {zeros_mode}") def get_dequantize_func(weight, scale, zeros, zeros_mode="original"): if zeros_mode == "original": return (weight - zeros) * scale elif zeros_mode == "rescale": return weight * scale - zeros else: raise ValueError(f"Unsupported zeros_mode: {zeros_mode}") # Scale with Zeros 
@T.prim_func def fast_decode_desc( compressed: T.handle, decompressed: T.handle, scale: T.handle, zeros: T.handle, ) -> None: Compressed = T.match_buffer( compressed, [ n_storage_elems, ], dtype=storage_dtype, scope="local", ) Decompressed = T.match_buffer( decompressed, [ loops_extent, ], dtype=target_dtype, scope="local", ) Scale = T.match_buffer( scale, [ 1, ], dtype=target_dtype, scope="global", ) Zeros = T.match_buffer( zeros, [ 1, ], dtype=target_dtype, scope="global", ) with T.block("root"): T.reads(*get_dequantize_buffers_list( Compressed[0:n_storage_elems], Scale[0:1], Zeros[0:1], zeros_mode=zeros_mode, )) T.writes(Decompressed[0:loops_extent]) for i in T.grid(loops_extent): with T.block("decode"): vi = T.axis.remap("S", [i]) Decompressed[vi] = get_dequantize_func( decode_func( source_bit, Compressed[vi // elem_per_unit], vi % elem_per_unit, dtype=target_dtype, ), Scale[0], Zeros[0], zeros_mode, ) @T.prim_func def fast_decode_impl( compressed: T.handle, decompressed: T.handle, scale: T.handle, zeros: T.handle, ) -> None: s0 = T.int32() s1 = T.int32() Compressed = T.match_buffer( compressed, [ n_storage_elems, ], dtype=storage_dtype, scope="local", ) Decompressed = T.match_buffer( decompressed, [ loops_extent, ], dtype=target_dtype, scope="local", ) Scale = T.match_buffer( scale, [ 1, ], dtype=target_dtype, offset_factor=1, strides=[s0], scope="global", ) Zeros = T.match_buffer( zeros, [ 1, ], dtype=target_dtype, offset_factor=1, strides=[s1], scope="global", ) with T.block("root"): T.reads(Compressed[0:n_storage_elems], Scale[0:1], Zeros[0:1]) T.writes(Decompressed[0:loops_extent]) T.call_extern( "handle", func_name, Compressed.data, Decompressed.data, Scale.access_ptr("r"), Zeros.access_ptr("r"), loops_extent, ) return fast_decode_desc, fast_decode_impl
This function is used to get the intrinsic group of the LOP3 operation to avoid the overhead of fast decoding. LOP3 is a type of logic operation that takes three inputs. The intrinsic group refers to the set of intrinsic operations that can be performed on these inputs. This function retrieves and returns this group. Parameters ---------- in_dtype : Literal["int8"] The data type of the input. It should be "int8". out_dtype : Literal["float16", "int8"] The data type of the output. It can be either "float16" or "int8". storage_nbit : int, optional The number of bits used for storage. By default, it is 4. with_scale : bool, optional A boolean parameter that indicates whether scaling should be applied. By default, it is False. Returns ------- Dict[str, str] A dictionary mapping the names of the intrinsics to their corresponding implementations.
def get_lop3_intrin_group( out_dtype: Literal["float16", "int8"], source_format: Literal["int", "uint"] = "uint", source_bit: int = 4, storage_dtype: Literal["int32", "int8"] = "int8", with_scaling: bool = False, with_zeros: bool = False, zeros_mode: Literal["original", "rescale", "quantized"] = "original", ) -> Dict[str, str]: """ This function is used to get the intrinsic group of the LOP3 operation to avoid the overhead of fast decoding. LOP3 is a type of logic operation that takes three inputs. The intrinsic group refers to the set of intrinsic operations that can be performed on these inputs. This function retrieves and returns this group. Parameters ---------- in_dtype : Literal["int8"] The data type of the input. It should be "int8". out_dtype : Literal["float16", "int8"] The data type of the output. It can be either "float16" or "int8". storage_nbit : int, optional The number of bits used for storage. By default, it is 4. with_scale : bool, optional A boolean parameter that indicates whether scaling should be applied. By default, it is False. Returns ------- Dict[str, str] A dictionary mapping the names of the intrinsics to their corresponding implementations. """ assert out_dtype in ["float16", "int8"] dtype_mapping = {"float16": "f16", "int8": "i8", "int32": "i32"} target_dtype = dtype_mapping[out_dtype] target_bits = tvm.DataType(out_dtype).bits loop_extent = 128 // target_bits if source_format not in ["int", "uint"]: raise ValueError("Invalid source_format. Expected 'int' or 'uint'.") source_symbol = "i" if source_format == "int" else "u" _intrin = f"lop3_fast_decode_{source_symbol}{source_bit}_to_{storage_dtype}_to_{target_dtype}_l{loop_extent}_" if with_scaling: _intrin += "scale_" if with_zeros: _intrin += f"zeros_{zeros_mode}_" import_c_map = { "i4_to_f16": decode_i4_to_f16, "i2_to_f16": decode_i2_to_f16, "i1_to_f16": decode_i1_to_f16, "i4_to_f16_scale": decode_i4_to_f16_scale, "i2_to_f16_scale": decode_i2_to_f16_scale, "i1_to_f16_scale": decode_i1_to_f16_scale, "i4_to_f16_scale_zeros_original": decode_i4_to_f16_scale_zeros_original, "i2_to_f16_scale_zeros_original": decode_i2_to_f16_scale_zeros_original, "i1_to_f16_scale_zeros_original": decode_i1_to_f16_scale_zeros_original, "i4_to_f16_scale_zeros_rescale": decode_i4_to_f16_scale_zeros_rescale, "i2_to_f16_scale_zeros_rescale": decode_i2_to_f16_scale_zeros_rescale, "i1_to_f16_scale_zeros_rescale": decode_i1_to_f16_scale_zeros_rescale, "i4_to_f16_scale_zeros_quantized": decode_i4_to_f16_scale_zeros_quantized, "i2_to_f16_scale_zeros_quantized": decode_i2_to_f16_scale_zeros_quantized, "i1_to_i8": decode_i1s_to_i8s, "i2_to_i8": decode_i2s_to_i8s, "i4_to_i8": decode_i4s_to_i8s, } key = f"i{source_bit}_to_{target_dtype}" if with_scaling: key += "_scale" if with_zeros: key += f"_zeros_{zeros_mode}" return { "c_source": import_c_map[key], "compute": _intrin, }
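A hypothetical lookup; the expected intrinsic name is derived from the string template in the function body and assumes the referenced decode_* C sources are importable:

group = get_lop3_intrin_group(out_dtype="float16", source_format="uint", source_bit=4)
print(group["compute"])  # lop3_fast_decode_u4_to_int8_to_f16_l8_  (loop_extent = 128 // 16 = 8)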
Detect In/Out data types for the given block based on the analysis of read/write buffers.
def get_in_out_dtypes(block: tir.Block) -> Tuple[str, str]: """ Detect In/Out data types for the given block based on the analysis of read/write buffers. """ assert len(block.reads) > 0 and len(block.writes) > 0 in_dtype = block.reads[0].buffer.dtype out_dtype = block.writes[0].buffer.dtype return (in_dtype, out_dtype)
Executes the 'nvidia-smi' command to fetch the name of the first available NVIDIA GPU. Returns: str: The name of the GPU, or None if 'nvidia-smi' command fails.
def get_gpu_model_from_nvidia_smi(): """ Executes the 'nvidia-smi' command to fetch the name of the first available NVIDIA GPU. Returns: str: The name of the GPU, or None if 'nvidia-smi' command fails. """ try: # Execute nvidia-smi command to get the GPU name output = subprocess.check_output( ["nvidia-smi", "--query-gpu=gpu_name", "--format=csv,noheader"], encoding="utf-8", ).strip() except subprocess.CalledProcessError as e: logger.info("nvidia-smi failed with error: %s", e) return None # Return the name of the first GPU if multiple are present return output.split("\n")[0]
Finds the best match for a query within a list of tags using fuzzy string matching.
def find_best_match(tags, query): """ Finds the best match for a query within a list of tags using fuzzy string matching. """ MATCH_THRESHOLD = 25 best_match, score = process.extractOne(query, tags) def check_target(best, default): return best if Target(best).arch == Target(default).arch else default if check_target(best_match, "cuda"): return best_match if score >= MATCH_THRESHOLD else "cuda" else: return "cuda"
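For context, process.extractOne comes from a fuzzy string-matching package (thefuzz/fuzzywuzzy is an assumption here, since the import is not shown); it returns the closest tag and a 0-100 score:

from thefuzz import process  # assumed package; pip install thefuzz

tags = ["nvidia/nvidia-a100", "nvidia/geforce-rtx-3090", "nvidia/jetson-agx-xavier"]
best, score = process.extractOne("NVIDIA GeForce RTX 3090", tags)
print(best, score)  # nvidia/geforce-rtx-3090 with a high score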
Automatically detects the NVIDIA GPU architecture to set the appropriate TVM target. Returns: str: The detected TVM target architecture.
def auto_detect_nvidia_target() -> str: """ Automatically detects the NVIDIA GPU architecture to set the appropriate TVM target. Returns: str: The detected TVM target architecture. """ # Return a predefined target if specified in the environment variable # if "TVM_TARGET" in os.environ: # return os.environ["TVM_TARGET"] # Fetch all available tags and filter for NVIDIA tags all_tags = list_tags() nvidia_tags = [tag for tag in all_tags if "nvidia" in tag] # Get the current GPU model and find the best matching target gpu_model = get_gpu_model_from_nvidia_smi() target = find_best_match(nvidia_tags, gpu_model) if gpu_model else "cuda" return target
Lower the given IRModule and create a device module for the specified target. Parameters: - mod: The input IRModule. - target: The compilation target. Returns: - A device module ready for execution.
def get_annotated_device_mod(mod: IRModule, target: Target): """ Lower the given IRModule and create a device module for the specified target. Parameters: - mod: The input IRModule. - target: The compilation target. Returns: - A device module ready for execution. """ input_mod = lower(mod) target_input_mod = {target: input_mod} annotated_mods = {} runtime = None target_host = None for tgt, mod in target_input_mod.items(): if not isinstance(tgt, (str, Target)): raise ValueError("The key of inputs must be str or " "Target when inputs is dict.") if not isinstance(mod, tvm.IRModule): raise ValueError("inputs must be Schedule, IRModule, " "or dict of str to IRModule.") annotated_mods[tgt] = mod.with_attr("runtime", runtime) annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host) if not target_host: for tar, _ in annotated_mods.items(): device_type = ndarray.device(tar.kind.name, 0).device_type if device_type == ndarray.cpu(0).device_type: target_host = tar break if not target_host: target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm" annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host) for target, mod in annotated_mods.items(): mixed_mod_passes = tvm.get_global_func("driver.mixed_mod_passes") device_mod_passes = tvm.get_global_func("driver.device_mod_passes") mod = mixed_mod_passes(mod, target)(mod) device_mod = device_mod_passes(mod, target)(mod) return device_mod
Extracts the thread block and grid dimensions for the reduction block within a given IRModule. Parameters: - mod: The input IRModule from which to extract thread block and grid information. Returns: A tuple containing two lists: - The first list contains the dimensions of the thread block (threadIdx.x, threadIdx.y, threadIdx.z). - The second list contains the dimensions of the grid (blockIdx.x, blockIdx.y, blockIdx.z).
def get_thread_block_information(mod: IRModule): """ Extracts the thread block and grid dimensions for the reduction block within a given IRModule. Parameters: - mod: The input IRModule from which to extract thread block and grid information. Returns: A tuple containing two lists: - The first list contains the dimensions of the thread block (threadIdx.x, threadIdx.y, threadIdx.z). - The second list contains the dimensions of the grid (blockIdx.x, blockIdx.y, blockIdx.z). """ # Initialize the schedule from the IRModule sch = tvm.tir.Schedule(mod) # Get the root block and its child blocks root_block = sch.get_block("root") child_blocks = sch.get_child_blocks(root_block) # Initialize default block and grid dimensions (1, 1, 1) block_dims, grid_dims = [1, 1, 1], [1, 1, 1] for block in child_blocks: # Get the loops surrounding the main block loops = sch.get_loops(block) # Iterate over each loop to extract thread and block bindings for loop in loops: stmt = sch.get(loop) thread_binding = stmt.thread_binding extent = int(stmt.extent) # Skip loops without thread binding if thread_binding: if "threadIdx" in thread_binding.thread_tag: block_dims["xyz".index(thread_binding.thread_tag[-1])] = extent elif "blockIdx" in thread_binding.thread_tag: grid_dims["xyz".index(thread_binding.thread_tag[-1])] = extent return block_dims, grid_dims
From a dataframe create a parallel coordinate plot
def parallel_plot(df, color=None, palette=None): """From a dataframe create a parallel coordinate plot """ npts = df.shape[0] ndims = len(df.columns) if color is None: color = np.ones(npts) if palette is None: palette = ['#ff0000'] cmap = LinearColorMapper(high=color.min(), low=color.max(), palette=palette) data_source = ColumnDataSource(dict( xs=np.arange(ndims)[None, :].repeat(npts, axis=0).tolist(), ys=np.array((df-df.min())/(df.max()-df.min())).tolist(), color=color)) p = figure(x_range=(-1, ndims), y_range=(0, 1), width=1000, tools="pan, box_zoom") # Create x axis ticks from columns contained in dataframe fixed_x_ticks = FixedTicker( ticks=np.arange(ndims), minor_ticks=[]) formatter_x_ticks = CustomJSTickFormatter( code="return columns[index]", args={"columns": df.columns}) p.xaxis.ticker = fixed_x_ticks p.xaxis.formatter = formatter_x_ticks p.yaxis.visible = False p.y_range.start = 0 p.y_range.end = 1 p.y_range.bounds = (-0.1, 1.1) # add a little padding around y axis p.xgrid.visible = False p.ygrid.visible = False # Create extra y axis for each dataframe column tickformatter = BasicTickFormatter(precision=1) for index, col in enumerate(df.columns): start = df[col].min() end = df[col].max() bound_min = start + abs(end-start) * (p.y_range.bounds[0] - p.y_range.start) bound_max = end + abs(end-start) * (p.y_range.bounds[1] - p.y_range.end) p.extra_y_ranges.update( {col: Range1d(start=bound_min, end=bound_max, bounds=(bound_min, bound_max))}) fixedticks = FixedTicker( ticks=np.linspace(start, end, 8), minor_ticks=[]) p.add_layout(LinearAxis(fixed_location=index, y_range_name=col, ticker=fixedticks, formatter=tickformatter), 'right') # create the data renderer ( MultiLine ) # specify selected and non selected style non_selected_line_style = dict(line_color='grey', line_width=0.1, line_alpha=0.5) selected_line_style = dict(line_color={'field': 'color', 'transform': cmap}, line_width=1) parallel_renderer = p.multi_line( xs="xs", ys="ys", source=data_source, **non_selected_line_style) # Specify selection style selected_lines = MultiLine(**selected_line_style) # Specify non selection style nonselected_lines = MultiLine(**non_selected_line_style) parallel_renderer.selection_glyph = selected_lines parallel_renderer.nonselection_glyph = nonselected_lines p.y_range.start = p.y_range.bounds[0] p.y_range.end = p.y_range.bounds[1] rect_source = ColumnDataSource({ 'x': [], 'y': [], 'width': [], 'height': [], }) # add rectangle selections selection_renderer = p.rect(x='x', y='y', width='width', height='height', source=rect_source, fill_alpha=0.7, fill_color='#009933') selection_tool = ParallelSelectionTool( renderer_select=selection_renderer, renderer_data=parallel_renderer, box_width=10) # custom resets (reset only axes not selections) reset_axes = ParallelResetTool() # add tools and activate selection ones p.add_tools(selection_tool, reset_axes) p.toolbar.active_drag = selection_tool return p
Get a text color with high contrast vs. the background color.
def get_text_color(hex_color): """Get a text color with high contrast vs. the background color.""" return '#000000' if RGB.from_hex_string(hex_color).luminance > 0.4 else '#ffffff'
Function to build a suitable CustomJS to display the current event in the div model.
def display_event(div: Div, attributes: list[str] = []) -> CustomJS: """ Function to build a suitable CustomJS to display the current event in the div model. """ style = 'float: left; clear: left; font-size: 13px' return CustomJS(args=dict(div=div), code=f""" const attrs = {attributes}; const args = []; for (let i = 0; i < attrs.length; i++) {{ const val = JSON.stringify(cb_obj[attrs[i]], function(key, val) {{ return val.toFixed ? Number(val.toFixed(2)) : val; }}) args.push(attrs[i] + '=' + val) }} const line = "<span style={style!r}><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n"; const text = div.text.concat(line); const lines = text.split("\\n") if (lines.length > 35) lines.shift(); div.text = lines.join("\\n"); """)
Shorthand to override default units with "data", for e.g. `Ray.length`.
def data(val: float): """Shorthand to override default units with "data", for e.g. `Ray.length`. """ return value(val, units="data")
Distance between (lat1, lon1) and (lat2, lon2).
def distance(p1, p2): """Distance between (lat1, lon1) and (lat2, lon2). """ R = 6371 lat1, lon1 = p1 lat2, lon2 = p2 phi1 = radians(lat1) phi2 = radians(lat2) delta_lat = radians(lat2 - lat1) delta_lon = radians(lon2 - lon1) a = haversin(delta_lat) + cos(phi1) * cos(phi2) * haversin(delta_lon) return 2 * R * atan2(sqrt(a), sqrt(1 - a))
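The helper haversin is not shown; assuming the standard definition haversin(theta) = sin(theta / 2) ** 2, a self-contained version of the same formula gives plausible great-circle distances:

from math import radians, sin, cos, sqrt, atan2

def haversin(theta):
    return sin(theta / 2) ** 2

def great_circle_km(p1, p2):
    R = 6371  # mean Earth radius, km
    (lat1, lon1), (lat2, lon2) = p1, p2
    a = haversin(radians(lat2 - lat1)) + cos(radians(lat1)) * cos(radians(lat2)) * haversin(radians(lon2 - lon1))
    return 2 * R * atan2(sqrt(a), sqrt(1 - a))

print(great_circle_km((48.8566, 2.3522), (51.5074, -0.1278)))  # Paris to London, roughly 344 km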
Function to build a suitable CustomJS to display the current event in the div model.
def display_event(div, attributes=[]): """ Function to build a suitable CustomJS to display the current event in the div model. """ style = 'float: left; clear: left; font-size: 13px' return CustomJS(args=dict(div=div), code=f""" const {{to_string}} = Bokeh.require("core/util/pretty") const attrs = {attributes}; const args = []; for (let i = 0; i<attrs.length; i++ ) {{ const val = to_string(cb_obj[attrs[i]], {{precision: 2}}) args.push(attrs[i] + '=' + val) }} const line = "<span style={style!r}><b>" + cb_obj.event_name + "</b>(" + args.join(", ") + ")</span>\\n"; const text = div.text.concat(line); const lines = text.split("\\n") if (lines.length > 35) lines.shift(); div.text = lines.join("\\n"); """)
Function that returns a Python callback to pretty print the events.
def print_event(attributes=[]): """ Function that returns a Python callback to pretty print the events. """ def python_callback(event): cls_name = event.__class__.__name__ attrs = ', '.join([f"{attr}={event.__dict__[attr]}" for attr in attributes]) print(f"{cls_name}({attrs})") return python_callback
Projects the given (longitude, latitude) values into Web Mercator coordinates (meters East of Greenwich and meters North of the Equator).
def lnglat_to_meters(longitude: float, latitude: float) -> tuple[float, float]: """ Projects the given (longitude, latitude) values into Web Mercator coordinates (meters East of Greenwich and meters North of the Equator). """ origin_shift = np.pi * 6378137 easting = longitude * origin_shift / 180.0 northing = np.log(np.tan((90 + latitude) * np.pi / 360.0)) * origin_shift / np.pi return (easting, northing)
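A quick numeric check with the formula inlined: (longitude, latitude) = (0, 0) maps to (0, 0), and the New York coordinates below land near (-8238310, 4970072) meters:

import numpy as np

origin_shift = np.pi * 6378137
lon, lat = -74.0060, 40.7128
easting = lon * origin_shift / 180.0
northing = np.log(np.tan((90 + lat) * np.pi / 360.0)) * origin_shift / np.pi
print(easting, northing)  # approximately -8238310.2 4970071.6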
Hits the GitHub GraphQL API with the given query and returns the data or None.
def query_github(query, token): """ Hits the GitHub GraphQL API with the given query and returns the data or None. """ API_HEADERS = {"Authorization": f"Bearer {token}"} BASE_URL = "https://api.github.com/graphql" if logging.getLogger().getEffectiveLevel() == logging.DEBUG: query_string = " ".join(line.strip() for line in query.split("\n")) logging.debug("POST https://api.github.com/graphql; query:%s", query_string) response = requests.post(BASE_URL, json={"query": query}, headers=API_HEADERS) errors = response.json().get("errors", []) for error in errors: path = "/".join(error["path"]) msg = error["message"] print(f"error: {path}: {msg}", file=sys.stderr) if logging.getLogger().getEffectiveLevel() == logging.DEBUG: logging.debug(f"Response {response.status_code}: {response.text}") return response.json()["data"] if not errors else None
Returns the list of labels for the given issue or PR data.
def get_labels(data): """ Returns the list of labels for the given issue or PR data. """ return [edge["node"]["name"] for edge in data["node"]["labels"]["edges"]]
Returns the type label of the given issue or PR data, otherwise None.
def get_label_type(data): """ Returns the type label of the given issue or PR data, otherwise None. """ return get_label_for(data, "type: ")
Returns the component label of the given issue or PR data, otherwise None.
def get_label_component(data): """ Returns the component label of the given issue or PR data, otherwise None. """ return get_label_for(data, "tag: component: ")
Returns a humanized description of the given issue or PR data.
def description(data): """ Returns a humanized description of the given issue or PR data. """ component = get_label_component(data) component_str = "" if not component else f"[component: {component}] " return f'#{data["node"]["number"]} {component_str}{data["node"]["title"]}'
Iterates over all open milestones looking for one with the given title.
def get_milestone_number(title, token, allow_closed): """ Iterates over all open milestones looking for one with the given title. """ open_str = "" if allow_closed else "states: OPEN," def helper(cursor=None): cursor_or_null = f'"{cursor}"' if cursor else "null" query = f""" {{ repository(owner: "bokeh", name: "bokeh") {{ milestones(first: 10, {open_str} after: {cursor_or_null}) {{ edges {{ node {{ number title }} }} pageInfo {{ endCursor }} }} }} }} """ data = query_github(query, token) if not data: print("error: graphql query failure", file=sys.stderr) sys.exit(1) milestones = data["repository"]["milestones"] end_cursor = milestones["pageInfo"]["endCursor"] for edge in milestones["edges"]: if edge["node"]["title"] == title: return edge["node"]["number"] return helper(end_cursor) if end_cursor else None return helper()
Returns the issues and PRs in the milestone with the given title, otherwise None if the milestone doesn't exist.
def get_milestone_items(title, token, allow_closed): """ Returns the issues and PRs in the milestone with the given title, otherwise None if the milestone doesn't exist. """ milestone_number = get_milestone_number(title, token, allow_closed) if not milestone_number: return None results = [] def helper(kind, cursor=None): cursor_or_null = f'"{cursor}"' if cursor else "null" query = f""" {{ repository(owner: "bokeh", name: "bokeh") {{ milestone(number: {milestone_number}) {{ {kind}(first: 100, after: {cursor_or_null}) {{ edges {{ node {{ number title state labels(first: 20) {{ edges {{ node {{ name }} }} }} }} }} pageInfo {{ endCursor }} }} }} }} }} """ data = query_github(query, token) if not data: print("error: graphql query failure", file=sys.stderr) sys.exit(1) items = data["repository"]["milestone"][kind] end_cursor = items["pageInfo"]["endCursor"] for edge in items["edges"]: edge["kind"] = kind results.append(edge) if end_cursor: helper(kind, end_cursor) helper("issues") helper("pullRequests") return results
Generates a bokeh changelog which includes the given milestone. Requires that you set GH_TOKEN to your GitHub API Token. Exit code 2 indicates there was a verification problem whereas exit code 1 indicates a general error in the script. Otherwise you can expect an exit code of 0 for success.
def main(milestone, log_level, verbose, check_only, allow_closed): """ Generates a bokeh changelog which includes the given milestone. Requires that you set GH_TOKEN to your GitHub API Token. Exit code 2 indicates there was a verification problem whereas exit code 1 indicates a general error in the script. Otherwise you can expect an exit code of 0 for success. """ log_level = "DEBUG" if verbose else log_level logging.basicConfig(level=log_level) token = os.environ.get("GH_TOKEN", None) if not token: print("error: GH_TOKEN is not set", file=sys.stderr) sys.exit(1) items = get_milestone_items(milestone, token, allow_closed) if not items: print(f"error: no such milestone: {milestone}", file=sys.stderr) sys.exit(1) problems = check_milestone_items(items) for problem in problems: print(problem, file=sys.stderr) if len(problems) > 0: sys.exit(2) elif check_only: sys.exit(0) CHANGELOG = REPO_ROOT / "docs" / "CHANGELOG" with open(CHANGELOG) as f: old_changelog = f.read() out = open(CHANGELOG, mode="w") out.write(f"{datetime.date.today()} {milestone:>8}:\n") out.write("--------------------\n") grouping = lambda item: get_label_type(item) or "none" items = sorted(items, key=grouping) for group_type, group in groupby(items, grouping): if group_type == "bug": out.write(" * bugfixes:\n") elif group_type == "feature": out.write(" * features:\n") elif group_type == "task": out.write(" * tasks:\n") elif group_type == "none": continue for item in group: out.write(f" - {description(item)}\n") out.write("\n") out.write(old_changelog)
Return a driver function that can advance a "bounced" sequence of values. .. code-block:: none seq = [0, 1, 2, 3] # bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...] Args: sequence (seq) : a sequence of values for the driver to bounce
def bounce(sequence: Sequence[int]) -> partial[Callable[[], None]]: ''' Return a driver function that can advance a "bounced" sequence of values. .. code-block:: none seq = [0, 1, 2, 3] # bounce(seq) => [0, 1, 2, 3, 3, 2, 1, 0, 0, 1, 2, ...] Args: sequence (seq) : a sequence of values for the driver to bounce ''' N = len(sequence) def f(i: int) -> int: div, mod = divmod(i, N) if div % 2 == 0: return sequence[mod] else: return sequence[N-mod-1] return partial(force, sequence=_advance(f))
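The force/_advance plumbing is defined elsewhere in the module; the bounced index pattern itself can be checked in isolation by evaluating the inner f directly:

seq = [0, 1, 2, 3]
N = len(seq)

def f(i):
    div, mod = divmod(i, N)
    return seq[mod] if div % 2 == 0 else seq[N - mod - 1]

print([f(i) for i in range(10)])  # [0, 1, 2, 3, 3, 2, 1, 0, 0, 1]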
Return a driver function that can advance a sequence of cosine values. .. code-block:: none value = A * cos(w*i + phi) + offset Args: w (float) : a frequency for the cosine driver A (float) : an amplitude for the cosine driver phi (float) : a phase offset to start the cosine driver with offset (float) : a global offset to add to the driver values
def cosine(w: float, A: float = 1, phi: float = 0, offset: float = 0) -> partial[Callable[[], None]]: ''' Return a driver function that can advance a sequence of cosine values. .. code-block:: none value = A * cos(w*i + phi) + offset Args: w (float) : a frequency for the cosine driver A (float) : an amplitude for the cosine driver phi (float) : a phase offset to start the cosine driver with offset (float) : a global offset to add to the driver values ''' from math import cos def f(i: float) -> float: return A * cos(w*i + phi) + offset return partial(force, sequence=_advance(f))
Return a driver function that can advance a simple count.
def count() -> partial[Callable[[], None]]: ''' Return a driver function that can advance a simple count. ''' return partial(force, sequence=_advance(lambda x: x))
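In a Bokeh server application these drivers are typically used as decorators on periodic callbacks. A minimal sketch, assuming a running server document and the standard ``bokeh.driving`` / ``bokeh.io`` import locations:

.. code-block:: python

    from bokeh.driving import count
    from bokeh.io import curdoc
    from bokeh.models import ColumnDataSource

    source = ColumnDataSource(data=dict(x=[], y=[]))

    @count()
    def update(i):
        # the driver supplies 0, 1, 2, ... on successive invocations
        source.stream({"x": [i], "y": [i ** 2]})

    # invoke update every 100 ms; the counter value is passed in automatically
    curdoc().add_periodic_callback(update, 100)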
Return a decorator that can "force" a function with an arbitrary supplied generator Args: sequence (iterable) : generator to drive f with Returns: decorator
def force(f: Callable[[Any], None], sequence: Iterator[Any]) -> Callable[[], None]: ''' Return a decorator that can "force" a function with an arbitrary supplied generator Args: sequence (iterable) : generator to drive f with Returns: decorator ''' def wrapper() -> None: f(next(sequence)) return wrapper
Return a driver function that can advance a sequence of linear values. .. code-block:: none value = m * i + b Args: m (float) : a slope for the linear driver b (float) : an offset for the linear driver
def linear(m: float = 1, b: float = 0) -> partial[Callable[[], None]]: ''' Return a driver function that can advance a sequence of linear values. .. code-block:: none value = m * i + b Args: m (float) : a slope for the linear driver b (float) : an offset for the linear driver ''' def f(i: float) -> float: return m * i + b return partial(force, sequence=_advance(f))
Return a driver function that can advance a repeated sequence of values. .. code-block:: none seq = [0, 1, 2, 3] # repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...] Args: sequence (seq) : a sequence of values for the driver to repeat
def repeat(sequence: Sequence[int]) -> partial[Callable[[], None]]: ''' Return a driver function that can advance a repeated sequence of values. .. code-block:: none seq = [0, 1, 2, 3] # repeat(seq) => [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, ...] Args: sequence (seq) : a sequence of values for the driver to repeat ''' N = len(sequence) def f(i: int) -> int: return sequence[i%N] return partial(force, sequence=_advance(f))
Return a driver function that can advance a sequence of sine values. .. code-block:: none value = A * sin(w*i + phi) + offset Args: w (float) : a frequency for the sine driver A (float) : an amplitude for the sine driver phi (float) : a phase offset to start the sine driver with offset (float) : a global offset to add to the driver values
def sine(w: float, A: float = 1, phi: float = 0, offset: float = 0) -> partial[Callable[[], None]]: ''' Return a driver function that can advance a sequence of sine values. .. code-block:: none value = A * sin(w*i + phi) + offset Args: w (float) : a frequency for the sine driver A (float) : an amplitude for the sine driver phi (float) : a phase offset to start the sine driver with offset (float) : a global offset to add to the driver values ''' from math import sin def f(i: float) -> float: return A * sin(w*i + phi) + offset return partial(force, sequence=_advance(f))
Yield a sequence generated by calling a given function with successively incremented integer values. Args: f (callable) : The function to advance Yields: f(i) where i increases each call
def _advance(f: Callable[[int], T]) -> Iterable[T]: ''' Yield a sequence generated by calling a given function with successively incremented integer values. Args: f (callable) : The function to advance Yields: f(i) where i increases each call ''' i = 0 while True: yield f(i) i += 1
Initialize a directory as a new bokeh extension. Arguments: base_dir (str) : The location of the extension. interactive (bool) : Guide the user step-by-step. verbose (bool) : Display detailed build information. bokehjs_version (str) : Use a specific version of bokehjs. debug (bool) : Allow for remote debugging. Returns: bool
def init( base_dir: PathLike, *, interactive: bool = False, verbose: bool = False, bokehjs_version: str | None = None, debug: bool = False, ) -> bool: """ Initialize a directory as a new bokeh extension. Arguments: base_dir (str) : The location of the extension. interactive (bool) : Guide the user step-by-step. verbose (bool) : Display detailed build information. bokehjs_version (str) : Use a specific version of bokehjs. debug (bool) : Allow for remote debugging. Returns: bool """ args: list[str] = [] if interactive: args.append("--interactive") if verbose: args.append("--verbose") if bokehjs_version: args.extend(["--bokehjs-version", bokehjs_version]) proc = _run_command("init", base_dir, args, debug) return proc.returncode == 0
Build a bokeh extension in the given directory. Arguments: base_dir (str) : The location of the extension. rebuild (bool) : Ignore caches and rebuild from scratch. verbose (bool) : Display detailed build information. debug (bool) : Allow for remote debugging. Returns: bool
def build(base_dir: PathLike, *, rebuild: bool = False, verbose: bool = False, debug: bool = False) -> bool: """ Build a bokeh extension in the given directory. Arguments: base_dir (str) : The location of the extension. rebuild (bool) : Ignore caches and rebuild from scratch. verbose (bool) : Display detailed build information. debug (bool) : Allow for remote debugging. Returns: bool """ args: list[str] = [] if rebuild: args.append("--rebuild") if verbose: args.append("--verbose") proc = _run_command("build", base_dir, args, debug) return proc.returncode == 0
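A minimal sketch of chaining ``init`` and ``build`` to scaffold and compile an extension from Python rather than from the CLI. The directory name is hypothetical, and the import path assumes these helpers live in ``bokeh.ext``:

.. code-block:: python

    from bokeh.ext import build, init  # assumed import location

    # "my_extension" is a hypothetical directory name
    if init("my_extension", verbose=True):
        ok = build("my_extension", rebuild=True, verbose=True)
        print("extension built" if ok else "build failed")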
Create a row of Bokeh Layout objects. Forces all objects to have the same sizing_mode, which is required for complex layouts to work. Args: children (list of :class:`~bokeh.models.LayoutDOM` ): A list of instances for the row. Can be any of the following - |Plot|, :class:`~bokeh.models.Widget`, :class:`~bokeh.models.Row`, :class:`~bokeh.models.Column`, :class:`~bokeh.models.Spacer`. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.LayoutDOM`. Returns: Row: A row of LayoutDOM objects all with the same sizing_mode. Examples: >>> row(plot1, plot2) >>> row(children=[widgets, plot], sizing_mode='stretch_both')
def row(*children: UIElement | list[UIElement], sizing_mode: SizingModeType | None = None, **kwargs: Any) -> Row: """ Create a row of Bokeh Layout objects. Forces all objects to have the same sizing_mode, which is required for complex layouts to work. Args: children (list of :class:`~bokeh.models.LayoutDOM` ): A list of instances for the row. Can be any of the following - |Plot|, :class:`~bokeh.models.Widget`, :class:`~bokeh.models.Row`, :class:`~bokeh.models.Column`, :class:`~bokeh.models.Spacer`. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.LayoutDOM`. Returns: Row: A row of LayoutDOM objects all with the same sizing_mode. Examples: >>> row(plot1, plot2) >>> row(children=[widgets, plot], sizing_mode='stretch_both') """ _children = _parse_children_arg(*children, children=kwargs.pop("children", None)) _handle_child_sizing(_children, sizing_mode, widget="row") return Row(children=_children, sizing_mode=sizing_mode, **kwargs)
Create a column of Bokeh Layout objects. Forces all objects to have the same sizing_mode, which is required for complex layouts to work. Args: children (list of :class:`~bokeh.models.LayoutDOM` ): A list of instances for the column. Can be any of the following - |Plot|, :class:`~bokeh.models.Widget`, :class:`~bokeh.models.Row`, :class:`~bokeh.models.Column`, :class:`~bokeh.models.Spacer`. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.LayoutDOM`. Returns: Column: A column of LayoutDOM objects all with the same sizing_mode. Examples: >>> column(plot1, plot2) >>> column(children=[widgets, plot], sizing_mode='stretch_both')
def column(*children: UIElement | list[UIElement], sizing_mode: SizingModeType | None = None, **kwargs: Any) -> Column: """ Create a column of Bokeh Layout objects. Forces all objects to have the same sizing_mode, which is required for complex layouts to work. Args: children (list of :class:`~bokeh.models.LayoutDOM` ): A list of instances for the column. Can be any of the following - |Plot|, :class:`~bokeh.models.Widget`, :class:`~bokeh.models.Row`, :class:`~bokeh.models.Column`, :class:`~bokeh.models.Spacer`. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.LayoutDOM`. Returns: Column: A column of LayoutDOM objects all with the same sizing_mode. Examples: >>> column(plot1, plot2) >>> column(children=[widgets, plot], sizing_mode='stretch_both') """ _children = _parse_children_arg(*children, children=kwargs.pop("children", None)) _handle_child_sizing(_children, sizing_mode, widget="column") return Column(children=_children, sizing_mode=sizing_mode, **kwargs)
Create a grid-based arrangement of Bokeh Layout objects. Args: children (list of lists of :class:`~bokeh.models.LayoutDOM` ): A list of lists of instances for a grid layout. Can be any of the following - |Plot|, :class:`~bokeh.models.Widget`, :class:`~bokeh.models.Row`, :class:`~bokeh.models.Column`, :class:`~bokeh.models.Spacer`. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.LayoutDOM`. Returns: Column: A column of ``Row`` layouts of the children, all with the same sizing_mode. Examples: >>> layout([[plot_1, plot_2], [plot_3, plot_4]]) >>> layout( children=[ [widget_1, plot_1], [slider], [widget_2, plot_2, plot_3] ], sizing_mode='fixed', )
def layout(*args: UIElement, children: list[UIElement] | None = None, sizing_mode: SizingModeType | None = None, **kwargs: Any) -> Column: """ Create a grid-based arrangement of Bokeh Layout objects. Args: children (list of lists of :class:`~bokeh.models.LayoutDOM` ): A list of lists of instances for a grid layout. Can be any of the following - |Plot|, :class:`~bokeh.models.Widget`, :class:`~bokeh.models.Row`, :class:`~bokeh.models.Column`, :class:`~bokeh.models.Spacer`. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.LayoutDOM`. Returns: Column: A column of ``Row`` layouts of the children, all with the same sizing_mode. Examples: >>> layout([[plot_1, plot_2], [plot_3, plot_4]]) >>> layout( children=[ [widget_1, plot_1], [slider], [widget_2, plot_2, plot_3] ], sizing_mode='fixed', ) """ _children = _parse_children_arg(*args, children=children) return _create_grid(_children, sizing_mode, **kwargs)
Create a grid of plots rendered on separate canvases. The ``gridplot`` function builds a single toolbar for all the plots in the grid. ``gridplot`` is designed to lay out a set of plots. For general grid layout, use the :func:`~bokeh.layouts.layout` function. Args: children (list of lists of |Plot|): An array of plots to display in a grid, given as a list of lists of Plot objects. To leave a position in the grid empty, pass None for that position in the children list. OR list of |Plot| if called with ncols. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.LayoutDOM`. toolbar_location (``above``, ``below``, ``left``, ``right`` ): Where the toolbar will be located, with respect to the grid. Default is ``above``. If set to None, no toolbar will be attached to the grid. ncols (int, optional): Specify the number of columns you would like in your grid. You must only pass an un-nested list of plots (as opposed to a list of lists of plots) when using ncols. width (int, optional): The width you would like all your plots to be. height (int, optional): The height you would like all your plots to be. toolbar_options (dict, optional) : A dictionary of options that will be used to construct the grid's toolbar (an instance of :class:`~bokeh.models.Toolbar`). If none is supplied, Toolbar's defaults will be used. merge_tools (``True``, ``False``): Combine tools from all child plots into a single toolbar. Returns: GridPlot: Examples: >>> gridplot([[plot_1, plot_2], [plot_3, plot_4]]) >>> gridplot([plot_1, plot_2, plot_3, plot_4], ncols=2, width=200, height=100) >>> gridplot( children=[[plot_1, plot_2], [None, plot_3]], toolbar_location='right', sizing_mode='fixed', toolbar_options=dict(logo='gray') )
def gridplot( children: list[list[UIElement | None]], *, sizing_mode: SizingModeType | None = None, toolbar_location: LocationType | None = "above", ncols: int | None = None, width: int | None = None, height: int | None = None, toolbar_options: dict[ToolbarOptions, Any] | None = None, merge_tools: bool = True) -> GridPlot: ''' Create a grid of plots rendered on separate canvases. The ``gridplot`` function builds a single toolbar for all the plots in the grid. ``gridplot`` is designed to layout a set of plots. For general grid layout, use the :func:`~bokeh.layouts.layout` function. Args: children (list of lists of |Plot|): An array of plots to display in a grid, given as a list of lists of Plot objects. To leave a position in the grid empty, pass None for that position in the children list. OR list of |Plot| if called with ncols. sizing_mode (``"fixed"``, ``"stretch_both"``, ``"scale_width"``, ``"scale_height"``, ``"scale_both"`` ): How will the items in the layout resize to fill the available space. Default is ``"fixed"``. For more information on the different modes see :attr:`~bokeh.models.LayoutDOM.sizing_mode` description on :class:`~bokeh.models.LayoutDOM`. toolbar_location (``above``, ``below``, ``left``, ``right`` ): Where the toolbar will be located, with respect to the grid. Default is ``above``. If set to None, no toolbar will be attached to the grid. ncols (int, optional): Specify the number of columns you would like in your grid. You must only pass an un-nested list of plots (as opposed to a list of lists of plots) when using ncols. width (int, optional): The width you would like all your plots to be height (int, optional): The height you would like all your plots to be. toolbar_options (dict, optional) : A dictionary of options that will be used to construct the grid's toolbar (an instance of :class:`~bokeh.models.Toolbar`). If none is supplied, Toolbar's defaults will be used. merge_tools (``True``, ``False``): Combine tools from all child plots into a single toolbar. 
Returns: GridPlot: Examples: >>> gridplot([[plot_1, plot_2], [plot_3, plot_4]]) >>> gridplot([plot_1, plot_2, plot_3, plot_4], ncols=2, width=200, height=100) >>> gridplot( children=[[plot_1, plot_2], [None, plot_3]], toolbar_location='right' sizing_mode='fixed', toolbar_options=dict(logo='gray') ) ''' if toolbar_options is None: toolbar_options = {} if toolbar_location: if not hasattr(Location, toolbar_location): raise ValueError(f"Invalid value of toolbar_location: {toolbar_location}") children = _parse_children_arg(children=children) if ncols: if any(isinstance(child, list) for child in children): raise ValueError("Cannot provide a nested list when using ncols") children = list(_chunks(children, ncols)) # Additional children set-up for grid plot if not children: children = [] # Make the grid toolbars: list[Toolbar] = [] items: list[tuple[UIElement, int, int]] = [] for y, row in enumerate(children): for x, item in enumerate(row): if item is None: continue elif isinstance(item, LayoutDOM): if merge_tools: for plot in item.select(dict(type=Plot)): toolbars.append(plot.toolbar) plot.toolbar_location = None if width is not None: item.width = width if height is not None: item.height = height if sizing_mode is not None and _has_auto_sizing(item): item.sizing_mode = sizing_mode items.append((item, y, x)) elif isinstance(item, UIElement): continue else: raise ValueError("Only UIElement and LayoutDOM items can be inserted into a grid") def merge(cls: type[Tool], group: list[Tool]) -> Tool | ToolProxy | None: if issubclass(cls, SaveTool | CopyTool | ExamineTool | FullscreenTool): return cls() else: return None tools: list[Tool | ToolProxy] = [] for toolbar in toolbars: tools.extend(toolbar.tools) if merge_tools: tools = group_tools(tools, merge=merge) logos = [ toolbar.logo for toolbar in toolbars ] autohides = [ toolbar.autohide for toolbar in toolbars ] active_drags = [ toolbar.active_drag for toolbar in toolbars ] active_inspects = [ toolbar.active_inspect for toolbar in toolbars ] active_scrolls = [ toolbar.active_scroll for toolbar in toolbars ] active_taps = [ toolbar.active_tap for toolbar in toolbars ] active_multis = [ toolbar.active_multi for toolbar in toolbars ] V = TypeVar("V") def assert_unique(values: list[V], name: ToolbarOptions) -> V | UndefinedType: if name in toolbar_options: return toolbar_options[name] n = len(set(values)) if n == 0: return Undefined elif n > 1: warn(f"found multiple competing values for 'toolbar.{name}' property; using the latest value") return values[-1] logo = assert_unique(logos, "logo") autohide = assert_unique(autohides, "autohide") active_drag = assert_unique(active_drags, "active_drag") active_inspect = assert_unique(active_inspects, "active_inspect") active_scroll = assert_unique(active_scrolls, "active_scroll") active_tap = assert_unique(active_taps, "active_tap") active_multi = assert_unique(active_multis, "active_multi") toolbar = Toolbar( tools=tools, logo=logo, autohide=autohide, active_drag=active_drag, active_inspect=active_inspect, active_scroll=active_scroll, active_tap=active_tap, active_multi=active_multi, ) gp = GridPlot( children=items, toolbar=toolbar, toolbar_location=toolbar_location, sizing_mode=sizing_mode, ) return gp
Conveniently create a grid of layoutable objects. Grids are created by using the ``GridBox`` model. This gives the most control over the layout of a grid, but is also tedious and may result in unreadable code in practical applications. The ``grid()`` function remedies this by reducing the level of control, but in turn providing a more convenient API. Supported patterns: 1. Nested lists of layoutable objects. Assumes the top-level list represents a column and alternates between rows and columns in subsequent nesting levels. One can use ``None`` for padding purposes. >>> grid([p1, [[p2, p3], p4]]) GridBox(children=[ (p1, 0, 0, 1, 2), (p2, 1, 0, 1, 1), (p3, 2, 0, 1, 1), (p4, 1, 1, 2, 1), ]) 2. Nested ``Row`` and ``Column`` instances. Similar to the first pattern, but instead of using nested lists, it uses nested ``Row`` and ``Column`` models. This can be much more readable than the former. Note, however, that only models that don't have ``sizing_mode`` set are used. >>> grid(column(p1, row(column(p2, p3), p4))) GridBox(children=[ (p1, 0, 0, 1, 2), (p2, 1, 0, 1, 1), (p3, 2, 0, 1, 1), (p4, 1, 1, 2, 1), ]) 3. Flat list of layoutable objects. This requires ``nrows`` and/or ``ncols`` to be set. The input list will be rearranged into a 2D array accordingly. One can use ``None`` for padding purposes. >>> grid([p1, p2, p3, p4], ncols=2) GridBox(children=[ (p1, 0, 0, 1, 1), (p2, 0, 1, 1, 1), (p3, 1, 0, 1, 1), (p4, 1, 1, 1, 1), ])
def grid(children: Any = [], sizing_mode: SizingModeType | None = None, nrows: int | None = None, ncols: int | None = None) -> GridBox: """ Conveniently create a grid of layoutable objects. Grids are created by using ``GridBox`` model. This gives the most control over the layout of a grid, but is also tedious and may result in unreadable code in practical applications. ``grid()`` function remedies this by reducing the level of control, but in turn providing a more convenient API. Supported patterns: 1. Nested lists of layoutable objects. Assumes the top-level list represents a column and alternates between rows and columns in subsequent nesting levels. One can use ``None`` for padding purpose. >>> grid([p1, [[p2, p3], p4]]) GridBox(children=[ (p1, 0, 0, 1, 2), (p2, 1, 0, 1, 1), (p3, 2, 0, 1, 1), (p4, 1, 1, 2, 1), ]) 2. Nested ``Row`` and ``Column`` instances. Similar to the first pattern, just instead of using nested lists, it uses nested ``Row`` and ``Column`` models. This can be much more readable that the former. Note, however, that only models that don't have ``sizing_mode`` set are used. >>> grid(column(p1, row(column(p2, p3), p4))) GridBox(children=[ (p1, 0, 0, 1, 2), (p2, 1, 0, 1, 1), (p3, 2, 0, 1, 1), (p4, 1, 1, 2, 1), ]) 3. Flat list of layoutable objects. This requires ``nrows`` and/or ``ncols`` to be set. The input list will be rearranged into a 2D array accordingly. One can use ``None`` for padding purpose. >>> grid([p1, p2, p3, p4], ncols=2) GridBox(children=[ (p1, 0, 0, 1, 1), (p2, 0, 1, 1, 1), (p3, 1, 0, 1, 1), (p4, 1, 1, 1, 1), ]) """ @dataclass class row: children: list[row | col] @dataclass class col: children: list[row | col] @dataclass class Item: layout: LayoutDOM r0: int c0: int r1: int c1: int @dataclass class Grid: nrows: int ncols: int items: list[Item] def flatten(layout) -> GridBox: def gcd(a: int, b: int) -> int: a, b = abs(a), abs(b) while b != 0: a, b = b, a % b return a def lcm(a: int, *rest: int) -> int: for b in rest: a = (a*b) // gcd(a, b) return a def nonempty(child: Grid) -> bool: return child.nrows != 0 and child.ncols != 0 def _flatten(layout: row | col | LayoutDOM) -> Grid: if isinstance(layout, row): children = list(filter(nonempty, map(_flatten, layout.children))) if not children: return Grid(0, 0, []) nrows = lcm(*[ child.nrows for child in children ]) ncols = sum(child.ncols for child in children) items: list[Item] = [] offset = 0 for child in children: factor = nrows//child.nrows for i in child.items: items.append(Item(i.layout, factor*i.r0, i.c0 + offset, factor*i.r1, i.c1 + offset)) offset += child.ncols return Grid(nrows, ncols, items) elif isinstance(layout, col): children = list(filter(nonempty, map(_flatten, layout.children))) if not children: return Grid(0, 0, []) nrows = sum(child.nrows for child in children) ncols = lcm(*[ child.ncols for child in children ]) items = [] offset = 0 for child in children: factor = ncols//child.ncols for i in child.items: items.append(Item(i.layout, i.r0 + offset, factor*i.c0, i.r1 + offset, factor*i.c1)) offset += child.nrows return Grid(nrows, ncols, items) else: return Grid(1, 1, [Item(layout, 0, 0, 1, 1)]) grid = _flatten(layout) children = [] for i in grid.items: if i.layout is not None: children.append((i.layout, i.r0, i.c0, i.r1 - i.r0, i.c1 - i.c0)) return GridBox(children=children) layout: row | col if isinstance(children, list): if nrows is not None or ncols is not None: N = len(children) if ncols is None: ncols = math.ceil(N/nrows) layout = col([ row(children[i:i+ncols]) for i in range(0, N, 
ncols) ]) else: def traverse(children: list[LayoutDOM], level: int = 0): if isinstance(children, list): container = col if level % 2 == 0 else row return container([ traverse(child, level+1) for child in children ]) else: return children layout = traverse(children) elif isinstance(children, LayoutDOM): def is_usable(child: LayoutDOM) -> bool: return _has_auto_sizing(child) and child.spacing == 0 def traverse(item: LayoutDOM, top_level: bool = False): if isinstance(item, FlexBox) and (top_level or is_usable(item)): container = col if isinstance(item, Column) else row return container(list(map(traverse, item.children))) else: return item layout = traverse(children, top_level=True) elif isinstance(children, str): raise NotImplementedError else: raise ValueError("expected a list, string or model") grid = flatten(layout) if sizing_mode is not None: grid.sizing_mode = sizing_mode for child in grid.children: layout = child[0] if _has_auto_sizing(layout): layout.sizing_mode = sizing_mode return grid
Group common tools into tool proxies.
def group_tools(tools: list[Tool | ToolProxy], *, merge: MergeFn[Tool] | None = None, ignore: set[str] | None = None) -> list[Tool | ToolProxy]: """ Group common tools into tool proxies. """ @dataclass class ToolEntry: tool: Tool props: Any by_type: defaultdict[type[Tool], list[ToolEntry]] = defaultdict(list) computed: list[Tool | ToolProxy] = [] if ignore is None: ignore = {"overlay", "renderers"} for tool in tools: if isinstance(tool, ToolProxy): computed.append(tool) else: props = tool.properties_with_values() for attr in ignore: if attr in props: del props[attr] by_type[tool.__class__].append(ToolEntry(tool, props)) for cls, entries in by_type.items(): if merge is not None: merged = merge(cls, [entry.tool for entry in entries]) if merged is not None: computed.append(merged) continue while entries: head, *tail = entries group: list[Tool] = [head.tool] for item in list(tail): if item.props == head.props: group.append(item.tool) entries.remove(item) entries.remove(head) if len(group) == 1: computed.append(group[0]) elif merge is not None and (tool := merge(cls, group)) is not None: computed.append(tool) else: computed.append(ToolProxy(tools=group)) return computed
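A small sketch of ``group_tools`` used outside of ``gridplot``, merging the identically configured pan and wheel-zoom tools of two figures into shared proxies. The import location of ``group_tools`` is an assumption here:

.. code-block:: python

    from bokeh.layouts import group_tools  # assumed to be importable from here
    from bokeh.plotting import figure

    p1 = figure(tools="pan,wheel_zoom")
    p2 = figure(tools="pan,wheel_zoom")

    # tools with identical configuration collapse into shared ToolProxy instances
    merged = group_tools(p1.toolbar.tools + p2.toolbar.tools)
    print(merged)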
Recursively create grid from input lists.
def _create_grid(iterable: Iterable[UIElement | list[UIElement]], sizing_mode: SizingModeType | None, layer: int = 0, **kwargs) -> Row | Column: """Recursively create grid from input lists.""" return_list: list[UIElement] = [] for item in iterable: if isinstance(item, list): return_list.append(_create_grid(item, sizing_mode, layer + 1)) elif isinstance(item, LayoutDOM): if sizing_mode is not None and _has_auto_sizing(item): item.sizing_mode = sizing_mode return_list.append(item) elif isinstance(item, UIElement): return_list.append(item) else: raise ValueError( f"""Only LayoutDOM items can be inserted into a layout. Tried to insert: {item} of type {type(item)}""", ) if layer % 2 == 0: return column(children=return_list, sizing_mode=sizing_mode, **kwargs) else: return row(children=return_list, sizing_mode=sizing_mode, **kwargs)
Yield successive ``ncols``-sized chunks from the sequence ``l``.
def _chunks(l: Sequence[I], ncols: int) -> Iterator[Sequence[I]]: """Yield successive ``ncols``-sized chunks from the sequence ``l``.""" assert isinstance(ncols, int), "ncols must be an integer" for i in range(0, len(l), ncols): yield l[i: i + ncols]
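For example (illustrative only), chunking a flat list of five items into rows of two leaves a shorter final row:

.. code-block:: python

    >>> list(_chunks([1, 2, 3, 4, 5], ncols=2))
    [[1, 2], [3, 4], [5]]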
Generate a new palette as a subset of a given palette. Given an input ``palette``, take ``n`` colors from it by dividing its length into ``n`` (approximately) evenly spaced indices. Args: palette (seq[str]) : a sequence of hex RGB color strings n (int) : the size of the output palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n > len(palette)
def linear_palette(palette: Palette, n: int) -> Palette: """ Generate a new palette as a subset of a given palette. Given an input ``palette``, take ``n`` colors from it by dividing its length into ``n`` (approximately) evenly spaced indices. Args: palette (seq[str]) : a sequence of hex RGB color strings n (int) : the size of the output palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n > len(palette) """ if n > len(palette): raise ValueError(f"Requested {n} colors, function can only return colors up to the base palette's length ({len(palette)})") return tuple( palette[int(math.floor(i))] for i in np.linspace(0, len(palette)-1, num=n) )
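For instance, taking three colors from a 256-color base palette picks the first, (approximately) middle, and last entries. A short sketch, assuming the standard ``bokeh.palettes`` exports:

.. code-block:: python

    from bokeh.palettes import Viridis256, linear_palette

    small = linear_palette(Viridis256, 3)
    # three evenly spaced colors: first, (roughly) middle, and last of the base palette
    assert small[0] == Viridis256[0] and small[-1] == Viridis256[-1]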
Generate a new palette by combining exactly two input palettes. Given an input ``palette1`` and ``palette2``, take a combined ``n`` colors, and combine input palettes at the relative ``midpoint``. ``palette1`` and ``palette2`` are meant to be sequential palettes that proceed left to right from perceptually dark to light colors. In that case the returned palette is comprised of the input palettes connected at perceptually light ends. Palettes are combined by piecewise linear interpolation. Args: palette1 (seq[str]) : A sequence of hex RGB color strings for the first palette palette2 (seq[str]) : A sequence of hex RGB color strings for the second palette n (int) : The size of the output palette to generate midpoint (float, optional) : Relative position in the returned palette where input palettes are connected (default: 0.5) Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the possible combined length the input palettes
def diverging_palette(palette1: Palette, palette2: Palette, n: int, midpoint: float = 0.5) -> Palette: """ Generate a new palette by combining exactly two input palettes. Given an input ``palette1`` and ``palette2``, take a combined ``n`` colors, and combine input palettes at the relative ``midpoint``. ``palette1`` and ``palette2`` are meant to be sequential palettes that proceed left to right from perceptually dark to light colors. In that case the returned palette is comprised of the input palettes connected at perceptually light ends. Palettes are combined by piecewise linear interpolation. Args: palette1 (seq[str]) : A sequence of hex RGB color strings for the first palette palette2 (seq[str]) : A sequence of hex RGB color strings for the second palette n (int) : The size of the output palette to generate midpoint (float, optional) : Relative position in the returned palette where input palettes are connected (default: 0.5) Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the possible combined length the input palettes """ # flip palette2 so that perceptually light colors are joined palette2 = palette2[::-1] # determine number of colors from each palette n1 = int(round(midpoint * n)) n2 = int(round((1 - midpoint) * n)) # return piecewise linear interpolation of colors return linear_palette(palette1, n1) + linear_palette(palette2, n2)
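A sketch combining two of the built-in Brewer palettes into an 18-color diverging palette (the palette names are assumed to come from ``bokeh.palettes``):

.. code-block:: python

    from bokeh.palettes import Blues9, Reds9, diverging_palette

    # 9 colors from Blues9 joined at the light end to the reversed Reds9
    pal = diverging_palette(Blues9, Reds9, n=18)
    assert len(pal) == 18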
Generate a palette that is a single color with linearly varying alpha. Alpha may vary from low to high or high to low, depending on the values of ``start_alpha`` and ``end_alpha``. Args: color (str) : Named color or RGB(A) hex color string. Any alpha component is combined with the ``start_alpha`` to ``end_alpha`` range by multiplying them together, so it is the maximum possible alpha that can be obtained. n (int, optional) : The size of the palette to generate. If not specified uses the maximum number of colors such that adjacent colors differ by an alpha of 1. start_alpha (int, optional) : The alpha component of the start of the palette is this value (in the range 0 to 255) multiplied by the alpha component of the ``color`` argument. end_alpha (int, optional) : The alpha component of the end of the palette is this value (in the range 0 to 255) multiplied by the alpha component of the ``color`` argument. Returns: seq[str] : a sequence of hex RGBA color strings Raises: ValueError if ``color`` is not recognisable as a string name or hex RGB(A) string, or if ``start_alpha`` or ``end_alpha`` are outside the range 0 to 255 inclusive.
def varying_alpha_palette(color: str, n: int | None = None, start_alpha: int = 0, end_alpha: int = 255) -> Palette: """ Generate a palette that is a single color with linearly varying alpha. Alpha may vary from low to high or high to low, depending on the values of ``start_alpha`` and ``end_alpha``. Args: color (str) : Named color or RGB(A) hex color string. Any alpha component is combined with the ``start_alpha`` to ``end_alpha`` range by multiplying them together, so it is the maximum possible alpha that can be obtained. n (int, optional) : The size of the palette to generate. If not specified uses the maximum number of colors such that adjacent colors differ by an alpha of 1. start_alpha (int, optional) : The alpha component of the start of the palette is this value (in the range 0 to 255) multiplied by the alpha component of the ``color`` argument. end_alpha (int, optional) : The alpha component of the end of the palette is this value (in the range 0 to 255) multiplied by the alpha component of the ``color`` argument. Returns: seq[str] : a sequence of hex RGBA color strings Raises: ValueError if ``color`` is not recognisable as a string name or hex RGB(A) string, or if ``start_alpha`` or ``end_alpha`` are outside the range 0 to 255 inclusive. """ if not (0 <= start_alpha <= 255): raise ValueError(f"start_alpha {start_alpha} must be in the range 0 to 255") if not (0 <= end_alpha <= 255): raise ValueError(f"end_alpha {end_alpha} must be in the range 0 to 255") # Take a copy of RGB color as do not want to alter named colors rgba = NamedColor.from_string(color).copy() if rgba.a < 1.0: start_alpha = round(start_alpha*rgba.a) end_alpha = round(end_alpha*rgba.a) if n is None or n < 1: nn = int(abs(end_alpha - start_alpha)) + 1 else: nn = n # Convert alpha to range 0 to 1. norm_start_alpha = start_alpha / 255.0 norm_end_alpha = end_alpha / 255.0 def set_alpha(rgba: RGB, i: int) -> RGB: rgba.a = norm_start_alpha + (norm_end_alpha - norm_start_alpha)*i / (nn-1.0) return rgba palette = tuple(set_alpha(rgba, i).to_hex() for i in range(nn)) return palette
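For example (illustrative, not from the source), a five-color fade of a single named color from fully transparent to fully opaque:

.. code-block:: python

    pal = varying_alpha_palette("firebrick", n=5)
    # five hex strings of the same RGB color, with alpha ramping from 0 up to 255
    assert len(pal) == 5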
Generate a new palette by interpolating a given palette. Linear interpolation is performed separately on each of the RGBA components. Args: palette (seq[str]) : A sequence of hex RGB(A) color strings to create new palette from n (int) : The size of the palette to generate Returns: tuple[str] : a sequence of hex RGB(A) color strings Raises: ValueError if ``n`` is negative or the supplied ``palette`` is empty.
def interp_palette(palette: Palette, n: int) -> Palette: """ Generate a new palette by interpolating a given palette. Linear interpolation is performed separately on each of the RGBA components. Args: palette (seq[str]) : A sequence of hex RGB(A) color strings to create new palette from n (int) : The size of the palette to generate Returns: tuple[str] : a sequence of hex RGB(A) color strings Raises: ValueError if ``n`` is negative or the supplied ``palette`` is empty. """ npalette = len(palette) if npalette < 1: raise ValueError("palette must contain at least one color") if n < 0: raise ValueError("requested palette length cannot be negative") rgba_array = to_rgba_array(palette) integers = np.arange(npalette) fractions = np.linspace(0, npalette-1, n) r = np.interp(fractions, integers, rgba_array[:, 0]).astype(np.uint8) g = np.interp(fractions, integers, rgba_array[:, 1]).astype(np.uint8) b = np.interp(fractions, integers, rgba_array[:, 2]).astype(np.uint8) a = np.interp(fractions, integers, rgba_array[:, 3]) / 255.0 # Remains floating-point return tuple(RGB(*args).to_hex() for args in zip(r, g, b, a))
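A sketch interpolating a two-color palette up to five colors; the intermediate entries are component-wise blends of the endpoints (illustrative values only):

.. code-block:: python

    pal = interp_palette(("#ff0000", "#0000ff"), 5)
    assert len(pal) == 5
    # the endpoints of the input palette are preserved
    assert pal[0].lower() == "#ff0000" and pal[-1].lower() == "#0000ff"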
Generate a palette of colors from the Magma palette. The full Magma palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`magma(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> magma(6) ('#000003', '#3B0F6F', '#8C2980', '#DD4968', '#FD9F6C', '#FBFCBF') The resulting palette looks like: :bokeh-palette:`magma(6)`
def magma(n: int) -> Palette: """ Generate a palette of colors from the Magma palette. The full Magma palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`magma(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> magma(6) ('#000003', '#3B0F6F', '#8C2980', '#DD4968', '#FD9F6C', '#FBFCBF') The resulting palette looks like: :bokeh-palette:`magma(6)` """ return linear_palette(Magma256, n)
Generate a palette of colors from the Inferno palette. The full Inferno palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`inferno(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> inferno(6) ('#000003', '#410967', '#932567', '#DC5039', '#FBA40A', '#FCFEA4') The resulting palette looks like: :bokeh-palette:`inferno(6)`
def inferno(n: int) -> Palette: """ Generate a palette of colors from the Inferno palette. The full Inferno palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`inferno(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> inferno(6) ('#000003', '#410967', '#932567', '#DC5039', '#FBA40A', '#FCFEA4') The resulting palette looks like: :bokeh-palette:`inferno(6)` """ return linear_palette(Inferno256, n)
Generate a palette of colors from the Plasma palette. The full Plasma palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`plasma(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> plasma(6) ('#0C0786', '#6A00A7', '#B02A8F', '#E06461', '#FCA635', '#EFF821') The resulting palette looks like: :bokeh-palette:`plasma(6)`
def plasma(n: int) -> Palette: """ Generate a palette of colors from the Plasma palette. The full Plasma palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`plasma(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> plasma(6) ('#0C0786', '#6A00A7', '#B02A8F', '#E06461', '#FCA635', '#EFF821') The resulting palette looks like: :bokeh-palette:`plasma(6)` """ return linear_palette(Plasma256, n)
Generate a palette of colors from the Viridis palette. The full Viridis palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`viridis(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> viridis(6) ('#440154', '#404387', '#29788E', '#22A784', '#79D151', '#FDE724') The resulting palette looks like: :bokeh-palette:`viridis(6)`
def viridis(n: int) -> Palette: """ Generate a palette of colors from the Viridis palette. The full Viridis palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`viridis(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> viridis(6) ('#440154', '#404387', '#29788E', '#22A784', '#79D151', '#FDE724') The resulting palette looks like: :bokeh-palette:`viridis(6)` """ return linear_palette(Viridis256, n)
Generate a palette of colors from the Cividis palette. The full Cividis palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`cividis(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> cividis(6) ('#00204C', '#31446B', '#666870', '#958F78', '#CAB969', '#FFE945') The resulting palette looks like: :bokeh-palette:`cividis(6)`
def cividis(n: int) -> Palette: """ Generate a palette of colors from the Cividis palette. The full Cividis palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`cividis(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> cividis(6) ('#00204C', '#31446B', '#666870', '#958F78', '#CAB969', '#FFE945') The resulting palette looks like: :bokeh-palette:`cividis(6)` """ return linear_palette(Cividis256, n)
Generate a palette of colors from the Turbo palette. Turbo is described here: https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html The full Turbo palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`turbo(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> turbo(6) The resulting palette looks like: :bokeh-palette:`turbo(6)`
def turbo(n: int) -> Palette: """ Generate a palette of colors from the Turbo palette. Turbo is described here: https://ai.googleblog.com/2019/08/turbo-improved-rainbow-colormap-for.html The full Turbo palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`turbo(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> turbo(6) The resulting palette looks like: :bokeh-palette:`turbo(6)` """ return linear_palette(Turbo256, n)
Generate a palette of colors from the Greys palette. The full Greys palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`grey(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> grey(6) ('#000000', '#333333', '#666666', '#999999', '#cccccc', '#ffffff') The resulting palette looks like: :bokeh-palette:`gray(6)` .. note:: This function also has the alternate spelling ``gray``
def grey(n: int) -> Palette: """ Generate a palette of colors from the Greys palette. The full Greys palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`grey(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> grey(6) ('#000000', '#333333', '#666666', '#999999', '#cccccc', '#ffffff') The resulting palette looks like: :bokeh-palette:`gray(6)` .. note:: This function also has the alternate spelling ``gray`` """ return linear_palette(Greys256, n)
Generate a palette of colors from the Greys palette. The full Greys palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`grey(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> gray(6) ('#000000', '#333333', '#666666', '#999999', '#cccccc', '#ffffff') The resulting palette looks like: :bokeh-palette:`grey(6)` .. note:: This function also has the alternate spelling ``grey``
def gray(n: int) -> Palette: """ Generate a palette of colors from the Greys palette. The full Greys palette that serves as input for deriving new palettes has 256 colors, and looks like: :bokeh-palette:`grey(256)` Args: n (int) : size of the palette to generate Returns: seq[str] : a sequence of hex RGB color strings Raises: ValueError if n is greater than the base palette length of 256 Examples: .. code-block:: python >>> gray(6) ('#000000', '#333333', '#666666', '#999999', '#cccccc', '#ffffff') The resulting palette looks like: :bokeh-palette:`grey(6)` .. note:: This function also has the alternate spelling ``grey`` """ return linear_palette(Greys256, n)
Convert palette to a numpy array of uint8 RGBA components.
def to_rgba_array(palette: Palette) -> npt.NDArray[np.uint8]: """ Convert palette to a numpy array of uint8 RGBA components. """ rgba_array = np.empty((len(palette), 4), dtype=np.uint8) for i, color in enumerate(palette): rgba = NamedColor.from_string(color) rgba_array[i] = (rgba.r, rgba.g, rgba.b, rgba.a*255) return rgba_array
Report all versions that have SRI hashes. Returns: tuple
def get_all_sri_versions() -> tuple[str, ...]: """ Report all versions that have SRI hashes. Returns: tuple """ files = (ROOT_DIR / "_sri").glob("*.json") return tuple(file.stem for file in files)
Report SRI script hashes for a specific version of BokehJS. Bokeh provides `Subresource Integrity`_ hashes for all JavaScript files that are published to CDN for full releases. This function returns a dictionary that maps JavaScript filenames to their hashes, for a single version of Bokeh. Args: version (str) : The Bokeh version to return SRI hashes for. Hashes are only provided for full releases, e.g "1.4.0", and not for "dev" builds or release candidates. Returns: dict Raises: ValueError: if the specified version does not exist Example: The returned dict for a single version will map filenames for that version to their SRI hashes: .. code-block:: python { 'bokeh-1.4.0.js': 'vn/jmieHiN+ST+GOXzRU9AFfxsBp8gaJ/wvrzTQGpIKMsdIcyn6U1TYtvzjYztkN', 'bokeh-1.4.0.min.js': 'mdMpUZqu5U0cV1pLU9Ap/3jthtPth7yWSJTu1ayRgk95qqjLewIkjntQDQDQA5cZ', 'bokeh-api-1.4.0.js': 'Y3kNQHt7YjwAfKNIzkiQukIOeEGKzUU3mbSrraUl1KVfrlwQ3ZAMI1Xrw5o3Yg5V', 'bokeh-api-1.4.0.min.js': '4oAJrx+zOFjxu9XLFp84gefY8oIEr75nyVh2/SLnyzzg9wR+mXXEi+xyy/HzfBLM', 'bokeh-tables-1.4.0.js': 'I2iTMWMyfU/rzKXWJ2RHNGYfsXnyKQ3YjqQV2RvoJUJCyaGBrp0rZcWiTAwTc9t6', 'bokeh-tables-1.4.0.min.js': 'pj14Cq5ZSxsyqBh+pnL2wlBS3UX25Yz1gVxqWkFMCExcnkN3fl4mbOF8ZUKyh7yl', 'bokeh-widgets-1.4.0.js': 'scpWAebHEUz99AtveN4uJmVTHOKDmKWnzyYKdIhpXjrlvOwhIwEWUrvbIHqA0ke5', 'bokeh-widgets-1.4.0.min.js': 'xR3dSxvH5hoa9txuPVrD63jB1LpXhzFoo0ho62qWRSYZVdyZHGOchrJX57RwZz8l' } .. _Subresource Integrity: https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity
def get_sri_hashes_for_version(version: str) -> Hashes: """ Report SRI script hashes for a specific version of BokehJS. Bokeh provides `Subresource Integrity`_ hashes for all JavaScript files that are published to CDN for full releases. This function returns a dictionary that maps JavaScript filenames to their hashes, for a single version of Bokeh. Args: version (str) : The Bokeh version to return SRI hashes for. Hashes are only provided for full releases, e.g "1.4.0", and not for "dev" builds or release candidates. Returns: dict Raises: ValueError: if the specified version does not exist Example: The returned dict for a single version will map filenames for that version to their SRI hashes: .. code-block:: python { 'bokeh-1.4.0.js': 'vn/jmieHiN+ST+GOXzRU9AFfxsBp8gaJ/wvrzTQGpIKMsdIcyn6U1TYtvzjYztkN', 'bokeh-1.4.0.min.js': 'mdMpUZqu5U0cV1pLU9Ap/3jthtPth7yWSJTu1ayRgk95qqjLewIkjntQDQDQA5cZ', 'bokeh-api-1.4.0.js': 'Y3kNQHt7YjwAfKNIzkiQukIOeEGKzUU3mbSrraUl1KVfrlwQ3ZAMI1Xrw5o3Yg5V', 'bokeh-api-1.4.0.min.js': '4oAJrx+zOFjxu9XLFp84gefY8oIEr75nyVh2/SLnyzzg9wR+mXXEi+xyy/HzfBLM', 'bokeh-tables-1.4.0.js': 'I2iTMWMyfU/rzKXWJ2RHNGYfsXnyKQ3YjqQV2RvoJUJCyaGBrp0rZcWiTAwTc9t6', 'bokeh-tables-1.4.0.min.js': 'pj14Cq5ZSxsyqBh+pnL2wlBS3UX25Yz1gVxqWkFMCExcnkN3fl4mbOF8ZUKyh7yl', 'bokeh-widgets-1.4.0.js': 'scpWAebHEUz99AtveN4uJmVTHOKDmKWnzyYKdIhpXjrlvOwhIwEWUrvbIHqA0ke5', 'bokeh-widgets-1.4.0.min.js': 'xR3dSxvH5hoa9txuPVrD63jB1LpXhzFoo0ho62qWRSYZVdyZHGOchrJX57RwZz8l' } .. _Subresource Integrity: https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity """ if version not in _ALL_SRI_HASHES: try: with open(ROOT_DIR / "_sri" / f"{version}.json") as f: _ALL_SRI_HASHES[version] = json.load(f) except Exception as e: raise ValueError(f"Missing SRI hash for version {version}") from e return _ALL_SRI_HASHES[version]
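For example, looking up the hash of one file from the 1.4.0 release shown above (assuming the function is imported from ``bokeh.resources``):

.. code-block:: python

    from bokeh.resources import get_sri_hashes_for_version  # assumed import location

    hashes = get_sri_hashes_for_version("1.4.0")
    print(hashes["bokeh-1.4.0.min.js"])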
Verify the SRI hashes in a full release package. This function compares the computed SRI hashes for the BokehJS files in a full release package to the values in the SRI manifest file. Returns None if all hashes match, otherwise an exception will be raised. .. note:: This function can only be called on full release (e.g "1.2.3") packages. Returns: None Raises: ValueError If called outside a full release package RuntimeError If there are missing, extra, or mismatched files
def verify_sri_hashes() -> None: """ Verify the SRI hashes in a full release package. This function compares the computed SRI hashes for the BokehJS files in a full release package to the values in the SRI manifest file. Returns None if all hashes match, otherwise an exception will be raised. .. note:: This function can only be called on full release (e.g "1.2.3") packages. Returns: None Raises: ValueError If called outside a full release package RuntimeError If there are missing, extra, or mismatched files """ if not is_full_release(): raise ValueError("verify_sri_hashes() can only be used with full releases") paths = list((settings.bokehjs_path() / "js").glob("bokeh*.js")) hashes = get_sri_hashes_for_version(__version__) if len(hashes) < len(paths): raise RuntimeError("There are unexpected 'bokeh*.js' files in the package") if len(hashes) > len(paths): raise RuntimeError("There are 'bokeh*.js' files missing in the package") bad: list[Path] = [] for path in paths: name, suffix = str(path.name).split(".", 1) filename = f"{name}-{__version__}.{suffix}" sri_hash = _compute_single_hash(path) if hashes[filename] != sri_hash: bad.append(path) if bad: raise RuntimeError(f"SRI Hash mismatches in the package: {bad!r}")
Return a string as-is.
def convert_str(value: str) -> str: ''' Return a string as-is. ''' return value
Convert a string to an integer.
def convert_int(value: int | str) -> int: ''' Convert a string to an integer. ''' return int(value)
Convert a string to True or False. If a boolean is passed in, it is returned as-is. Otherwise the function maps the following strings, ignoring case: * "yes", "1", "on" -> True * "no", "0", "off" -> False Args: value (str): A string value to convert to bool Returns: bool Raises: ValueError
def convert_bool(value: bool | str) -> bool: ''' Convert a string to True or False. If a boolean is passed in, it is returned as-is. Otherwise the function maps the following strings, ignoring case: * "yes", "1", "on" -> True * "no", "0", "off" -> False Args: value (str): A string value to convert to bool Returns: bool Raises: ValueError ''' if isinstance(value, bool): return value val = value.lower() if val in ["yes", "1", "on", "true", "True"]: return True if val in ["no", "0", "off", "false", "False"]: return False raise ValueError(f"Cannot convert {value} to boolean value")
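A few illustrative conversions, doctest-style, matching the mappings described above:

.. code-block:: python

    >>> convert_bool("Yes")
    True
    >>> convert_bool("off")
    False
    >>> convert_bool(True)
    True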
Convert a string to a list of strings. If a list or tuple is passed in, it is returned as-is. Args: value (seq[str] or str) : A string to convert to a list of strings Returns list[str]
def convert_str_seq(value: list[str] | str) -> list[str]: ''' Convert a string to a list of strings. If a list or tuple is passed in, it is returned as-is. Args: value (seq[str] or str) : A string to convert to a list of strings Returns list[str] ''' if isinstance(value, list | tuple): return value try: return value.split(",") except Exception: raise ValueError(f"Cannot convert {value} to list value")
Convert a string to a Python logging level If a log level is passed in, it is returned as-is. Otherwise the function understands the following strings, ignoring case: * "critical" * "error" * "warning" * "info" * "debug" * "trace" * "none" Args: value (str): A string value to convert to a logging level Returns: int or None Raises: ValueError
def convert_logging(value: str | int) -> PyLogLevel: '''Convert a string to a Python logging level If a log level is passed in, it is returned as-is. Otherwise the function understands the following strings, ignoring case: * "critical" * "error" * "warning" * "info" * "debug" * "trace" * "none" Args: value (str): A string value to convert to a logging level Returns: int or None Raises: ValueError ''' if value is None or isinstance(value, int): if value in set(_log_levels.values()): return value else: value = value.upper() if value in _log_levels: return _log_levels[value] raise ValueError(f"Cannot convert {value} to log level, valid values are: {', '.join(_log_levels)}")
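For example, assuming ``_log_levels`` maps the documented names to the standard ``logging`` constants:

.. code-block:: python

    >>> import logging
    >>> convert_logging("debug") == logging.DEBUG
    True
    >>> convert_logging(logging.INFO) == logging.INFO
    True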
Convert a string to a validation level If a validation level is passed in, it is returned as-is. Args: value (str): A string value to convert to a validation level Returns: string Raises: ValueError
def convert_validation(value: str | ValidationLevel) -> ValidationLevel: '''Convert a string to a validation level If a validation level is passed in, it is returned as-is. Args: value (str): A string value to convert to a validation level Returns: string Raises: ValueError ''' VALID_LEVELS = {"none", "errors", "all"} lowered = value.lower() if lowered in VALID_LEVELS: return cast(ValidationLevel, lowered) raise ValueError(f"Cannot convert {value!r} to validation level, valid values are: {VALID_LEVELS!r}")
Convert a string to an .ico path Args: value (str): A string value to convert to a .ico path Returns: string Raises: ValueError
def convert_ico_path(value: str) -> str: '''Convert a string to an .ico path Args: value (str): A string value to convert to a .ico path Returns: string Raises: ValueError ''' lowered = value.lower() if lowered == "none": return "none" if lowered == "default": return str(server_path() / "views" / "bokeh.ico") # undocumented if lowered == "default-dev": return str(server_path() / "views" / "bokeh-dev.ico") if not value.endswith(".ico"): raise ValueError(f"Cannot convert {value!r} to valid .ico path") return value
Create a ``DataSpec`` dict to generate a ``CumSum`` expression for a ``ColumnDataSource``. Args: field_name (str) : a field name to configure ``CumSum`` with include_zero (bool, optional) : whether to include zero in the sum (default: False) Examples: .. code-block:: python p.wedge(start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), ...) will generate ``CumSum`` expressions that sum the ``"angle"`` column of a data source. For the ``start_angle`` value, the cumulative sums will start with a zero value. For ``end_angle``, no initial zero will be added (i.e. the sums will start with the first angle value, and include the last).
def cumsum(field_name: str, include_zero: bool = False) -> Expr: ''' Create a ``DataSpec`` dict to generate a ``CumSum`` expression for a ``ColumnDataSource``. Args: field_name (str) : a field name to configure ``CumSum`` with include_zero (bool, optional) : whether to include zero in the sum (default: False) Examples: .. code-block:: python p.wedge(start_angle=cumsum('angle', include_zero=True), end_angle=cumsum('angle'), ...) will generate ``CumSum`` expressions that sum the ``"angle"`` column of a data source. For the ``start_angle`` value, the cumulative sums will start with a zero value. For ``end_angle``, no initial zero will be added (i.e. the sums will start with the first angle value, and include the last). ''' return Expr(CumSum(field=field_name, include_zero=include_zero))
Create a ``DataSpec`` dict that applies a client-side ``Dodge`` transformation
to a ``ColumnDataSource`` column.

Args:
    field_name (str) : a field name to configure ``DataSpec`` with

    value (float) : the fixed offset to add to column data

    range (Range, optional) : a range to use for computing synthetic
        coordinates when necessary, e.g. a ``FactorRange`` when the column
        data is categorical (default: None)

Returns:
    Field
def dodge(field_name: str, value: float, range: Range | None = None) -> Field:
    ''' Create a ``DataSpec`` dict that applies a client-side ``Dodge``
    transformation to a ``ColumnDataSource`` column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        value (float) : the fixed offset to add to column data

        range (Range, optional) : a range to use for computing synthetic
            coordinates when necessary, e.g. a ``FactorRange`` when the column
            data is categorical (default: None)

    Returns:
        Field

    '''
    return Field(field_name, Dodge(value=value, range=range))
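As a usage sketch, ``dodge`` is typically combined with a categorical range to place grouped bars side by side within each category; the data below is hypothetical:

from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, show

# hypothetical grouped-bar data: one row per fruit, one column per year
fruits = ["apples", "pears", "plums"]
source = ColumnDataSource(data=dict(fruits=fruits,
                                    y2023=[2, 1, 4],
                                    y2024=[5, 3, 3]))

p = figure(x_range=fruits, title="dodge() sketch")
# shift each year's bars left/right of the shared categorical coordinate
p.vbar(x=dodge("fruits", -0.2, range=p.x_range), top="y2023", width=0.35,
       source=source, legend_label="2023")
p.vbar(x=dodge("fruits", 0.2, range=p.x_range), top="y2024", width=0.35,
       source=source, legend_label="2024")
show(p)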
Create a ``DataSpec`` dict that applies a client-side ``EqHistColorMapper``
transformation to a ``ColumnDataSource`` column.

Args:
    field_name (str) : a field name to configure ``DataSpec`` with

    palette (seq[color]) : a list of colors to use for colormapping

    low (float) : a minimum value of the range to map into the palette.
        Values below this are clamped to ``low``.

    high (float) : a maximum value of the range to map into the palette.
        Values above this are clamped to ``high``.

    low_color (color, optional) : color to be used if data is lower than
        ``low`` value. If None, values lower than ``low`` are mapped to the
        first color in the palette. (default: None)

    high_color (color, optional) : color to be used if data is higher than
        ``high`` value. If None, values higher than ``high`` are mapped to the
        last color in the palette. (default: None)

    nan_color (color, optional) : a default color to use when mapping data
        from a column does not succeed (default: "gray")

Returns:
    Field
def eqhist_cmap(
    field_name: str,
    palette: Sequence[ColorLike],
    low: float,
    high: float,
    low_color: ColorLike | None = None,
    high_color: ColorLike | None = None,
    nan_color: ColorLike = "gray",
) -> Field:
    ''' Create a ``DataSpec`` dict that applies a client-side
    ``EqHistColorMapper`` transformation to a ``ColumnDataSource`` column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        palette (seq[color]) : a list of colors to use for colormapping

        low (float) : a minimum value of the range to map into the palette.
            Values below this are clamped to ``low``.

        high (float) : a maximum value of the range to map into the palette.
            Values above this are clamped to ``high``.

        low_color (color, optional) : color to be used if data is lower than
            ``low`` value. If None, values lower than ``low`` are mapped to the
            first color in the palette. (default: None)

        high_color (color, optional) : color to be used if data is higher than
            ``high`` value. If None, values higher than ``high`` are mapped to
            the last color in the palette. (default: None)

        nan_color (color, optional) : a default color to use when mapping data
            from a column does not succeed (default: "gray")

    Returns:
        Field

    '''
    return Field(
        field_name,
        EqHistColorMapper(
            palette=palette,
            low=low,
            high=high,
            nan_color=nan_color,
            low_color=low_color,
            high_color=high_color,
        ),
    )
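A usage sketch under the assumption of heavily skewed data, which is the case where histogram-equalized color mapping tends to help most (the data and palette are illustrative):

import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.palettes import Viridis256
from bokeh.plotting import figure, show

# hypothetical skewed values: a linear mapping would push most points into a
# narrow color band, while equal-histogram binning spreads the palette out
rng = np.random.default_rng(1)
values = rng.lognormal(mean=0.0, sigma=1.5, size=500)
source = ColumnDataSource(data=dict(x=rng.random(500), y=rng.random(500), v=values))

p = figure(title="eqhist_cmap() sketch")
p.scatter("x", "y", size=8, source=source,
          color=eqhist_cmap("v", Viridis256, low=float(values.min()), high=float(values.max())))
show(p)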
Create a ``DataSpec`` dict that applies a client-side ``CategoricalColorMapper``
transformation to a ``ColumnDataSource`` column.

Args:
    field_name (str) : a field name to configure ``DataSpec`` with

    palette (seq[color]) : a list of colors to use for colormapping

    factors (seq) : a sequence of categorical factors corresponding to the
        palette

    start (int, optional) : a start slice index to apply when the column data
        has factors with multiple levels. (default: 0)

    end (int, optional) : an end slice index to apply when the column data has
        factors with multiple levels. (default: None)

    nan_color (color, optional) : a default color to use when mapping data
        from a column does not succeed (default: "gray")

Returns:
    Field
def factor_cmap(
    field_name: str,
    palette: Sequence[ColorLike],
    factors: Factors,
    start: float = 0,
    end: float | None = None,
    nan_color: ColorLike = "gray",
) -> Field:
    ''' Create a ``DataSpec`` dict that applies a client-side
    ``CategoricalColorMapper`` transformation to a ``ColumnDataSource`` column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        palette (seq[color]) : a list of colors to use for colormapping

        factors (seq) : a sequence of categorical factors corresponding to the
            palette

        start (int, optional) : a start slice index to apply when the column
            data has factors with multiple levels. (default: 0)

        end (int, optional) : an end slice index to apply when the column data
            has factors with multiple levels. (default: None)

        nan_color (color, optional) : a default color to use when mapping data
            from a column does not succeed (default: "gray")

    Returns:
        Field

    '''
    return Field(
        field_name,
        CategoricalColorMapper(
            palette=palette,
            factors=factors,
            start=start,
            end=end,
            nan_color=nan_color,
        ),
    )
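A usage sketch showing the common pattern of coloring bars by their category (the fruit data is hypothetical; ``Spectral6`` is one of Bokeh's built-in palettes):

from bokeh.models import ColumnDataSource
from bokeh.palettes import Spectral6
from bokeh.plotting import figure, show

fruits = ["apples", "pears", "plums", "grapes", "kiwis", "limes"]  # hypothetical
counts = [5, 3, 4, 2, 4, 6]
source = ColumnDataSource(data=dict(fruits=fruits, counts=counts))

p = figure(x_range=fruits, title="factor_cmap() sketch")
p.vbar(x="fruits", top="counts", width=0.8, source=source,
       # one palette color per factor; unlisted factors fall back to nan_color
       fill_color=factor_cmap("fruits", palette=Spectral6, factors=fruits))
show(p)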
Create a ``DataSpec`` dict that applies a client-side ``CategoricalPatternMapper``
transformation to a ``ColumnDataSource`` column.

Args:
    field_name (str) : a field name to configure ``DataSpec`` with

    patterns (seq[string]) : a list of hatch patterns to map the factors to

    factors (seq) : a sequence of categorical factors corresponding to the
        patterns

    start (int, optional) : a start slice index to apply when the column data
        has factors with multiple levels. (default: 0)

    end (int, optional) : an end slice index to apply when the column data has
        factors with multiple levels. (default: None)

Returns:
    Field

Added in version 1.1.1
def factor_hatch(
    field_name: str,
    patterns: Sequence[str],
    factors: Factors,
    start: float = 0,
    end: float | None = None,
) -> Field:
    ''' Create a ``DataSpec`` dict that applies a client-side
    ``CategoricalPatternMapper`` transformation to a ``ColumnDataSource``
    column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        patterns (seq[string]) : a list of hatch patterns to map the factors to

        factors (seq) : a sequence of categorical factors corresponding to the
            patterns

        start (int, optional) : a start slice index to apply when the column
            data has factors with multiple levels. (default: 0)

        end (int, optional) : an end slice index to apply when the column data
            has factors with multiple levels. (default: None)

    Returns:
        Field

    Added in version 1.1.1

    '''
    return Field(
        field_name,
        CategoricalPatternMapper(
            patterns=patterns,
            factors=factors,
            start=start,
            end=end,
        ),
    )
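A usage sketch with hypothetical categories, mapping each factor to a named hatch pattern on a ``vbar`` glyph:

from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, show

kinds = ["plain", "striped", "crossed"]          # hypothetical categories
source = ColumnDataSource(data=dict(kind=kinds, value=[3, 5, 2]))

p = figure(x_range=kinds, title="factor_hatch() sketch")
p.vbar(x="kind", top="value", width=0.8, source=source,
       fill_color="white", line_color="black",
       # map each category to a hatch pattern: blank, right diagonal, criss-cross
       hatch_pattern=factor_hatch("kind", patterns=[" ", "/", "x"], factors=kinds))
show(p)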
Create a ``DataSpec`` dict that applies a client-side ``CategoricalMarkerMapper``
transformation to a ``ColumnDataSource`` column.

.. note::
    This transform is primarily useful with ``scatter``, which can be
    parameterized by glyph type.

Args:
    field_name (str) : a field name to configure ``DataSpec`` with

    markers (seq[string]) : a list of markers to map the factors to

    factors (seq) : a sequence of categorical factors corresponding to the
        markers

    start (int, optional) : a start slice index to apply when the column data
        has factors with multiple levels. (default: 0)

    end (int, optional) : an end slice index to apply when the column data has
        factors with multiple levels. (default: None)

Returns:
    Field
def factor_mark(
    field_name: str,
    markers: Sequence[str],
    factors: Factors,
    start: float = 0,
    end: float | None = None,
) -> Field:
    ''' Create a ``DataSpec`` dict that applies a client-side
    ``CategoricalMarkerMapper`` transformation to a ``ColumnDataSource``
    column.

    .. note::
        This transform is primarily useful with ``scatter``, which can be
        parameterized by glyph type.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        markers (seq[string]) : a list of markers to map the factors to

        factors (seq) : a sequence of categorical factors corresponding to the
            markers

        start (int, optional) : a start slice index to apply when the column
            data has factors with multiple levels. (default: 0)

        end (int, optional) : an end slice index to apply when the column data
            has factors with multiple levels. (default: None)

    Returns:
        Field

    '''
    return Field(
        field_name,
        CategoricalMarkerMapper(
            markers=markers,
            factors=factors,
            start=start,
            end=end,
        ),
    )
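A usage sketch in the spirit of the classic species-scatter example; the coordinates and species labels below are made up, and the marker names are ordinary Bokeh marker types:

from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, show

# hypothetical measurements labelled by species
source = ColumnDataSource(data=dict(
    x=[1.0, 1.2, 3.1, 3.3, 5.0, 5.2],
    y=[2.0, 2.1, 4.0, 4.2, 1.0, 1.1],
    species=["setosa", "setosa", "versicolor", "versicolor", "virginica", "virginica"],
))
SPECIES = ["setosa", "versicolor", "virginica"]
MARKERS = ["hex", "circle_x", "triangle"]

p = figure(title="factor_mark() sketch")
p.scatter("x", "y", source=source, size=12, legend_field="species",
          marker=factor_mark("species", MARKERS, SPECIES))
show(p)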
Create a ``DataSpec`` dict that applies a client-side ``Jitter`` transformation
to a ``ColumnDataSource`` column.

Args:
    field_name (str) : a field name to configure ``DataSpec`` with

    width (float) : the width of the random distribution to apply

    mean (float, optional) : an offset to apply (default: 0)

    distribution (str, optional) : ``"uniform"`` or ``"normal"``
        (default: ``"uniform"``)

    range (Range, optional) : a range to use for computing synthetic
        coordinates when necessary, e.g. a ``FactorRange`` when the column
        data is categorical (default: None)

Returns:
    Field
def jitter(
    field_name: str,
    width: float,
    mean: float = 0,
    distribution: JitterRandomDistributionType = "uniform",
    range: Range | None = None,
) -> Field:
    ''' Create a ``DataSpec`` dict that applies a client-side ``Jitter``
    transformation to a ``ColumnDataSource`` column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        width (float) : the width of the random distribution to apply

        mean (float, optional) : an offset to apply (default: 0)

        distribution (str, optional) : ``"uniform"`` or ``"normal"``
            (default: ``"uniform"``)

        range (Range, optional) : a range to use for computing synthetic
            coordinates when necessary, e.g. a ``FactorRange`` when the column
            data is categorical (default: None)

    Returns:
        Field

    '''
    return Field(
        field_name,
        Jitter(
            mean=mean,
            width=width,
            distribution=distribution,
            range=range,
        ),
    )
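A usage sketch spreading overlapping points around a categorical axis (the groups and values are hypothetical; note that the figure's own ``y_range`` is passed so synthetic categorical coordinates can be computed):

from bokeh.models import ColumnDataSource
from bokeh.plotting import figure, show

# hypothetical observations grouped into three categories
source = ColumnDataSource(data=dict(
    group=["a", "a", "a", "b", "b", "c", "c", "c"],
    value=[1.1, 1.4, 0.9, 2.2, 2.0, 3.3, 2.9, 3.1],
))

p = figure(y_range=["a", "b", "c"], title="jitter() sketch")
# spread points vertically around each category so they do not overlap
p.scatter(x="value", y=jitter("group", width=0.4, range=p.y_range),
          source=source, alpha=0.7)
show(p)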
Create a ``DataSpec`` dict that applies a client-side ``LinearColorMapper``
transformation to a ``ColumnDataSource`` column.

Args:
    field_name (str) : a field name to configure ``DataSpec`` with

    palette (seq[color]) : a list of colors to use for colormapping

    low (float) : a minimum value of the range to map into the palette.
        Values below this are clamped to ``low``.

    high (float) : a maximum value of the range to map into the palette.
        Values above this are clamped to ``high``.

    low_color (color, optional) : color to be used if data is lower than
        ``low`` value. If None, values lower than ``low`` are mapped to the
        first color in the palette. (default: None)

    high_color (color, optional) : color to be used if data is higher than
        ``high`` value. If None, values higher than ``high`` are mapped to the
        last color in the palette. (default: None)

    nan_color (color, optional) : a default color to use when mapping data
        from a column does not succeed (default: "gray")

Returns:
    Field
def linear_cmap(
    field_name: str,
    palette: Sequence[ColorLike],
    low: float,
    high: float,
    low_color: ColorLike | None = None,
    high_color: ColorLike | None = None,
    nan_color: ColorLike = "gray",
) -> Field:
    ''' Create a ``DataSpec`` dict that applies a client-side
    ``LinearColorMapper`` transformation to a ``ColumnDataSource`` column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        palette (seq[color]) : a list of colors to use for colormapping

        low (float) : a minimum value of the range to map into the palette.
            Values below this are clamped to ``low``.

        high (float) : a maximum value of the range to map into the palette.
            Values above this are clamped to ``high``.

        low_color (color, optional) : color to be used if data is lower than
            ``low`` value. If None, values lower than ``low`` are mapped to the
            first color in the palette. (default: None)

        high_color (color, optional) : color to be used if data is higher than
            ``high`` value. If None, values higher than ``high`` are mapped to
            the last color in the palette. (default: None)

        nan_color (color, optional) : a default color to use when mapping data
            from a column does not succeed (default: "gray")

    Returns:
        Field

    '''
    return Field(
        field_name,
        LinearColorMapper(
            palette=palette,
            low=low,
            high=high,
            nan_color=nan_color,
            low_color=low_color,
            high_color=high_color,
        ),
    )
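A usage sketch coloring scatter points by one of their own columns (the data, palette, and clamp limits are illustrative):

import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.palettes import Viridis256
from bokeh.plotting import figure, show

# hypothetical data: a noisy sine wave
x = np.linspace(0, 10, 200)
y = np.sin(x) + np.random.normal(0, 0.2, 200)
source = ColumnDataSource(data=dict(x=x, y=y))

p = figure(title="linear_cmap() sketch")
p.scatter("x", "y", size=8, source=source,
          # color each point by its y value, clamped to [-1, 1]
          color=linear_cmap("y", Viridis256, low=-1, high=1))
show(p)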
Create a ``DataSpec`` dict that applies a client-side ``LogColorMapper``
transformation to a ``ColumnDataSource`` column.

Args:
    field_name (str) : a field name to configure ``DataSpec`` with

    palette (seq[color]) : a list of colors to use for colormapping

    low (float) : a minimum value of the range to map into the palette.
        Values below this are clamped to ``low``.

    high (float) : a maximum value of the range to map into the palette.
        Values above this are clamped to ``high``.

    low_color (color, optional) : color to be used if data is lower than
        ``low`` value. If None, values lower than ``low`` are mapped to the
        first color in the palette. (default: None)

    high_color (color, optional) : color to be used if data is higher than
        ``high`` value. If None, values higher than ``high`` are mapped to the
        last color in the palette. (default: None)

    nan_color (color, optional) : a default color to use when mapping data
        from a column does not succeed (default: "gray")

Returns:
    Field
def log_cmap(
    field_name: str,
    palette: Sequence[ColorLike],
    low: float,
    high: float,
    low_color: ColorLike | None = None,
    high_color: ColorLike | None = None,
    nan_color: ColorLike = "gray",
) -> Field:
    ''' Create a ``DataSpec`` dict that applies a client-side
    ``LogColorMapper`` transformation to a ``ColumnDataSource`` column.

    Args:
        field_name (str) : a field name to configure ``DataSpec`` with

        palette (seq[color]) : a list of colors to use for colormapping

        low (float) : a minimum value of the range to map into the palette.
            Values below this are clamped to ``low``.

        high (float) : a maximum value of the range to map into the palette.
            Values above this are clamped to ``high``.

        low_color (color, optional) : color to be used if data is lower than
            ``low`` value. If None, values lower than ``low`` are mapped to the
            first color in the palette. (default: None)

        high_color (color, optional) : color to be used if data is higher than
            ``high`` value. If None, values higher than ``high`` are mapped to
            the last color in the palette. (default: None)

        nan_color (color, optional) : a default color to use when mapping data
            from a column does not succeed (default: "gray")

    Returns:
        Field

    '''
    return Field(
        field_name,
        LogColorMapper(
            palette=palette,
            low=low,
            high=high,
            nan_color=nan_color,
            low_color=low_color,
            high_color=high_color,
        ),
    )
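``log_cmap`` is used exactly like ``linear_cmap``; the only difference is that the palette is traversed on a logarithmic scale, which suits data spanning several orders of magnitude. A brief sketch with hypothetical data:

import numpy as np
from bokeh.models import ColumnDataSource
from bokeh.palettes import Viridis256
from bokeh.plotting import figure, show

# hypothetical counts spanning several orders of magnitude
counts = np.logspace(0, 5, 100)
source = ColumnDataSource(data=dict(x=np.arange(100), y=counts))

p = figure(y_axis_type="log", title="log_cmap() sketch")
p.scatter("x", "y", size=8, source=source,
          color=log_cmap("y", Viridis256, low=1, high=1e5))
show(p)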