| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| stringlengths 5-22M | stringlengths 12-177 | dict | int64 0-1.37k |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import random
from pathlib import Path
from typing import List, Optional
from overrides import overrides
from tqdm import tqdm
from archai.common.ordered_dict_logger import OrderedDictLogger
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.api.search_results import SearchResults
from archai.discrete_search.api.search_space import EvolutionarySearchSpace
from archai.discrete_search.api.searcher import Searcher
logger = OrderedDictLogger(source=__name__)
class EvolutionParetoSearch(Searcher):
"""Evolutionary multi-objective search algorithm that greedily
evolves Pareto frontier models.
    It starts from an evaluated random subset of models. In each iteration, the algorithm
    evaluates a new subset of models generated from mutations (`mutations_per_parent`) and
    crossovers (`num_crossovers`) of the current Pareto frontier, plus a new random subset
    of models (`num_random_mix`). The process is repeated until `num_iters` is reached.
"""
def __init__(
self,
search_space: EvolutionarySearchSpace,
search_objectives: SearchObjectives,
output_dir: str,
num_iters: Optional[int] = 10,
init_num_models: Optional[int] = 10,
initial_population_paths: Optional[List[str]] = None,
num_random_mix: Optional[int] = 5,
max_unseen_population: Optional[int] = 100,
mutations_per_parent: Optional[int] = 1,
num_crossovers: Optional[int] = 5,
clear_evaluated_models: bool = True,
save_pareto_model_weights: bool = True,
seed: Optional[int] = 1,
):
"""Initialize the evolutionary search algorithm.
Args:
search_space: Discrete search space compatible with evolutionary algorithms.
search_objectives: Search objectives.
output_dir: Output directory.
num_iters: Number of iterations.
init_num_models: Number of initial models to evaluate.
initial_population_paths: List of paths to the initial population of models.
If `None`, `init_num_models` random models are used.
num_random_mix: Number of random models to mix with the population in each iteration.
max_unseen_population: Maximum number of unseen models to evaluate in each iteration.
mutations_per_parent: Number of distinct mutations generated for each Pareto frontier member.
num_crossovers: Total number of crossovers generated per iteration.
            clear_evaluated_models: Optimizes memory usage by clearing the architecture
                of each `ArchaiModel` after each iteration. Defaults to True.
            save_pareto_model_weights: If `True`, saves the weights of the Pareto frontier models. Defaults to True.
seed: Random seed.
"""
super(EvolutionParetoSearch, self).__init__()
assert isinstance(
search_space, EvolutionarySearchSpace
), f"{str(search_space.__class__)} is not compatible with {str(self.__class__)}"
self.iter_num = 0
self.search_space = search_space
self.so = search_objectives
self.output_dir = Path(output_dir)
self.output_dir.mkdir(exist_ok=True, parents=True)
# Algorithm settings
self.num_iters = num_iters
self.init_num_models = init_num_models
self.initial_population_paths = initial_population_paths
self.num_random_mix = num_random_mix
self.max_unseen_population = max_unseen_population
self.mutations_per_parent = mutations_per_parent
self.num_crossovers = num_crossovers
# Utils
self.clear_evaluated_models = clear_evaluated_models
self.save_pareto_model_weights = save_pareto_model_weights
self.search_state = SearchResults(search_space, self.so)
self.seed = seed
self.rng = random.Random(seed)
self.seen_archs = set()
self.num_sampled_archs = 0
assert self.init_num_models > 0
assert self.num_iters > 0
assert self.num_random_mix > 0
assert self.max_unseen_population > 0
def sample_models(self, num_models: int, patience: Optional[int] = 5) -> List[ArchaiModel]:
"""Sample models from the search space.
Args:
num_models: Number of models to sample.
patience: Number of tries to sample a valid model.
Returns:
List of sampled models.
"""
nb_tries, valid_sample = 0, []
while len(valid_sample) < num_models and nb_tries < patience:
sample = [self.search_space.random_sample() for _ in range(num_models)]
_, valid_indices = self.so.validate_constraints(sample)
valid_sample += [sample[i] for i in valid_indices]
nb_tries += 1
return valid_sample[:num_models]
def mutate_parents(
self, parents: List[ArchaiModel], mutations_per_parent: Optional[int] = 1, patience: Optional[int] = 20
) -> List[ArchaiModel]:
"""Mutate parents to generate new models.
Args:
parents: List of parent models.
mutations_per_parent: Number of mutations to apply to each parent.
            patience: Maximum number of attempts to generate a valid mutation for each parent.
Returns:
List of mutated models.
"""
mutations = {}
for p in tqdm(parents, desc="Mutating parents"):
candidates = {}
nb_tries = 0
while len(candidates) < mutations_per_parent and nb_tries < patience:
nb_tries += 1
mutated_model = self.search_space.mutate(p)
mutated_model.metadata["parent"] = p.archid
if not self.so.is_model_valid(mutated_model):
continue
if mutated_model.archid not in self.seen_archs:
mutated_model.metadata["generation"] = self.iter_num
candidates[mutated_model.archid] = mutated_model
mutations.update(candidates)
return list(mutations.values())
def crossover_parents(
self, parents: List[ArchaiModel], num_crossovers: Optional[int] = 1, patience: Optional[int] = 30
) -> List[ArchaiModel]:
"""Crossover parents to generate new models.
Args:
parents: List of parent models.
num_crossovers: Number of crossovers to apply.
            patience: Maximum number of attempts to generate a valid child for each crossover pair.
Returns:
            List of models generated by crossover.
"""
# Randomly samples k distinct pairs from `parents`
children, children_ids = [], set()
if len(parents) >= 2:
pairs = [self.rng.sample(parents, 2) for _ in range(num_crossovers)]
for p1, p2 in pairs:
child = self.search_space.crossover([p1, p2])
nb_tries = 0
while not self.so.is_model_valid(child) and nb_tries < patience:
child = self.search_space.crossover([p1, p2])
nb_tries += 1
if child and self.so.is_model_valid(child):
if child.archid not in children_ids and child.archid not in self.seen_archs:
child.metadata["generation"] = self.iter_num
child.metadata["parents"] = f"{p1.archid},{p2.archid}"
children.append(child)
children_ids.add(child.archid)
return children
def on_calc_task_accuracy_end(self, current_pop: List[ArchaiModel]) -> None:
"""Callback function called right after calc_task_accuracy()."""
pass
def on_search_iteration_start(self, current_pop: List[ArchaiModel]) -> None:
"""Callback function called right before each search iteration."""
pass
def select_next_population(self, current_pop: List[ArchaiModel]) -> List[ArchaiModel]:
"""Select the next population from the current population
Args:
current_pop: Current population.
Returns:
Next population.
"""
self.rng.shuffle(current_pop)
return current_pop[: self.max_unseen_population]
@overrides
def search(self) -> SearchResults:
self.iter_num = 0
if self.initial_population_paths:
logger.info(f"Loading initial population from {len(self.initial_population_paths)} architectures ...")
unseen_pop = [self.search_space.load_arch(path) for path in self.initial_population_paths]
else:
logger.info(f"Using {self.init_num_models} random architectures as the initial population ...")
unseen_pop = self.sample_models(self.init_num_models)
self.all_pop = unseen_pop
for i in range(self.num_iters):
self.iter_num = i + 1
self.on_start_iteration(self.iter_num)
logger.info(f"Iteration {i+1}/{self.num_iters}")
self.on_search_iteration_start(unseen_pop)
# Calculates objectives
logger.info(f"Calculating search objectives {list(self.so.objective_names)} for {len(unseen_pop)} models ...")
results = self.so.eval_all_objs(unseen_pop)
if len(results) == 0:
raise Exception("Search is finding no valid models")
self.search_state.add_iteration_results(
unseen_pop,
results,
# Mutation and crossover info
extra_model_data={
"parent": [p.metadata.get("parent", None) for p in unseen_pop],
"parents": [p.metadata.get("parents", None) for p in unseen_pop],
},
)
# Records evaluated archs to avoid computing the same architecture twice
self.seen_archs.update([m.archid for m in unseen_pop])
# update the pareto frontier
logger.info("Updating Pareto frontier ...")
pareto = self.search_state.get_pareto_frontier()["models"]
logger.info(f"Found {len(pareto)} members.")
# Saves search iteration results
            # NOTE: archai.common.notebook_helper depends on these file naming schemes
self.search_state.save_search_state(str(self.output_dir / f"search_state_{self.iter_num}.csv"))
self.search_state.save_pareto_frontier_models(
str(self.output_dir / f"pareto_models_iter_{self.iter_num}"),
save_weights=self.save_pareto_model_weights
)
self.search_state.save_all_2d_pareto_evolution_plots(str(self.output_dir))
# Optimizes memory usage by clearing architectures from memory
if self.clear_evaluated_models:
logger.info("Optimzing memory usage ...")
[model.clear() for model in unseen_pop]
parents = pareto
logger.info(f"Choosing {len(parents)} parents ...")
# mutate random 'k' subsets of the parents
# while ensuring the mutations fall within
# desired constraint limits
mutated = self.mutate_parents(parents, self.mutations_per_parent)
logger.info(f"Mutation: {len(mutated)} new models.")
# crossover random 'k' subsets of the parents
            # while ensuring the offspring fall within
# desired constraint limits
crossovered = self.crossover_parents(parents, self.num_crossovers)
logger.info(f"Crossover: {len(crossovered)} new models.")
# sample some random samples to add to the parent mix
            # to mitigate local minima
rand_mix = self.sample_models(self.num_random_mix)
unseen_pop = crossovered + mutated + rand_mix
# shuffle before we pick a smaller population for the next stage
logger.info(f"Total unseen population: {len(unseen_pop)}.")
unseen_pop = self.select_next_population(unseen_pop)
logger.info(f"Total unseen population after `max_unseen_population` restriction: {len(unseen_pop)}.")
# update the set of architectures ever visited
self.all_pop.extend(unseen_pop)
return self.search_state
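# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). It outlines one
# plausible way to drive `EvolutionParetoSearch`; `MySearchSpace` is a
# hypothetical user-defined `EvolutionarySearchSpace` subclass and the
# objective below is a placeholder, so treat this as a sketch rather than a
# definitive recipe.
# ---------------------------------------------------------------------------
# from archai.discrete_search.evaluators.functional import EvaluationFunction
# search_space = MySearchSpace()  # hypothetical EvolutionarySearchSpace subclass
# objectives = SearchObjectives()
# objectives.add_objective(
#     "Random score",  # placeholder objective name
#     EvaluationFunction(lambda model, budget: random.random()),
#     higher_is_better=True,
# )
# searcher = EvolutionParetoSearch(search_space, objectives, output_dir="out_evo", num_iters=5)
# results = searcher.search()  # returns a SearchResults object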
id: archai/archai/discrete_search/algos/evolution_pareto.py/0
metadata: {
    "file_path": "archai/archai/discrete_search/algos/evolution_pareto.py",
    "repo_id": "archai",
    "token_count": 5288
}
__index_level_0__: 318
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Callable, Optional
from overrides import overrides
from archai.api.dataset_provider import DatasetProvider
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.model_evaluator import ModelEvaluator
class EvaluationFunction(ModelEvaluator):
"""Custom function evaluator.
This evaluator is used to wrap a custom evaluation function.
"""
def __init__(self, evaluation_fn: Callable) -> None:
"""Initialize the evaluator.
Args:
            evaluation_fn: Evaluation function that receives the parameters
                (model: ArchaiModel, budget: Optional[float]) and outputs a float,
                matching the signature of `evaluate`.
"""
self.evaluation_fn = evaluation_fn
@overrides
def evaluate(self, model: ArchaiModel, budget: Optional[float] = None) -> float:
return self.evaluation_fn(model, budget)
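# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wraps a trivial scoring
# function, assuming `ArchaiModel(arch, archid)` as the model wrapper and that
# torch is available; the architecture below is a toy placeholder.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from torch import nn
    toy_model = ArchaiModel(arch=nn.Linear(4, 4), archid="toy_linear")
    evaluator = EvaluationFunction(lambda m, budget: float(sum(p.numel() for p in m.arch.parameters())))
    print(evaluator.evaluate(toy_model))  # 20.0 trainable parameters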
id: archai/archai/discrete_search/evaluators/functional.py/0
metadata: {
    "file_path": "archai/archai/discrete_search/evaluators/functional.py",
    "repo_id": "archai",
    "token_count": 357
}
__index_level_0__: 319
from .backbones import *
from .model import LanguageModel
from .search_space import TfppSearchSpace
id: archai/archai/discrete_search/search_spaces/nlp/tfpp/__init__.py/0
metadata: {
    "file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/__init__.py",
    "repo_id": "archai",
    "token_count": 26
}
__index_level_0__: 320
from typing import Optional
import torch
from torch import nn
from transformers.models.reformer.modeling_reformer import ReformerConfig
from .lsh_utils.modeling_reformer import ReformerAttention
from archai.discrete_search.search_spaces.config import ArchConfig
class LSHAttention(nn.Module):
def __init__(self, arch_config: ArchConfig, hidden_size: int, total_heads: int,
op_heads: int, auto_pick_num_buckets: bool = True, autopad: bool = True,
**kwargs):
assert hidden_size % total_heads == 0, 'hidden size must be divisible by total heads'
super().__init__()
self.hidden_size = hidden_size
self.total_heads = total_heads
self.op_heads = op_heads
self.op_size = (self.hidden_size // self.total_heads) * self.op_heads
self.num_hashes = arch_config.pick('num_hashes')
self.bucket_size = arch_config.pick('bucket_size')
self.num_buckets = arch_config.pick('num_buckets') if not auto_pick_num_buckets else None
self.autopad = autopad
self.config = ReformerConfig(
attn_layers=['lsh'],
hidden_size=hidden_size,
num_attention_heads=op_heads,
num_hashes=self.num_hashes,
lsh_attn_chunk_length=self.bucket_size,
num_buckets=self.num_buckets,
attention_head_size=(self.hidden_size // self.total_heads),
axial_pos_embds=False,
is_decoder=True,
use_cache=False
)
self.attn = ReformerAttention(self.config)
# Overrides the output layer to be identity to make it
# return an output of `op_size` instead of `hidden_size`
self.attn.output = nn.Identity()
def forward(self, hidden_states, bin_attention_mask: Optional[torch.FloatTensor] = None,
past_buckets_states: Optional[torch.Tensor] = None, use_cache: bool = False,
*args, **kwargs):
seq_len = hidden_states.size(1)
# Pads input to be divisible by bucket size
if self.autopad and seq_len % self.bucket_size != 0:
pad_size = (self.bucket_size - seq_len % self.bucket_size) % self.bucket_size
# Pads hidden states and attention mask with zeros so attn is not computed for padded tokens
p_hidden_states = torch.nn.functional.pad(hidden_states, (0, 0, pad_size, 0))
p_bin_attention_mask = torch.nn.functional.pad(bin_attention_mask, (pad_size, 0))
# Computes attention with padded input and unpads output
output = self.attn(p_hidden_states, attention_mask=p_bin_attention_mask)
return output[0][:, pad_size:], output[1:]
return self.attn(hidden_states, attention_mask=bin_attention_mask)
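# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original file): the left-padding
# arithmetic used by the `autopad` branch above, shown with plain tensors and
# assumed toy sizes (bucket_size=64, seq_len=100, hidden dim 8).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    bucket_size, seq_len = 64, 100
    pad_size = (bucket_size - seq_len % bucket_size) % bucket_size  # 28
    hidden_states = torch.randn(1, seq_len, 8)
    padded = torch.nn.functional.pad(hidden_states, (0, 0, pad_size, 0))
    print(padded.shape)  # torch.Size([1, 128, 8]) -- now divisible by bucket_size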
id: archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/lsh_attn.py/0
metadata: {
    "file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/lsh_attn.py",
    "repo_id": "archai",
    "token_count": 1250
}
__index_level_0__: 321
# TD [2023-01-05]: Copied from https://github.com/HazyResearch/state-spaces/blob/06dbbdfd0876501a7f12bf3262121badbc7658af/src/models/functional/vandermonde.py
# We add the interface to the log vandermonde CUDA code
"""pykeops implementations of the Vandermonde matrix multiplication kernel used in the S4D kernel."""
import math
import torch
from einops import rearrange, repeat
from opt_einsum import contract
import os
try:
import pykeops
from pykeops.torch import LazyTensor, Genred
except:
pass
try:
from cauchy_mult import vand_log_mult_sym_fwd, vand_log_mult_sym_bwd
except:
vand_log_mult_sym_fwd, vand_log_mult_sym_bwd = None, None
_conj = lambda x: torch.cat([x, x.conj()], dim=-1)
def _broadcast_dims(*tensors):
max_dim = max([len(tensor.shape) for tensor in tensors])
tensors = [tensor.view((1,)*(max_dim-len(tensor.shape))+tensor.shape) for tensor in tensors]
return tensors
def _c2r(x): return torch.view_as_real(x)
def _r2c(x): return torch.view_as_complex(x)
def vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
    returns: (..., L) \sum_n v_n x_n^l
"""
if conj:
x = _conj(x)
v = _conj(v)
vandermonde_matrix = x.unsqueeze(-1) ** torch.arange(L).to(x) # (... N L)
vandermonde_prod = torch.sum(v.unsqueeze(-1) * vandermonde_matrix, dim=-2) # (... L)
return vandermonde_prod
def log_vandermonde_naive(v, x, L, conj=True):
"""
v: (..., N)
x: (..., N)
    returns: (..., L) \sum_n v_n exp(x_n l)
"""
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
vandermonde_prod = contract('... n, ... n l -> ... l', v, vandermonde_matrix) # (... L)
if conj:
return 2*vandermonde_prod.real
else:
return vandermonde_prod
def log_vandermonde_lazy(v, x, L, conj=True):
if conj:
v = _conj(v)
x = _conj(x)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v_l = LazyTensor(rearrange(v, '... N -> ... N 1 1'))
x_l = LazyTensor(rearrange(x, '... N -> ... N 1 1'))
l_l = LazyTensor(rearrange(l, '... L -> ... 1 L 1'))
# exp
vand = (x_l * l_l).exp()
s = (v_l*vand).sum(dim=len(v_l.shape)-2)
return s.squeeze(-1)
def log_vandermonde(v, x, L, conj=True):
expr = 'ComplexMult(v, ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'v = Vj(2)',
'x = Vj(2)',
'l = Vi(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
v, x, l = _broadcast_dims(v, x, l)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(v, x, l, backend='GPU')
if conj:
return 2*_r2c(r).real
else:
return _r2c(r)
def log_vandermonde_transpose_naive(u, v, x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
    vandermonde_prod = contract('... l, ... n, ... n l -> ... n', u.to(x), v.to(x), vandermonde_matrix) # (... N)
return vandermonde_prod
def log_vandermonde_transpose(u, v, x, L):
"""
u: ... H L
v: ... H N
x: ... H N
Returns: ... H N
V = Vandermonde(a, L) : (H N L)
contract_L(V * u * v)
"""
expr = 'ComplexMult(ComplexMult(v, u), ComplexExp(ComplexMult(x, l)))'
vandermonde_mult = Genred(
expr,
[
'u = Vj(2)',
'v = Vi(2)',
'x = Vi(2)',
'l = Vj(2)',
],
reduction_op='Sum',
axis=1,
)
l = torch.arange(L).to(x)
u, v, x, l = _broadcast_dims(u, v, x, l)
u = _c2r(u)
v = _c2r(v)
x = _c2r(x)
l = _c2r(l)
r = vandermonde_mult(u, v, x, l, backend='GPU')
return _r2c(r)
def _log_vandermonde_matmul(x, L):
vandermonde_matrix = torch.exp(x.unsqueeze(-1) * torch.arange(L).to(x)) # (... N L)
return vandermonde_matrix
def log_vandermonde_matmul(v, K):
prod = contract('...n, ...nl -> ...l', v, K)
return 2*prod.real
class LogVandMultiplySymmetric(torch.autograd.Function):
@staticmethod
def forward(ctx, v, x, L):
batch, N = v.shape
supported_N_values = [1 << log_n for log_n in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]]
        if N not in supported_N_values:
raise NotImplementedError(f'Only support N values in {supported_N_values}')
max_L_value = 32 * 1024 * 64 * 1024
if L > max_L_value:
raise NotImplementedError(f'Only support L values <= {max_L_value}')
        if not (v.is_cuda and x.is_cuda):
            raise NotImplementedError('Only support CUDA tensors')
ctx.save_for_backward(v, x)
return vand_log_mult_sym_fwd(v, x, L)
@staticmethod
def backward(ctx, dout):
v, x = ctx.saved_tensors
dv, dx = vand_log_mult_sym_bwd(v, x, dout)
return dv, dx, None
if vand_log_mult_sym_fwd is not None and vand_log_mult_sym_bwd is not None:
log_vandermonde_fast = LogVandMultiplySymmetric.apply
else:
log_vandermonde_fast = None
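# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a CPU-only shape check
# of the naive kernel above; it does not require pykeops or the CUDA
# extension, and the sizes are toy placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    B, N, L = 2, 4, 8
    v = torch.randn(B, N, dtype=torch.cfloat)
    x = torch.randn(B, N, dtype=torch.cfloat)
    out = log_vandermonde_naive(v, x, L)  # (B, L), real-valued since conj=True
    print(out.shape, out.dtype)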
id: archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ssm_ops/vandermonde.py/0
metadata: {
    "file_path": "archai/archai/discrete_search/search_spaces/nlp/tfpp/ops/ssm_utils/ssm_ops/vandermonde.py",
    "repo_id": "archai",
    "token_count": 2551
}
__index_level_0__: 322
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# Copyright (c) 2018, NVIDIA CORPORATION.
# Licensed under the Apache License, Version 2.0.
from typing import List, Optional, Tuple
import torch
import torch.nn as nn
from transformers.file_utils import ModelOutput
from transformers.models.transfo_xl.modeling_transfo_xl import (
TransfoXLModel,
TransfoXLPreTrainedModel,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.models.configuration_mem_transformer import (
MemTransformerConfig,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.models.mem_transformer_utils.adaptive_embedding import (
AdaptiveEmbedding,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.models.mem_transformer_utils.positional_embedding import (
PositionalEmbedding,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.models.mem_transformer_utils.projected_adaptive_log_softmax import (
ProjectedAdaptiveLogSoftmax,
)
from archai.discrete_search.search_spaces.nlp.transformer_flex.models.mem_transformer_utils.rel_partial_learnable_decoder import (
RelPartialLearnableDecoderLayer,
)
class MemTransformerBaseOutput(ModelOutput):
last_hidden_state: torch.FloatTensor
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
class MemTransformerOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
prediction_scores: Optional[torch.FloatTensor] = None
past_key_values: Optional[Tuple[torch.FloatTensor]] = None
mems: Optional[List[torch.FloatTensor]] = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
@property
def logits(self) -> torch.FloatTensor:
return self.prediction_scores
class MemTransformerModel(TransfoXLModel):
config_class = MemTransformerConfig
def __init__(self, config: MemTransformerConfig) -> None:
super().__init__(config)
self.word_emb = AdaptiveEmbedding(
config.vocab_size,
config.d_embed,
config.d_model,
config.cutoffs,
div_val=config.div_val,
fp16=config.fp16,
)
self.layers = nn.ModuleList()
for _ in range(config.n_layer):
layer_i = RelPartialLearnableDecoderLayer(
config.n_head,
config.d_model,
config.d_head,
config.d_inner,
config.dropout,
dropatt=config.dropatt,
primer_conv=config.primer_conv,
primer_square=config.primer_square,
pre_lnorm=config.pre_lnorm,
layer_norm_epsilon=config.layer_norm_epsilon,
r_w_bias=None if config.untie_r else self.r_w_bias,
r_r_bias=None if config.untie_r else self.r_r_bias,
)
self.layers.append(layer_i)
        self.pos_emb = PositionalEmbedding(self.config.d_model)
self.init_weights()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
mems: Optional[List[torch.FloatTensor]] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> MemTransformerBaseOutput:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # Original Transformer-XL uses [q_length, batch_size], whereas
        # we prefer to use [batch_size, q_length]
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both `input_ids` and `inputs_embeds` at the same time")
elif input_ids is not None:
input_ids = input_ids.transpose(0, 1).contiguous()
q_length, batch_size = input_ids.size()
elif inputs_embeds is not None:
inputs_embeds = inputs_embeds.transpose(0, 1).contiguous()
q_length, batch_size = inputs_embeds.shape[0], inputs_embeds.shape[1]
else:
raise ValueError("You have to specify either `input_ids` or `inputs_embeds`")
if mems is None:
mems = self.init_mems(batch_size)
# (n_hidden_layers, q_length, k_length, batch_size, n_head)
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0)
head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1)
# Guarantees 16-bit floating point compatibility
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.n_layer
if inputs_embeds is not None:
word_embeds = inputs_embeds
else:
word_embeds = self.word_emb(input_ids)
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * self.n_layer)
else:
past_length = past_key_values[0][0].size(0)
mem_length = mems[0].size(0) if mems is not None else 0
k_length = mem_length + q_length
if self.same_length:
all_ones = word_embeds.new_ones((q_length, k_length + past_length), dtype=torch.uint8)
mask_length = k_length - self.mem_len
if mask_length > 0:
mask_shifted_length = q_length - mask_length
else:
mask_shifted_length = q_length
dec_attn_mask = (
torch.triu(all_ones, 1 + mem_length + past_length) + torch.tril(all_ones, -mask_shifted_length)
)[:, :, None]
else:
dec_attn_mask = torch.triu(
word_embeds.new_ones((q_length, k_length + past_length), dtype=torch.uint8),
diagonal=1 + mem_length + past_length,
)[:, :, None]
hidden_states = []
attentions = [] if output_attentions else None
presents = () if use_cache else None
pos_sequence = torch.arange(
k_length + past_length - 1,
past_length - 1,
-1.0,
device=word_embeds.device,
dtype=word_embeds.dtype,
)
if self.clamp_len > 0:
pos_sequence.clamp_(max=self.clamp_len)
pos_embeds = self.pos_emb(pos_sequence)
pos_embeds = self.drop(pos_embeds)
output = self.drop(word_embeds)
for i, (layer, layer_past) in enumerate(zip(self.layers, past_key_values)):
hidden_states.append(output)
mems_i = None if mems is None else mems[i]
layer_output = layer(
output,
pos_embeds,
layer_past=layer_past,
dec_attn_mask=dec_attn_mask,
mems=mems_i,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
)
output = layer_output[0]
if use_cache is True:
presents = presents + (layer_output[1],)
if output_attentions:
attentions.append(layer_output[2])
output = self.drop(output)
new_mems = self._update_mems(hidden_states, mems, mem_length, q_length)
if output_hidden_states:
# (batch_size, length, d_model)
hidden_states.append(output)
hidden_states = tuple(t.transpose(0, 1).contiguous() for t in hidden_states)
else:
hidden_states = None
if output_attentions:
# (batch_size, n_heads, q_length, k_length)
attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions)
# (batch_size, length, d_model)
output = output.transpose(0, 1).contiguous()
if not return_dict:
return tuple(v for v in [output, presents, new_mems, hidden_states, attentions] if v is not None)
return MemTransformerBaseOutput(
last_hidden_state=output,
past_key_values=presents,
mems=new_mems,
hidden_states=hidden_states,
attentions=attentions,
)
class MemTransformerLMHeadModel(TransfoXLPreTrainedModel):
config_class = MemTransformerConfig
def __init__(self, config: MemTransformerConfig) -> None:
super().__init__(config)
self.transformer = MemTransformerModel(config)
if self.config.tie_word_embeddings:
emb_weights = [emb_layer.weight for emb_layer in self.transformer.word_emb.emb_layers]
else:
emb_weights = None
emb_projs = self.transformer.word_emb.emb_projs
self.crit = ProjectedAdaptiveLogSoftmax(
config.vocab_size,
config.d_embed,
config.d_model,
config.cutoffs,
config.tie_projs,
emb_projs=emb_projs,
emb_weights=emb_weights,
div_val=config.div_val,
)
self.init_weights()
def tie_weights(self) -> None:
        # Mockup to disable weight tying as it is already being done
pass
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
mems: Optional[List[torch.FloatTensor]] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> MemTransformerOutput:
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None:
batch_size, target_length = input_ids.size(0), input_ids.size(1)
elif inputs_embeds is not None:
batch_size, target_length = inputs_embeds.size(0), inputs_embeds.size(1)
else:
raise ValueError("You have to specify either `input_ids` or `inputs_embeds`")
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
mems=mems,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = transformer_outputs[0]
pred_hidden_state = last_hidden_state[:, -target_length:]
if labels is not None:
# Prevents all labels being -100 and throwing an error
            # when backpropagating the loss
miss_valid_label = labels[0, 1:].sum() == (labels.size(1) - 1) * -100
if miss_valid_label:
# Sets an <EOS> token, just to prevent loss from being NaN
labels[0, 1] = self.config.eos_token_id
softmax_output = self.crit(pred_hidden_state, labels)
if labels is not None:
prediction_scores = self.crit(pred_hidden_state, None).detach()
prediction_scores = prediction_scores.view(batch_size, target_length, -1)
loss = softmax_output.view(batch_size, target_length - 1)
loss = loss[loss != 0].mean()
else:
prediction_scores = softmax_output.view(batch_size, target_length, -1)
loss = None
if not return_dict:
output = (prediction_scores,) + transformer_outputs[1:]
if loss is not None:
return (loss,) + output
return output
return MemTransformerOutput(
loss=loss,
prediction_scores=prediction_scores,
past_key_values=transformer_outputs.past_key_values,
mems=transformer_outputs.mems,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
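# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original file): the plain (non
# `same_length`) causal mask built inside `MemTransformerModel.forward`, shown
# with assumed toy sizes (q_length=4, mem_length=2, past_length=0). A value of
# 1 marks a masked, i.e. disallowed, attention position.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    q_length, mem_length, past_length = 4, 2, 0
    k_length = mem_length + q_length
    all_ones = torch.ones((q_length, k_length + past_length), dtype=torch.uint8)
    dec_attn_mask = torch.triu(all_ones, diagonal=1 + mem_length + past_length)[:, :, None]
    print(dec_attn_mask.squeeze(-1))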
id: archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/modeling_mem_transformer.py/0
metadata: {
    "file_path": "archai/archai/discrete_search/search_spaces/nlp/transformer_flex/models/modeling_mem_transformer.py",
    "repo_id": "archai",
    "token_count": 6023
}
__index_level_0__: 323
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import List, Optional, Tuple
from onnx import GraphProto, ModelProto, NodeProto, TensorProto, ValueInfoProto, helper
from onnxruntime.transformers.fusion_attention import AttentionMask, FusionAttention
from onnxruntime.transformers.fusion_layernorm import FusionLayerNormalization
from onnxruntime.transformers.fusion_reshape import FusionReshape
from onnxruntime.transformers.fusion_shape import FusionShape
from onnxruntime.transformers.fusion_skiplayernorm import (
FusionBiasSkipLayerNormalization,
FusionSkipLayerNormalization,
)
from onnxruntime.transformers.fusion_utils import FusionUtils
from onnxruntime.transformers.onnx_model import OnnxModel
from archai.onnx.optimization_utils.fusion_options import FusionOptions
class TransfoXLOnnxModel(OnnxModel):
"""ONNX model optimized for Transformer-XL models.
This model extends the `OnnxModel` class by enabling additional ONNX optimizations.
"""
def __init__(self, model: ModelProto) -> None:
"""Initialize the `TransfoXLOnnxModel` instance.
Args:
model: ONNX-based model.
"""
super().__init__(model)
self.attention_mask = AttentionMask(self)
self.utils = FusionUtils(self)
def change_graph_input_type(
self,
graph: GraphProto,
graph_input: ValueInfoProto,
new_type: Optional[int] = TensorProto.INT32,
) -> Tuple[NodeProto, List[NodeProto]]:
"""Change the input type of the graph and add `Cast` nodes if necessary.
Args:
graph: Graph instance.
graph_input: Graph input value.
new_type: New data type.
Returns:
A tuple containing a `Cast` node to be added and a list of `Cast` nodes to be removed.
"""
assert isinstance(graph, GraphProto)
assert isinstance(graph_input, ValueInfoProto)
assert self.find_graph_input(graph_input.name)
if graph_input.type.tensor_type.elem_type == int(new_type):
return None, []
new_cast_node = None
nodes_to_remove = []
input_name_to_nodes = self.input_name_to_nodes()
if graph_input.name in input_name_to_nodes:
nodes = input_name_to_nodes[graph_input.name]
nodes_not_cast = [node for node in nodes if node.op_type != "Cast"]
if nodes_not_cast:
node_name = self.create_node_name("Cast")
output_name = node_name + "_" + graph_input.name
new_value_info = graph.value_info.add()
new_value_info.CopyFrom(graph_input)
new_value_info.name = output_name
new_cast_node = helper.make_node(
"Cast",
[graph_input.name],
[output_name],
to=int(graph_input.type.tensor_type.elem_type),
name=node_name,
)
graph.node.extend([new_cast_node])
for node in nodes_not_cast:
OnnxModel.replace_node_input(node, graph_input.name, output_name)
nodes_cast = [node for node in nodes if node.op_type == "Cast"]
for node in nodes_cast:
if OnnxModel.get_node_attribute(node, "to") == int(new_type):
self.replace_input_of_all_nodes(node.output[0], graph_input.name)
if not self.find_graph_output(node.output[0]):
nodes_to_remove.append(node)
if nodes_to_remove:
self.remove_nodes(nodes_to_remove)
graph_input.type.tensor_type.elem_type = int(new_type)
return new_cast_node, nodes_to_remove
def change_graph_inputs_to_int32(self) -> None:
"""Change the inputs to `int32`."""
graph = self.graph()
add_cast_count = 0
remove_cast_count = 0
for graph_input in graph.input:
new_node, removed_nodes = self.change_graph_input_type(graph, graph_input, TensorProto.INT32)
if new_node:
add_cast_count += 1
remove_cast_count += len(removed_nodes)
def fuse_layer_norm(self) -> None:
"""Fuse the appropriate nodes into a `LayerNormalization` layer."""
fusion = FusionLayerNormalization(self)
fusion.apply()
def fuse_skip_layer_norm(self) -> None:
"""Fuse the appropriate nodes into a `SkipLayerNormalization` layer."""
fusion = FusionSkipLayerNormalization(self)
fusion.apply()
def fuse_add_bias_skip_layer_norm(self) -> None:
"""Fuse the appropriate nodes into a `BiasSkipLayerNormalization` layer."""
fusion = FusionBiasSkipLayerNormalization(self)
fusion.apply()
def fuse_attention(self) -> None:
"""Fuse the appropriate nodes into an `Attention` layer."""
fusion = FusionAttention(self, 0, 0, self.attention_mask)
fusion.apply()
def fuse_reshape(self) -> None:
"""Fuse the appropriate nodes into a `Reshape` layer."""
fusion = FusionReshape(self)
fusion.apply()
def fuse_shape(self) -> None:
"""Fuse the appropriate nodes into a `Shape` layer."""
fusion = FusionShape(self)
fusion.apply()
def use_dynamic_axes(
self,
dynamic_batch_dim: Optional[str] = "batch",
dynamic_seq_len: Optional[str] = "sequence",
) -> None:
"""Update inputs and outputs shapes to use dynamic axes.
Args:
dynamic_batch_dim: Name of batch size dimension.
dynamic_seq_len: Name of sequence length dimension.
"""
graph_inputs = self.get_graph_inputs_from_fused_nodes(casted=True) + self.get_graph_inputs_from_fused_nodes(
casted=False
)
for inp in self.model.graph.input:
if inp.name in graph_inputs:
dim_proto = inp.type.tensor_type.shape.dim[0]
dim_proto.dim_param = dynamic_batch_dim
if dynamic_seq_len is not None:
dim_proto = inp.type.tensor_type.shape.dim[1]
dim_proto.dim_param = dynamic_seq_len
for out in self.model.graph.output:
dim_proto = out.type.tensor_type.shape.dim[0]
dim_proto.dim_param = dynamic_batch_dim
def adjust_reshape_and_expand(self) -> None:
"""Clean up unncessary reshape nodes."""
nodes_to_remove = []
for node in self.nodes():
if node.op_type == "Reshape":
reshape_shape = self.get_constant_value(node.input[1])
if reshape_shape is not None and reshape_shape.size == 0:
nodes_to_remove.extend([node])
self.replace_input_of_all_nodes(node.output[0], node.input[0])
continue
reshape_path = self.match_parent_path(
node,
["Expand", "Expand", "Reshape", "Slice"],
[0, 0, 0, 0],
self.output_name_to_node(),
)
if reshape_path is not None:
expand_node = reshape_path[-3]
expand_shape_value = self.get_constant_value(expand_node.input[1])
reshape_before_expand = reshape_path[-2]
shape_value = self.get_constant_value(reshape_before_expand.input[1])
slice_node = reshape_path[-1]
if (
expand_shape_value is not None
and shape_value is not None
and len(expand_shape_value) == 2
and len(shape_value) == 1
and expand_shape_value[1] == shape_value[0]
):
node.input[0] = slice_node.output[0]
if nodes_to_remove:
self.remove_nodes(nodes_to_remove)
def clean_graph(self) -> None:
"""Clean the graph after fusing nodes."""
output_name_to_node = self.output_name_to_node()
nodes_to_remove = []
for node in self.nodes():
op_input_id = {"EmbedLayerNormalization": 1, "ReduceSum": 0, "Attention": 3}
if node.op_type in op_input_id:
i = op_input_id[node.op_type]
parent_nodes = self.match_parent_path(
node,
[
"Cast",
"ConstantOfShape",
"Concat",
"Unsqueeze",
"Gather",
"Shape",
],
[i, 0, 0, 0, 0, 0],
output_name_to_node,
)
if parent_nodes is not None:
(
cast,
constantOfShape,
concat,
unsqueeze,
gather,
shape,
) = parent_nodes
if shape.input[0] == self.graph().input[0].name:
constantOfShape.input[0] = shape.output[0]
output_name_to_node = self.output_name_to_node()
if node.op_type == "Attention":
parent_nodes = self.match_parent_path(
node,
["ReduceSum", "Cast", "ConstantOfShape", "Shape"],
[3, 0, 0, 0],
output_name_to_node,
)
if parent_nodes is not None:
if parent_nodes[-1].input[0] == self.graph().input[0].name:
attention_node = helper.make_node(
"Attention",
inputs=node.input[0 : len(node.input) - 1],
outputs=node.output,
name=node.name + "_remove_mask",
)
attention_node.domain = "com.microsoft"
attention_node.attribute.extend([helper.make_attribute("num_heads", self.num_heads)])
self.add_node(attention_node, self.get_graph_by_node(attention_node).name)
nodes_to_remove.append(node)
self.remove_nodes(nodes_to_remove)
def optimize(
self,
options: Optional[FusionOptions] = None,
add_dynamic_axes: Optional[bool] = False,
) -> None:
"""Perform additional transformer-based optimization.
Args:
options: Options holding which operators should be fused.
add_dynamic_axes: Whether dynamic axes should be added.
"""
if (options is None) or options.enable_layer_norm:
self.fuse_layer_norm()
# Pre-processing step
self.adjust_reshape_and_expand()
self.fuse_reshape()
if (options is None) or options.enable_skip_layer_norm:
self.fuse_skip_layer_norm()
# if (options is None) or options.enable_attention:
# if options is not None:
# self.attention_mask.set_mask_format(options.attention_mask_format)
# self.fuse_attention()
self.fuse_shape()
# Post-processing step
self.utils.remove_useless_reshape_nodes(self)
self.clean_graph()
self.prune_graph()
if (options is None) or options.enable_bias_skip_layer_norm:
self.fuse_add_bias_skip_layer_norm()
self.remove_unused_constant()
if add_dynamic_axes:
self.use_dynamic_axes()
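# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): loads an exported ONNX
# graph and runs the fusion pipeline defined above. "model.onnx" is a
# placeholder path for an actual Transformer-XL export.
# ---------------------------------------------------------------------------
# import onnx
# onnx_model = onnx.load("model.onnx")
# optimizer = TransfoXLOnnxModel(onnx_model)
# optimizer.optimize(options=None, add_dynamic_axes=True)
# onnx.save(optimizer.model, "model_opt.onnx")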
id: archai/archai/onnx/optimization_utils/transfo_xl_onnx_model.py/0
metadata: {
    "file_path": "archai/archai/onnx/optimization_utils/transfo_xl_onnx_model.py",
    "repo_id": "archai",
    "token_count": 5921
}
__index_level_0__: 324
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import Iterator
import torch
from torch import Tensor, autograd, nn
from torch.nn.modules.loss import _Loss
from torch.optim.optimizer import Optimizer
from archai.common import ml_utils
from archai.common.config import Config
from archai.common.utils import zip_eq
from archai.supergraph.nas.model import Model
def _flatten_concate(xs):
"""
    Flatten each tensor from shape [d1, d2, ..., dn] to [d]
    and then concatenate them all into a single [d_1 + d_2 + ...] vector.
    :param xs: iterable of tensors
    :return: 1-D concatenated tensor
"""
return torch.cat([x.view(-1) for x in xs])
def _get_alphas(model:Model)->Iterator[nn.Parameter]:
return model.all_owned().param_by_kind('alphas')
def _get_loss(model:Model, lossfn, x, y):
logits, *_ = model(x) # might also return aux tower logits
return lossfn(logits, y)
class BilevelOptimizer:
def __init__(self, conf_alpha_optim:Config, w_momentum: float, w_decay: float,
model: Model, lossfn: _Loss) -> None:
self._w_momentum = w_momentum # momentum for w
self._w_weight_decay = w_decay # weight decay for w
self._lossfn = lossfn
self._model = model # main model with respect to w and alpha
self._alphas = list(_get_alphas(self._model))
# this is the optimizer to optimize alphas parameter
self._alpha_optim = ml_utils.create_optimizer(conf_alpha_optim, self._alphas)
def state_dict(self)->dict:
return {
'alpha_optim': self._alpha_optim.state_dict()
}
def load_state_dict(self, state_dict)->None:
self._alpha_optim.load_state_dict(state_dict['alpha_optim'])
def _unrolled_model(self, x, y, lr: float, main_optim: Optimizer)->Model:
# TODO: should this loss be stored for later use?
loss = _get_loss(self._model, self._lossfn, x, y)
params = _flatten_concate(self._model.parameters()).detach()
try:
moment = _flatten_concate(main_optim.state[v]['momentum_buffer'] for v in self._model.parameters())
moment.mul_(self._w_momentum)
except:
moment = torch.zeros_like(params)
# flatten all gradients
grads = _flatten_concate(autograd.grad(loss, self._model.parameters())).data
# indeed, here we implement a simple SGD with momentum and weight decay
# theta = theta - eta * (moment + weight decay + dtheta)
        params = params.sub(moment + grads + self._w_weight_decay * params, alpha=lr)
# construct a new model
return self._params2model(params)
def _params2model(self, params)->Model:
"""
construct a new model with initialized weight from params
it use .state_dict() and load_state_dict() instead of
.parameters() + fill_()
:params: flatten weights, need to reshape to original shape
:return:
"""
params_d, offset = {}, 0
for k, v in self._model.named_parameters():
v_length = v.numel()
# restore params[] value to original shape
params_d[k] = params[offset: offset + v_length].view(v.size())
offset += v_length
assert offset == len(params)
model_new = copy.deepcopy(self._model)
model_dict = self._model.state_dict()
model_dict.update(params_d)
model_new.load_state_dict(model_dict)
return model_new.cuda()
def step(self, x_train: Tensor, y_train: Tensor, x_valid: Tensor, y_valid: Tensor,
main_optim: Optimizer) -> None:
        # TODO: unlike darts paper, we get lr from optimizer instead of scheduler
lr = main_optim.param_groups[0]['lr']
self._alpha_optim.zero_grad()
# compute the gradient and write it into tensor.grad
# instead of generated by loss.backward()
self._backward_bilevel(x_train, y_train, x_valid, y_valid,
lr, main_optim)
# at this point we should have model with updated gradients for w and alpha
self._alpha_optim.step()
def _backward_bilevel(self, x_train, y_train, x_valid, y_valid, lr, main_optim):
""" Compute unrolled loss and backward its gradients """
# update vmodel with w', but leave alphas as-is
# w' = w - lr * grad
unrolled_model = self._unrolled_model(x_train, y_train, lr, main_optim)
# compute loss on validation set for model with w'
# wrt alphas. The autograd.grad is used instead of backward()
# to avoid having to loop through params
vloss = _get_loss(unrolled_model, self._lossfn, x_valid, y_valid)
vloss.backward()
dalpha = [v.grad for v in _get_alphas(unrolled_model)]
dparams = [v.grad.data for v in unrolled_model.parameters()]
hessian = self._hessian_vector_product(dparams, x_train, y_train)
# dalpha we have is from the unrolled model so we need to
        # transfer those gradients back to our main model
# update final gradient = dalpha - xi*hessian
# TODO: currently alphas lr is same as w lr
with torch.no_grad():
for alpha, da, h in zip_eq(self._alphas, dalpha, hessian):
alpha.grad = da - lr*h
# now that model has both w and alpha grads,
# we can run main_optim.step() to update the param values
def _hessian_vector_product(self, dw, x, y, epsilon_unit=1e-2):
"""
Implements equation 8
dw = dw` {L_val(w`, alpha)}
w+ = w + eps * dw
w- = w - eps * dw
hessian = (dalpha {L_trn(w+, alpha)} -dalpha {L_trn(w-, alpha)})/(2*eps)
eps = 0.01 / ||dw||
"""
"""scale epsilon with grad magnitude. The dw
is a multiplier on RHS of eq 8. So this scalling is essential
in making sure that finite differences approximation is not way off
Below, we flatten each w, concate all and then take norm"""
# TODO: is cat along dim 0 correct?
dw_norm = torch.cat([w.view(-1) for w in dw]).norm()
epsilon = epsilon_unit / dw_norm
# w+ = w + epsilon * grad(w')
with torch.no_grad():
for p, v in zip_eq(self._model.parameters(), dw):
p += epsilon * v
# Now that we have model with w+, we need to compute grads wrt alphas
# This loss needs to be on train set, not validation set
loss = _get_loss(self._model, self._lossfn, x, y)
dalpha_plus = autograd.grad(
loss, self._alphas) # dalpha{L_trn(w+)}
# get model with w- and then compute grads wrt alphas
# w- = w - eps*dw`
with torch.no_grad():
for p, v in zip_eq(self._model.parameters(), dw):
                # we had already added dw above so subtracting twice gives w-
p -= 2. * epsilon * v
# similarly get dalpha_minus
loss = _get_loss(self._model, self._lossfn, x, y)
dalpha_minus = autograd.grad(loss, self._alphas)
        # reset params back to original values by adding eps * dw
with torch.no_grad():
for p, v in zip_eq(self._model.parameters(), dw):
p += epsilon * v
# apply eq 8, final difference to compute hessian
h = [(p - m) / (2. * epsilon)
for p, m in zip_eq(dalpha_plus, dalpha_minus)]
return h
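# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original file): the central-difference
# Hessian-vector product used by `_hessian_vector_product`, checked on a toy
# quadratic loss whose Hessian is exactly 2*I, so H @ dw == 2*dw.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    w = torch.randn(5)
    dw = torch.randn(5)
    def grad_at(point):
        point = point.clone().requires_grad_(True)
        loss = (point ** 2).sum()
        return autograd.grad(loss, point)[0]
    epsilon = 1e-2 / dw.norm()
    hessian_dw = (grad_at(w + epsilon * dw) - grad_at(w - epsilon * dw)) / (2.0 * epsilon)
    print(torch.allclose(hessian_dw, 2.0 * dw, atol=1e-4))  # True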
id: archai/archai/supergraph/algos/darts/bilevel_optimizer_slow.py/0
metadata: {
    "file_path": "archai/archai/supergraph/algos/darts/bilevel_optimizer_slow.py",
    "repo_id": "archai",
    "token_count": 3185
}
__index_level_0__: 325
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
class Wmr:
""" Implements the Randomized Weighted Majority algorithm by Littlestone and Warmuth
We use the version in Fig 1 in The Multiplicative Weight Update with the gain version """
def __init__(self, num_items:int, eta:float):
assert num_items > 0
assert eta >= 0.0 and eta <= 0.5
self._num_items = num_items
self._eta = eta
self._weights = self._normalize(np.ones(self._num_items))
self._round_counter = 0
@property
def weights(self):
return self._weights
    def _normalize(self, weights:np.ndarray)->np.ndarray:
return weights / np.sum(weights)
def update(self, rewards:np.array)->None:
assert len(rewards.shape) == 1
assert rewards.shape[0] == self._num_items
        assert np.all(rewards >= -1.0) and np.all(rewards <= 1.0)
# # annealed learning rate
# self._round_counter += 1
# eta = self._eta / np.sqrt(self._round_counter)
eta = self._eta
self._weights = self._weights * (1.0 + eta * rewards)
self._weights = self._normalize(self._weights)
def sample(self)->int:
return np.random.choice(self._num_items, p=self._normalize(self._weights))
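# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a few multiplicative
# weight updates with toy rewards in [-1, 1]; the weight of the first item
# should grow relative to the others.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    wmr = Wmr(num_items=3, eta=0.4)
    for _ in range(10):
        wmr.update(np.array([1.0, 0.0, -1.0]))
    print(wmr.weights)
    print(wmr.sample())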
id: archai/archai/supergraph/algos/divnas/wmr.py/0
metadata: {
    "file_path": "archai/archai/supergraph/algos/divnas/wmr.py",
    "repo_id": "archai",
    "token_count": 526
}
__index_level_0__: 326
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions used by generate_graph.py."""
from __future__ import absolute_import, division, print_function
import hashlib
import itertools
import numpy as np
def gen_is_edge_fn(bits):
"""Generate a boolean function for the edge connectivity.
Given a bitstring FEDCBA and a 4x4 matrix, the generated matrix is
[[0, A, B, D],
[0, 0, C, E],
[0, 0, 0, F],
[0, 0, 0, 0]]
Note that this function is agnostic to the actual matrix dimension due to
order in which elements are filled out (column-major, starting from least
significant bit). For example, the same FEDCBA bitstring (0-padded) on a 5x5
matrix is
[[0, A, B, D, 0],
[0, 0, C, E, 0],
[0, 0, 0, F, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
Args:
bits: integer which will be interpreted as a bit mask.
Returns:
vectorized function that returns True when an edge is present.
"""
def is_edge(x, y):
"""Is there an edge from x to y (0-indexed)?"""
if x >= y:
return 0
# Map x, y to index into bit string
index = x + (y * (y - 1) // 2)
return (bits >> index) % 2 == 1
return np.vectorize(is_edge)
def is_full_dag(matrix):
"""Full DAG == all vertices on a path from vert 0 to (V-1).
i.e. no disconnected or "hanging" vertices.
It is sufficient to check for:
1) no rows of 0 except for row V-1 (only output vertex has no out-edges)
2) no cols of 0 except for col 0 (only input vertex has no in-edges)
Args:
matrix: V x V upper-triangular adjacency matrix
Returns:
True if the there are no dangling vertices.
"""
shape = np.shape(matrix)
rows = matrix[:shape[0]-1, :] == 0
rows = np.all(rows, axis=1) # Any row with all 0 will be True
rows_bad = np.any(rows)
cols = matrix[:, 1:] == 0
cols = np.all(cols, axis=0) # Any col with all 0 will be True
cols_bad = np.any(cols)
return (not rows_bad) and (not cols_bad)
def num_edges(matrix):
"""Computes number of edges in adjacency matrix."""
return np.sum(matrix)
def hash_module(matrix, labeling):
"""Computes a graph-invariance MD5 hash of the matrix and label pair.
Args:
matrix: np.ndarray square upper-triangular adjacency matrix.
labeling: list of int labels of length equal to both dimensions of
matrix.
Returns:
MD5 hash of the matrix and labeling.
"""
vertices = np.shape(matrix)[0]
in_edges = np.sum(matrix, axis=0).tolist()
out_edges = np.sum(matrix, axis=1).tolist()
assert len(in_edges) == len(out_edges) == len(labeling)
hashes = list(zip(out_edges, in_edges, labeling))
hashes = [hashlib.md5(str(h).encode('utf-8')).hexdigest() for h in hashes]
# Computing this up to the diameter is probably sufficient but since the
# operation is fast, it is okay to repeat more times.
for _ in range(vertices):
new_hashes = []
for v in range(vertices):
in_neighbors = [hashes[w] for w in range(vertices) if matrix[w, v]]
out_neighbors = [hashes[w] for w in range(vertices) if matrix[v, w]]
new_hashes.append(hashlib.md5(
(''.join(sorted(in_neighbors)) + '|' +
''.join(sorted(out_neighbors)) + '|' +
hashes[v]).encode('utf-8')).hexdigest())
hashes = new_hashes
fingerprint = hashlib.md5(str(sorted(hashes)).encode('utf-8')).hexdigest()
return fingerprint
def permute_graph(graph, label, permutation):
"""Permutes the graph and labels based on permutation.
Args:
graph: np.ndarray adjacency matrix.
label: list of labels of same length as graph dimensions.
permutation: a permutation list of ints of same length as graph dimensions.
Returns:
np.ndarray where vertex permutation[v] is vertex v from the original graph
"""
# vertex permutation[v] in new graph is vertex v in the old graph
forward_perm = zip(permutation, list(range(len(permutation))))
inverse_perm = [x[1] for x in sorted(forward_perm)]
edge_fn = lambda x, y: graph[inverse_perm[x], inverse_perm[y]] == 1
new_matrix = np.fromfunction(np.vectorize(edge_fn),
(len(label), len(label)),
dtype=np.int8)
new_label = [label[inverse_perm[i]] for i in range(len(label))]
return new_matrix, new_label
def is_isomorphic(graph1, graph2):
"""Exhaustively checks if 2 graphs are isomorphic."""
matrix1, label1 = np.array(graph1[0]), graph1[1]
matrix2, label2 = np.array(graph2[0]), graph2[1]
assert np.shape(matrix1) == np.shape(matrix2)
assert len(label1) == len(label2)
vertices = np.shape(matrix1)[0]
# Note: input and output in our constrained graphs always map to themselves
# but this script does not enforce that.
for perm in itertools.permutations(range(0, vertices)):
pmatrix1, plabel1 = permute_graph(matrix1, label1, perm)
if np.array_equal(pmatrix1, matrix2) and plabel1 == label2:
return True
return False
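# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): builds the 3-vertex
# chain 0 -> 1 -> 2 from the bitstring 0b101 and exercises the helpers above;
# the labeling values are arbitrary placeholders.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    is_edge = gen_is_edge_fn(0b101)  # bits: A=1 (0->1), B=0 (0->2), C=1 (1->2)
    matrix = np.fromfunction(is_edge, (3, 3), dtype=np.int8).astype(np.int8)
    print(matrix)
    print(is_full_dag(matrix), num_edges(matrix))  # True 2
    print(hash_module(matrix, labeling=[-1, 0, -2])[:8])  # graph-invariant hash prefix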
id: archai/archai/supergraph/algos/nasbench101/graph_util.py/0
metadata: {
    "file_path": "archai/archai/supergraph/algos/nasbench101/graph_util.py",
    "repo_id": "archai",
    "token_count": 1989
}
__index_level_0__: 327
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from typing import List
# latest version of ray works on Windows as well
import ray
from overrides import overrides
from archai.common import common
from archai.common.common import CommonState
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.algos.petridish.petridish_utils import (
ConvexHullPoint,
JobStage,
plot_frontier,
plot_pool,
plot_seed_model_stats,
sample_from_hull,
save_hull,
save_hull_frontier,
)
from archai.supergraph.nas import nas_utils
from archai.supergraph.nas.arch_trainer import TArchTrainer
from archai.supergraph.nas.finalizers import Finalizers
from archai.supergraph.nas.model import Model
from archai.supergraph.nas.model_desc import (
CellType,
EdgeDesc,
ModelDesc,
NodeDesc,
OpDesc,
)
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.nas.search_combinations import SearchCombinations
from archai.supergraph.nas.searcher import SearchResult
logger = get_global_logger()
class SearcherPetridish(SearchCombinations):
@overrides
def search(self, conf_search:Config, model_desc_builder:ModelDescBuilder,
trainer_class:TArchTrainer, finalizers:Finalizers)->SearchResult:
logger.pushd('search')
# region config vars
self.conf_search = conf_search
conf_checkpoint = conf_search['checkpoint']
resume = conf_search['resume']
conf_post_train = conf_search['post_train']
final_desc_foldername = conf_search['final_desc_foldername']
conf_petridish = conf_search['petridish']
# petridish distributed search related parameters
self._convex_hull_eps = conf_petridish['convex_hull_eps']
self._max_madd = conf_petridish['max_madd']
self._max_hull_points = conf_petridish['max_hull_points']
self._checkpoints_foldername = conf_petridish['checkpoints_foldername']
# endregion
self._checkpoint = nas_utils.create_checkpoint(conf_checkpoint, resume)
# parent models list
self._hull_points: List[ConvexHullPoint] = []
self._ensure_dataset_download(conf_search)
# checkpoint will restore the hull we had
is_restored = self._restore_checkpoint()
# seed the pool with many models of different
# macro parameters like number of cells, reductions etc if parent pool
# could not be restored and/or this is the first time this job has been run.
future_ids = [] if is_restored else self._create_seed_jobs(conf_search,
model_desc_builder)
while not self._is_search_done():
logger.info(f'Ray jobs running: {len(future_ids)}')
if future_ids:
# get first completed job
job_id_done, future_ids = ray.wait(future_ids)
hull_point = ray.get(job_id_done[0])
logger.info(f'Hull point id {hull_point.id} with stage {hull_point.job_stage.name} completed')
if hull_point.is_trained_stage():
self._update_convex_hull(hull_point)
# sample a point and search
sampled_point = sample_from_hull(self._hull_points,
self._convex_hull_eps)
future_id = SearcherPetridish.search_model_desc_dist.remote(self,
conf_search, sampled_point, model_desc_builder, trainer_class,
finalizers, common.get_state())
future_ids.append(future_id)
logger.info(f'Added sampled point {sampled_point.id} for search')
elif hull_point.job_stage==JobStage.SEARCH:
# create the job to train the searched model
future_id = SearcherPetridish.train_model_desc_dist.remote(self,
conf_post_train, hull_point, common.get_state())
future_ids.append(future_id)
logger.info(f'Added sampled point {hull_point.id} for post-search training')
else:
raise RuntimeError(f'Job stage "{hull_point.job_stage}" is not expected in search loop')
# cancel any remaining jobs to free up gpus for the eval phase
for future_id in future_ids:
ray.cancel(future_id, force=True) # without force, main process stops
ray.wait([future_id])
# plot and save the hull
expdir = common.get_expdir()
assert expdir
plot_frontier(self._hull_points, self._convex_hull_eps, expdir)
best_point = save_hull_frontier(self._hull_points, self._convex_hull_eps,
final_desc_foldername, expdir)
save_hull(self._hull_points, expdir)
        plot_pool(self._hull_points, expdir)
# return best point as search result
search_result = SearchResult(best_point.model_desc, search_metrics=None,
train_metrics=best_point.metrics)
self.clean_log_result(conf_search, search_result)
logger.popd()
return search_result
@staticmethod
@ray.remote(num_gpus=1)
def search_model_desc_dist(searcher:'SearcherPetridish', conf_search:Config,
hull_point:ConvexHullPoint, model_desc_builder:ModelDescBuilder,
trainer_class:TArchTrainer, finalizers:Finalizers, common_state:CommonState)\
->ConvexHullPoint:
# as this runs in different process, initialize globals
common.init_from(common_state)
        # register ops as we are in a different process now
conf_model_desc = conf_search['model_desc']
model_desc_builder.pre_build(conf_model_desc)
assert hull_point.is_trained_stage()
# cloning is strictly not needed but just in case if we run this
# function in same process, it would be good to avoid surprise
model_desc = hull_point.model_desc.clone()
searcher._add_node(model_desc, model_desc_builder)
model_desc, search_metrics = searcher.search_model_desc(conf_search,
model_desc, trainer_class, finalizers)
cells, reductions, nodes = hull_point.cells_reductions_nodes
new_point = ConvexHullPoint(JobStage.SEARCH, hull_point.id,
hull_point.sampling_count, model_desc,
(cells, reductions, nodes+1), # we added a node
metrics=search_metrics)
return new_point
@staticmethod
@ray.remote(num_gpus=1)
def train_model_desc_dist(searcher:'SearcherPetridish', conf_train:Config,
hull_point:ConvexHullPoint, common_state:CommonState)\
->ConvexHullPoint:
# as this runs in different process, initialize globals
common.init_from(common_state)
assert not hull_point.is_trained_stage()
model_metrics = searcher.train_model_desc(hull_point.model_desc, conf_train)
model_stats = nas_utils.get_model_stats(model_metrics.model)
new_point = ConvexHullPoint(hull_point.next_stage(), hull_point.id, hull_point.
sampling_count, hull_point.model_desc,
hull_point.cells_reductions_nodes,
model_metrics.metrics,
model_stats)
return new_point
def _add_node(self, model_desc:ModelDesc, model_desc_builder:ModelDescBuilder)->None:
for ci, cell_desc in enumerate(model_desc.cell_descs()):
reduction = (cell_desc.cell_type==CellType.Reduction)
nodes = cell_desc.nodes()
# petridish must seed with one node
assert len(nodes) > 0
# input/output channels for all nodes are same
conv_params = nodes[0].conv_params
# assign candidate input IDs for the new node: s0 and s1 have IDs 0 and 1,
# followed by the outputs of all existing nodes except the last one, since
# the new node will be inserted just before the last node
input_ids = list(range(len(nodes) + 1))
assert len(input_ids) >= 2 # 2 stem inputs
op_desc = OpDesc('petridish_reduction_op' if reduction else 'petridish_normal_op',
params={
'conv': conv_params,
# specify strides for each input, later we will
# give this to each primitive
'_strides':[2 if reduction and j < 2 else 1 \
for j in input_ids],
}, in_len=len(input_ids), trainables=None, children=None)
edge = EdgeDesc(op_desc, input_ids=input_ids)
new_node = NodeDesc(edges=[edge], conv_params=conv_params)
nodes.insert(len(nodes)-1, new_node)
# output shapes of all nodes are the same
node_shapes = cell_desc.node_shapes
new_node_shape = copy.deepcopy(node_shapes[-1])
node_shapes.insert(len(node_shapes)-1, new_node_shape)
# post op needs rebuilding because number of inputs to it has changed so input/output channels may be different
post_op_shape, post_op_desc = model_desc_builder.build_cell_post_op(cell_desc.stem_shapes,
node_shapes, cell_desc.conf_cell, ci)
cell_desc.reset_nodes(nodes, node_shapes,
post_op_desc, post_op_shape)
def _ensure_dataset_download(self, conf_search:Config)->None:
conf_loader = conf_search['loader']
self.get_data(conf_loader)
def _is_search_done(self)->bool:
'''Terminate search if max MAdd or number of points exceeded'''
if not self._hull_points:
return False
max_madd_parent = max(self._hull_points, key=lambda p:p.model_stats.MAdd)
return max_madd_parent.model_stats.MAdd > self._max_madd or \
len(self._hull_points) > self._max_hull_points
def _create_seed_jobs(self, conf_search:Config, model_desc_builder:ModelDescBuilder)->list:
conf_model_desc = conf_search['model_desc']
conf_seed_train = conf_search['seed_train']
future_ids = [] # ray job IDs
seed_model_stats = [] # seed model stats for visualization and debugging
macro_combinations = list(self.get_combinations(conf_search))
for reductions, cells, nodes in macro_combinations:
# if N R N R N R cannot be satisfied, ignore combination
if cells < reductions * 2 + 1:
continue
# create seed model
model_desc = self.build_model_desc(model_desc_builder,
conf_model_desc,
reductions, cells, nodes)
hull_point = ConvexHullPoint(JobStage.SEED, 0, 0, model_desc,
(cells, reductions, nodes))
# pre-train the seed model
future_id = SearcherPetridish.train_model_desc_dist.remote(self,
conf_seed_train, hull_point, common.get_state())
future_ids.append(future_id)
# build a model so we can get its model stats
temp_model = Model(model_desc, droppath=True, affine=True)
seed_model_stats.append(nas_utils.get_model_stats(temp_model))
# save the model stats in a plot and tsv file so we can
# visualize the spread on the x-axis
expdir = common.get_expdir()
assert expdir
plot_seed_model_stats(seed_model_stats, expdir)
return future_ids
def _update_convex_hull(self, new_point:ConvexHullPoint)->None:
assert new_point.is_trained_stage() # only add models for which we have metrics and stats
self._hull_points.append(new_point)
if self._checkpoint is not None:
self._checkpoint.new()
self._checkpoint['convex_hull_points'] = self._hull_points
self._checkpoint.commit()
logger.info(f'Added to convex hull points: MAdd {new_point.model_stats.MAdd}, '
f'num cells {len(new_point.model_desc.cell_descs())}, '
f'num nodes in cell {len(new_point.model_desc.cell_descs()[0].nodes())}')
def _restore_checkpoint(self)->bool:
can_restore = self._checkpoint is not None \
and 'convex_hull_points' in self._checkpoint
if can_restore:
self._hull_points = self._checkpoint['convex_hull_points']
logger.warn({'Hull restored': True})
return can_restore
@overrides
def build_model_desc(self, model_desc_builder:ModelDescBuilder,
conf_model_desc:Config,
reductions:int, cells:int, nodes:int)->ModelDesc:
# reset macro params in copy of config
conf_model_desc = copy.deepcopy(conf_model_desc)
conf_model_desc['n_reductions'] = reductions
conf_model_desc['n_cells'] = cells
conf_model_desc['cell']['n_nodes'] = nodes
# create model desc for search using model config
# we will build model without call to model_desc_builder for pre-training
model_desc = model_desc_builder.build(conf_model_desc, template=None)
return model_desc
|
archai/archai/supergraph/algos/petridish/searcher_petridish.py/0
|
{
"file_path": "archai/archai/supergraph/algos/petridish/searcher_petridish.py",
"repo_id": "archai",
"token_count": 6199
}
| 328 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from torch.utils.data import Dataset
class MetaDataset(Dataset):
def __init__(self, source:Dataset, transform=None, target_transform=None) -> None:
self._source = source
self.transform = transform if transform is not None else lambda x: x
self.target_transform = target_transform if target_transform is not None else lambda x: x
self._meta = [{'idx':i} for i in range(len(source))]
def __len__(self):
return len(self._source)
def __getitem__(self, idx):
x, y = self._source[idx]
return (self.transform(x), self.target_transform(y), self._meta[idx])
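# Example usage (editor's sketch, not part of the original module): wrap a
# torchvision CIFAR10 dataset (an illustrative assumption) so that each sample
# also carries a metadata dict holding its original index.
if __name__ == "__main__":
    from torchvision.datasets import CIFAR10
    from torchvision.transforms import ToTensor
    base = CIFAR10(root="dataroot", train=True, download=True)
    meta_ds = MetaDataset(base, transform=ToTensor())
    x, y, meta = meta_ds[0]
    print(x.shape, y, meta)  # meta == {'idx': 0}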
|
archai/archai/supergraph/datasets/meta_dataset.py/0
|
{
"file_path": "archai/archai/supergraph/datasets/meta_dataset.py",
"repo_id": "archai",
"token_count": 257
}
| 329 |
import torch
from torch import nn
from torch.nn import DataParallel
from .densenet import *
from .googlenet import *
from .inception import *
from .mobilenetv2 import *
from .pyramidnet import PyramidNet
from .resnet import *
from .resnet_orig import *
from .shakeshake.shake_resnet import ShakeResNet
from .shakeshake.shake_resnext import ShakeResNeXt
from .vgg import *
from .wideresnet import WideResNet
# from torchvision import models
def get_model(conf, num_class=10):
name = conf['type']
if name == 'resnet50':
model = ResNet(dataset='imagenet', depth=50, n_classes=num_class, bottleneck=True)
elif name == 'resnet200':
model = ResNet(dataset='imagenet', depth=200, n_classes=num_class, bottleneck=True)
elif name == 'wresnet40_2':
model = WideResNet(40, 2, dropout_rate=0.0, n_classes=num_class)
elif name == 'wresnet28_10':
model = WideResNet(28, 10, dropout_rate=0.0, n_classes=num_class)
elif name == 'shakeshake26_2x32d':
model = ShakeResNet(26, 32, num_class)
elif name == 'shakeshake26_2x64d':
model = ShakeResNet(26, 64, num_class)
elif name == 'shakeshake26_2x96d':
model = ShakeResNet(26, 96, num_class)
elif name == 'shakeshake26_2x112d':
model = ShakeResNet(26, 112, num_class)
elif name == 'shakeshake26_2x96d_next':
model = ShakeResNeXt(26, 96, 4, num_class)
elif name == 'pyramid':
model = PyramidNet('cifar10', depth=conf['depth'], alpha=conf['alpha'], n_classes=num_class, bottleneck=conf['bottleneck'])
else:
raise NameError('no model named, %s' % name)
return model
def num_class(dataset):
return {
'cifar10': 10,
'reduced_cifar10': 10,
'cifar10.1': 10,
'cifar100': 100,
'svhn': 10,
'reduced_svhn': 10,
'imagenet': 1000,
'reduced_imagenet': 120,
}[dataset]
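# Example usage (editor's sketch, not part of the original module); the conf dict
# is an illustrative assumption and only needs the keys read for the chosen type:
#   conf = {'type': 'wresnet28_10'}
#   model = get_model(conf, num_class=num_class('cifar10'))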
|
archai/archai/supergraph/models/__init__.py/0
|
{
"file_path": "archai/archai/supergraph/models/__init__.py",
"repo_id": "archai",
"token_count": 828
}
| 330 |
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
init.xavier_uniform_(m.weight, gain=np.sqrt(2))
init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class WideBasic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(WideBasic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes, momentum=0.9)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes, momentum=0.9)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class WideResNet(nn.Module):
def __init__(self, depth, widen_factor, dropout_rate, n_classes):
super(WideResNet, self).__init__()
self.in_planes = 16
assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
n = int((depth - 4) / 6)
k = widen_factor
nStages = [16, 16*k, 32*k, 64*k]
self.conv1 = conv3x3(3, nStages[0])
self.layer1 = self._wide_layer(WideBasic, nStages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(WideBasic, nStages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(WideBasic, nStages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
self.linear = nn.Linear(nStages[3], n_classes)
# self.apply(conv_init)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
# out = F.avg_pool2d(out, 8)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
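# Quick sanity check (editor's sketch, not part of the original module): build a
# WRN-28-10 and run a dummy CIFAR-sized batch through it.
if __name__ == "__main__":
    import torch
    net = WideResNet(depth=28, widen_factor=10, dropout_rate=0.0, n_classes=10)
    out = net(torch.randn(2, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 10])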
|
archai/archai/supergraph/models/wideresnet.py/0
|
{
"file_path": "archai/archai/supergraph/models/wideresnet.py",
"repo_id": "archai",
"token_count": 1420
}
| 331 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import math
import os
from typing import Iterator, Optional, Tuple
import yaml
from overrides import overrides
from archai.common import utils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.nas import nas_utils
from archai.supergraph.nas.arch_trainer import TArchTrainer
from archai.supergraph.nas.finalizers import Finalizers
from archai.supergraph.nas.model_desc_builder import ModelDescBuilder
from archai.supergraph.nas.searcher import ModelMetrics, Searcher, SearchResult
from archai.supergraph.utils.metrics import Metrics
logger = get_global_logger()
class SearchCombinations(Searcher):
@overrides
def search(self, conf_search:Config, model_desc_builder:ModelDescBuilder,
trainer_class:TArchTrainer, finalizers:Finalizers)->SearchResult:
# region config vars
conf_model_desc = conf_search['model_desc']
conf_post_train = conf_search['post_train']
conf_checkpoint = conf_search['checkpoint']
resume = conf_search['resume']
# endregion
self._checkpoint = nas_utils.create_checkpoint(conf_checkpoint, resume)
macro_combinations = list(self.get_combinations(conf_search))
start_macro_i, best_search_result = self.restore_checkpoint(conf_search,
macro_combinations)
best_macro_comb = -1,-1,-1 # reductions, cells, nodes
for macro_comb_i in range(start_macro_i, len(macro_combinations)):
reductions, cells, nodes = macro_combinations[macro_comb_i]
logger.pushd(f'r{reductions}.c{cells}.n{nodes}')
# build model description that we will search on
model_desc = self.build_model_desc(model_desc_builder, conf_model_desc,
reductions, cells, nodes)
# perform search on model description
model_desc, search_metrics = self.search_model_desc(conf_search,
model_desc, trainer_class, finalizers)
# train searched model for few epochs to get some perf metrics
model_metrics = self.train_model_desc(model_desc,
conf_post_train)
assert model_metrics is not None, "'post_train' section in yaml should have non-zero epochs if running combinations search"
# save result
self.save_trained(conf_search, reductions, cells, nodes, model_metrics)
# update the best result so far
if self.is_better_metrics(best_search_result.search_metrics,
model_metrics.metrics):
best_search_result = SearchResult(model_desc, search_metrics,
model_metrics.metrics)
best_macro_comb = reductions, cells, nodes
# checkpoint
assert best_search_result is not None
self.record_checkpoint(macro_comb_i, best_search_result)
logger.popd() # reductions, cells, nodes
assert best_search_result is not None
self.clean_log_result(conf_search, best_search_result)
logger.info({'best_macro_comb':best_macro_comb})
return best_search_result
def is_better_metrics(self, metrics1:Optional[Metrics],
metrics2:Optional[Metrics])->bool:
if metrics1 is None or metrics2 is None:
return True
return metrics2.best_val_top1() >= metrics1.best_val_top1()
def restore_checkpoint(self, conf_search:Config, macro_combinations)\
->Tuple[int, Optional[SearchResult]]:
conf_pareto = conf_search['pareto']
pareto_summary_filename = conf_pareto['summary_filename']
summary_filepath = utils.full_path(pareto_summary_filename)
# if checkpoint is available then restart from last combination we were running
checkpoint_avail = self._checkpoint is not None
resumed, state = False, None
start_macro_i, best_result = 0, None
if checkpoint_avail:
state = self._checkpoint.get('search', None)
if state is not None:
start_macro_i = state['start_macro_i']
assert start_macro_i >= 0 and start_macro_i < len(macro_combinations)
best_result = yaml.load(state['best_result'], Loader=yaml.Loader)
start_macro_i += 1 # resume after the last checkpoint
resumed = True
if not resumed:
# erase previous file left over from run
utils.zero_file(summary_filepath)
logger.warn({'resumed': resumed, 'checkpoint_avail': checkpoint_avail,
'checkpoint_val': state is not None,
'start_macro_i': start_macro_i,
'total_macro_combinations': len(macro_combinations)})
return start_macro_i, best_result
def record_checkpoint(self, macro_comb_i:int, best_result:SearchResult)->None:
if self._checkpoint is not None:
state = {'start_macro_i': macro_comb_i,
'best_result': yaml.dump(best_result)}
self._checkpoint.new()
self._checkpoint['search'] = state
self._checkpoint.commit()
def get_combinations(self, conf_search:Config)->Iterator[Tuple[int, int, int]]:
conf_pareto = conf_search['pareto']
conf_model_desc = conf_search['model_desc']
min_cells = conf_model_desc['n_cells']
min_reductions = conf_model_desc['n_reductions']
min_nodes = conf_model_desc['cell']['n_nodes']
max_cells = conf_pareto['max_cells']
max_reductions = conf_pareto['max_reductions']
max_nodes = conf_pareto['max_nodes']
logger.info({'min_reductions': min_reductions,
'min_cells': min_cells,
'min_nodes': min_nodes,
'max_reductions': max_reductions,
'max_cells': max_cells,
'max_nodes': max_nodes
})
# TODO: what happens when reductions is 3 but cells is 2? have to step
# through code and check
for reductions in range(min_reductions, max_reductions+1):
for cells in range(min_cells, max_cells+1):
for nodes in range(min_nodes, max_nodes+1):
yield reductions, cells, nodes
def save_trained(self, conf_search:Config, reductions:int, cells:int, nodes:int,
model_metrics:ModelMetrics)->None:
"""Save the model and metric info into a log file"""
metrics_dir = conf_search['metrics_dir']
# construct path where we will save
subdir = utils.full_path(metrics_dir.format(**vars()), create=True)
model_stats = nas_utils.get_model_stats(model_metrics.model)
# save model_stats in its own file
model_stats_filepath = os.path.join(subdir, 'model_stats.yaml')
if model_stats_filepath:
with open(model_stats_filepath, 'w') as f:
yaml.dump(model_stats, f)
# save just the metrics separately for convenience
metrics_filepath = os.path.join(subdir, 'metrics.yaml')
if metrics_filepath:
with open(metrics_filepath, 'w') as f:
yaml.dump(model_stats.metrics, f)
logger.info({'model_stats_filepath': model_stats_filepath,
'metrics_filepath': metrics_filepath})
# append key info in root pareto data
if self._summary_filepath:
train_top1 = val_top1 = train_epoch = val_epoch = math.nan
# extract metrics
if model_metrics.metrics:
best_metrics = model_metrics.metrics.run_metrics.best_epoch()
train_top1 = best_metrics[0].top1.avg
train_epoch = best_metrics[0].index
if best_metrics[1]:
val_top1 = best_metrics[1].top1.avg if len(best_metrics)>1 else math.nan
val_epoch = best_metrics[1].index if len(best_metrics)>1 else math.nan
# extract model stats
flops = model_stats.Flops
parameters = model_stats.parameters
inference_memory = model_stats.inference_memory
inference_duration = model_stats.duration
utils.append_csv_file(self._summary_filepath, [
('reductions', reductions),
('cells', cells),
('nodes', nodes),
('train_top1', train_top1),
('train_epoch', train_epoch),
('val_top1', val_top1),
('val_epoch', val_epoch),
('flops', flops),
('params', parameters),
('inference_memory', inference_memory),
('inference_duration', inference_duration)
])
|
archai/archai/supergraph/nas/search_combinations.py/0
|
{
"file_path": "archai/archai/supergraph/nas/search_combinations.py",
"repo_id": "archai",
"token_count": 4168
}
| 332 |
# Copyright (c) 2019 abhuse.
# Licensed under the MIT license.
# https://github.com/abhuse/cyclic-cosine-decay/blob/master/scheduler.py
from collections.abc import Iterable
from math import cos, floor, log, pi
from typing import List, Optional, Union
import torch
from torch.optim.lr_scheduler import _LRScheduler
class CyclicCosineDecayLR(_LRScheduler):
"""A learning rate scheduler for cyclic cosine annealing.
This scheduler is useful when doing QAT, providing
a ~0.3 perplexity (ppl) improvement over the traditional cosine annealing scheduler.
For more details and code, see the project's GitHub repository:
https://github.com/abhuse/cyclic-cosine-decay
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
init_decay_epochs: int,
min_decay_lr: Union[float, List[float]],
restart_interval: Optional[int] = None,
restart_interval_multiplier: Optional[float] = None,
restart_lr: Optional[Union[float, List[float]]] = None,
warmup_epochs: Optional[int] = None,
warmup_start_lr: Optional[Union[float, List[float]]] = None,
last_epoch: Optional[int] = -1,
verbose: Optional[bool] = False,
) -> None:
"""Override the initialization of `_LRScheduler` with custom attributes.
Args:
optimizer: The optimizer to use. This should be an instance of
`torch.optim.Optimizer`.
init_decay_epochs: The number of epochs for the initial decay period.
min_decay_lr: The learning rate at the end of the initial decay period.
restart_interval: The interval between restarts of the cyclic schedule.
This should be a positive integer, or `None` to disable restarts.
restart_interval_multiplier: The coefficient used to increase the
restart interval between cycles. This should be a positive float,
or `None` to use a fixed restart interval.
restart_lr: The learning rate at the start of a cycle. This should be
a positive float or a list of floats with the same length as
`optimizer.param_groups`, or `None` to use the current learning
rates of the optimizer.
warmup_epochs: The number of epochs to use for a warmup period. This
should be a positive integer, or `None` to disable warmup.
warmup_start_lr: The learning rate at the start of the warmup period.
This should be a positive float or a list of floats with the same
length as `optimizer.param_groups`, and must be set if `warmup_epochs`
is not `None`.
last_epoch: The index of the last epoch. This is used when resuming a
training job.
verbose: Whether to print a message to stdout for each update.
"""
if not isinstance(init_decay_epochs, int) or init_decay_epochs < 1:
raise ValueError("init_decay_epochs must be positive integer, got {} instead".format(init_decay_epochs))
if isinstance(min_decay_lr, Iterable) and len(min_decay_lr) != len(optimizer.param_groups):
raise ValueError(
"Expected len(min_decay_lr) to be equal to len(optimizer.param_groups), "
"got {} and {} instead".format(len(min_decay_lr), len(optimizer.param_groups))
)
if restart_interval is not None and (not isinstance(restart_interval, int) or restart_interval < 1):
raise ValueError("restart_interval must be positive integer, got {} instead".format(restart_interval))
if restart_interval_multiplier is not None and (
not isinstance(restart_interval_multiplier, float) or restart_interval_multiplier <= 0
):
raise ValueError(
"restart_interval_multiplier must be positive float, got {} instead".format(restart_interval_multiplier)
)
if isinstance(restart_lr, Iterable) and len(restart_lr) != len(optimizer.param_groups):
raise ValueError(
"Expected len(restart_lr) to be equal to len(optimizer.param_groups), "
"got {} and {} instead".format(len(restart_lr), len(optimizer.param_groups))
)
if warmup_epochs is not None:
if not isinstance(warmup_epochs, int) or warmup_epochs < 1:
raise ValueError(
"Expected warmup_epochs to be positive integer, got {} instead".format(type(warmup_epochs))
)
if warmup_start_lr is None:
raise ValueError("warmup_start_lr must be set when warmup_epochs is not None")
if not (isinstance(warmup_start_lr, float) or isinstance(warmup_start_lr, Iterable)):
raise ValueError(
"warmup_start_lr must be either float or iterable of floats, got {} instead".format(warmup_start_lr)
)
if isinstance(warmup_start_lr, Iterable) and len(warmup_start_lr) != len(optimizer.param_groups):
raise ValueError(
"Expected len(warmup_start_lr) to be equal to len(optimizer.param_groups), "
"got {} and {} instead".format(len(warmup_start_lr), len(optimizer.param_groups))
)
group_num = len(optimizer.param_groups)
self._warmup_start_lr = [warmup_start_lr] * group_num if isinstance(warmup_start_lr, float) else warmup_start_lr
self._warmup_epochs = 0 if warmup_epochs is None else warmup_epochs
self._init_decay_epochs = init_decay_epochs
self._min_decay_lr = [min_decay_lr] * group_num if isinstance(min_decay_lr, float) else min_decay_lr
self._restart_lr = [restart_lr] * group_num if isinstance(restart_lr, float) else restart_lr
self._restart_interval = restart_interval
self._restart_interval_multiplier = restart_interval_multiplier
super(CyclicCosineDecayLR, self).__init__(optimizer, last_epoch, verbose=verbose)
def get_lr(self) -> List[float]:
if self._warmup_epochs > 0 and self.last_epoch < self._warmup_epochs:
return self._calc(self.last_epoch, self._warmup_epochs, self._warmup_start_lr, self.base_lrs)
elif self.last_epoch < self._init_decay_epochs + self._warmup_epochs:
return self._calc(
self.last_epoch - self._warmup_epochs, self._init_decay_epochs, self.base_lrs, self._min_decay_lr
)
else:
if self._restart_interval is not None:
if self._restart_interval_multiplier is None:
cycle_epoch = (
self.last_epoch - self._init_decay_epochs - self._warmup_epochs
) % self._restart_interval
lrs = self.base_lrs if self._restart_lr is None else self._restart_lr
return self._calc(cycle_epoch, self._restart_interval, lrs, self._min_decay_lr)
else:
n = self._get_n(self.last_epoch - self._warmup_epochs - self._init_decay_epochs)
sn_prev = self._partial_sum(n)
cycle_epoch = self.last_epoch - sn_prev - self._warmup_epochs - self._init_decay_epochs
interval = self._restart_interval * self._restart_interval_multiplier**n
lrs = self.base_lrs if self._restart_lr is None else self._restart_lr
return self._calc(cycle_epoch, interval, lrs, self._min_decay_lr)
else:
return self._min_decay_lr
def _calc(self, t: int, T: int, lrs: List[float], min_lrs: List[float]) -> List[float]:
return [min_lr + (lr - min_lr) * ((1 + cos(pi * t / T)) / 2) for lr, min_lr in zip(lrs, min_lrs)]
def _get_n(self, epoch: int) -> int:
_t = 1 - (1 - self._restart_interval_multiplier) * epoch / self._restart_interval
return floor(log(_t, self._restart_interval_multiplier))
def _partial_sum(self, n: int) -> float:
return (
self._restart_interval
* (1 - self._restart_interval_multiplier**n)
/ (1 - self._restart_interval_multiplier)
)
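# Example usage (editor's sketch, not part of the original module): the linear model,
# SGD optimizer and epoch counts below are illustrative assumptions.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = CyclicCosineDecayLR(
        optimizer,
        init_decay_epochs=10,
        min_decay_lr=1e-3,
        restart_interval=5,
        restart_lr=0.05,
        warmup_epochs=2,
        warmup_start_lr=0.01,
    )
    for _ in range(30):
        optimizer.step()  # the actual training step would go here
        scheduler.step()
    print(scheduler.get_last_lr())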
|
archai/archai/trainers/cyclic_cosine_scheduler.py/0
|
{
"file_path": "archai/archai/trainers/cyclic_cosine_scheduler.py",
"repo_id": "archai",
"token_count": 3670
}
| 333 |
__include__: "darts.yaml" # just use darts defaults
nas:
eval:
loader:
train_batch: 68
search:
# options are the mutual-information based 'mi' and 'mi_ranked', plus 'random' and 'default'.
# NOTE: 'default' is not compatible with 'noalpha' trainer as 'default' uses
# the darts finalizer and needs alphas
# 'mi_ranked' is not compatible with 'noalpha' as it requires alphas.
finalizer: 'mi_ranked'
divnas:
sigma: 168
archtrainer: 'bilevel' # options are 'bilevel', 'noalpha'
trainer:
epochs: 50
|
archai/confs/algos/divnas.yaml/0
|
{
"file_path": "archai/confs/algos/divnas.yaml",
"repo_id": "archai",
"token_count": 206
}
| 334 |
common:
checkpoint:
freq: 10
dataset:
max_batches: -1
autoaug:
loader:
epochs: 600
batch: 2048
optimizer:
type: "cocob"
alpha: 100
lr_schedule:
type: null
min_lr: null
model:
type: 'resnet50'
|
archai/confs/aug/aug_cifar_cocob_resnet50.yaml/0
|
{
"file_path": "archai/confs/aug/aug_cifar_cocob_resnet50.yaml",
"repo_id": "archai",
"token_count": 161
}
| 335 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
from archai.datasets.cv.mnist_dataset_provider import MnistDatasetProvider
def main():
""" This script is in a different folder from the other scripts because this way ensures
maximum reuse of the output dataset during the development of your other training script.
Often times those need more debugging and this will save on cloud compute by maximizing
the reuse of this node in each submitted Azure ML pipeline """
# input and output arguments
print("Starting prep_data_store...")
parser = argparse.ArgumentParser()
parser.add_argument("--path", type=str, help="root folder to place the downloaded model")
args = parser.parse_args()
path = args.path
print(f'Writing MNIST dataset to: {path}')
if not path or not os.path.exists(path):
raise ValueError(f'Missing path: {path}')
provider = MnistDatasetProvider(root=path)
# now force the full download to happen to that root folder.
provider.get_train_dataset()
provider.get_val_dataset()
provider.get_test_dataset()
if __name__ == "__main__":
main()
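# Example invocation (editor's sketch, not part of the original script):
#   python prep_data_store.py --path ./dataset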
|
archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/data_prep/prep_data_store.py/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/data_prep/prep_data_store.py",
"repo_id": "archai",
"token_count": 365
}
| 336 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import sys
import json
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import TQDMProgressBar
from model import MyModel
from mnist_data_module import MNistDataModule
from archai.common.store import ArchaiStore
import mlflow
from shutil import copyfile
def print_auto_logged_info(r):
tags = {k: v for k, v in r.data.tags.items() if not k.startswith("mlflow.")}
artifacts = [f.path for f in mlflow.MlflowClient().list_artifacts(r.info.run_id, "model")]
print("run_id: {}".format(r.info.run_id))
print("artifacts: {}".format(artifacts))
print("params: {}".format(r.data.params))
print("metrics: {}".format(r.data.metrics))
print("tags: {}".format(tags))
def main():
""" This program trains a model, exports the model as onnx and updates the status of this
training job in an Azure storage table. """
parser = argparse.ArgumentParser()
parser.add_argument("--name", required=True, type=str, help="The globally unique name of this model")
parser.add_argument("--storage_account_key", required=True, type=str, help="Azure model store key")
parser.add_argument("--storage_account_name", required=True, type=str, help="Azure model store name")
parser.add_argument("--save_models", action='store_true', help="save models to azure storage")
parser.add_argument("--model_params", type=str, help="json string containing model parameters")
parser.add_argument("--data_dir", type=str, help="location of dataset", default='dataset')
parser.add_argument('--epochs', type=float, help='number of epochs to train', default=0.001)
parser.add_argument("--output", type=str, help="place to write the results", default='output')
pipeline_id = os.getenv('AZUREML_ROOT_RUN_ID')
args = parser.parse_args()
save_models = args.save_models
output_folder = args.output
if save_models and not os.path.exists(output_folder):
os.makedirs(output_folder, exist_ok=True)
print(f'Training model: {args.name} with architecture {args.model_params}')
name = args.name
store = ArchaiStore(args.storage_account_name, args.storage_account_key)
e = store.lock(name, 'training')
epochs = args.epochs
try:
model = MyModel.from_archid(args.model_params)
if model is None:
e['status'] = 'failed'
e['error'] = 'invalid model parameters'
store.merge_status_entity(e)
return
e['nb_layers'] = model.nb_layers
e['kernel_size'] = model.kernel_size
e['hidden_dim'] = model.hidden_dim
e['epochs'] = epochs
if pipeline_id is not None:
e['pipeline_id'] = pipeline_id
store.merge_status_entity(e)
data = MNistDataModule(args.data_dir)
trainer = Trainer(accelerator='gpu', max_epochs=1, callbacks=[TQDMProgressBar(refresh_rate=100)])
mlflow.pytorch.autolog(log_models=save_models, registered_model_name=name)
with mlflow.start_run() as run:
trainer.fit(model, data)
print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id))
result = trainer.validate(model, data)
val_acc = result[0]['accuracy']
if save_models:
# this writes the results to the output folder.
model.export_onnx(data.input_shape, os.path.join(output_folder, 'model.onnx'))
config = {
'name': name,
'val_acc': val_acc,
'epochs': epochs,
'nb_layers': model.nb_layers,
'kernel_size': model.kernel_size,
'hidden_dim': model.hidden_dim,
}
json_file = os.path.join(output_folder, 'results.json')
with open(json_file, 'w') as fp:
json.dump(config, fp)
# post updated progress to our unified status table.
e['val_acc'] = float(val_acc)
e['status'] = 'completed'
store.merge_status_entity(e)
store.unlock(name)
if os.path.isfile('model_summary.txt'):
copyfile('model_summary.txt', os.path.join(output_folder, 'model_summary.txt'))
print(f"Training job completed successfully with validation accuracy {val_acc}")
except Exception as ex:
print(f"Training job failed with err {str(ex)}")
e['status'] = 'failed'
e['error'] = str(ex)
store.merge_status_entity(e)
store.unlock(name)
sys.exit(1)
if __name__ == "__main__":
main()
|
archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/train.py/0
|
{
"file_path": "archai/docs/advanced_guide/cloud/azure/notebooks/multi_node_search/scripts/train.py",
"repo_id": "archai",
"token_count": 1904
}
| 337 |
<jupyter_start><jupyter_text>Implementing a Custom Dataset ProviderAbstract base classes (ABCs) define a blueprint for a class, specifying its methods and attributes, but not its implementation. They are important in implementing a consistent interface, as they enforce a set of requirements on implementing classes and make it easier to write code that can work with multiple implementations.First, we define a boilerplate for the `DatasetProvider` class, which is the same implemented in `archai.api.dataset_provider` module.<jupyter_code>from abc import abstractmethod
from typing import Any
from overrides import EnforceOverrides
class DatasetProvider(EnforceOverrides):
def __init__(self) -> None:
pass
@abstractmethod
def get_train_dataset(self) -> Any:
pass
@abstractmethod
def get_val_dataset(self) -> Any:
pass
@abstractmethod
def get_test_dataset(self) -> Any:
pass<jupyter_output><empty_output><jupyter_text>Torchvision-based Dataset ProviderIn the context of a custom dataset provider, using ABCs can help ensure that the provider implements the required methods and provides a consistent interface for loading and processing data. In this example, we will implement a Torchvision-based dataset provider, as follows:<jupyter_code>from typing import Callable, Optional
from overrides import overrides
from torch.utils.data import Dataset
from torchvision.datasets import MNIST, CIFAR10
from torchvision.transforms import ToTensor
class TorchvisionDatasetProvider(DatasetProvider):
SUPPORTED_DATASETS = {
"mnist": MNIST,
"cifar10": CIFAR10
}
def __init__(self, dataset: str, root: Optional[str] = "dataroot") -> None:
super().__init__()
self.dataset = dataset
self.root = root
@overrides
def get_train_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return self.SUPPORTED_DATASETS[self.dataset](
self.root,
train=True,
download=True,
transform=transform or ToTensor(),
target_transform=target_transform,
)
@overrides
def get_val_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
return self.SUPPORTED_DATASETS[self.dataset](
self.root,
train=False,
download=True,
transform=transform or ToTensor(),
target_transform=target_transform,
)
@overrides
def get_test_dataset(
self,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
) -> Dataset:
print(f"Testing set not available for `{self.dataset}`. Returning validation set ...")
return self.get_val_dataset(transform=transform, target_transform=target_transform)<jupyter_output><empty_output><jupyter_text>Using the Dataset ProviderFinally, one need to call the implemented methods to retrieve the datasets, as follows:<jupyter_code>dataset_provider = TorchvisionDatasetProvider("mnist")
train_dataset = dataset_provider.get_train_dataset()
val_dataset = dataset_provider.get_val_dataset()
print(train_dataset, val_dataset)
# As there is no `test_dataset` available, it returns the validation set
test_dataset = dataset_provider.get_test_dataset()
print(test_dataset)<jupyter_output>Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to dataroot\MNIST\raw\train-images-idx3-ubyte.gz
|
archai/docs/getting_started/notebooks/api/dataset_provider.ipynb/0
|
{
"file_path": "archai/docs/getting_started/notebooks/api/dataset_provider.ipynb",
"repo_id": "archai",
"token_count": 1383
}
| 338 |
Common Packages
===============
APEX Utilities
--------------
.. automodule:: archai.common.apex_utils
:members:
:undoc-members:
Atomic File Handler
-------------------
.. automodule:: archai.common.atomic_file_handler
:members:
:undoc-members:
AzureML Helper
--------------
.. automodule:: archai.common.azureml_helper
:members:
:undoc-members:
Common
------
.. automodule:: archai.common.common
:members:
:undoc-members:
Configuration
-------------
.. automodule:: archai.common.config
:members:
:undoc-members:
Delimited Text
--------------
.. automodule:: archai.common.delimited_text
:members:
:undoc-members:
Deprecation (Utilities)
-----------------------
.. automodule:: archai.common.deprecation_utils
:members:
:undoc-members:
Distributed (Utilities)
-----------------------
.. automodule:: archai.common.distributed_utils
:members:
:undoc-members:
File-Related (Utilities)
------------------------
.. automodule:: archai.common.file_utils
:members:
:undoc-members:
ML Performance (Utilities)
--------------------------
.. automodule:: archai.common.ml_perf_utils
:members:
:undoc-members:
ML (Utilities)
--------------
.. automodule:: archai.common.ml_utils
:members:
:undoc-members:
Model Summary
-------------
.. automodule:: archai.common.model_summary
:members:
:undoc-members:
Notebook Helper
---------------
.. automodule:: archai.common.notebook_helper
:members:
:undoc-members:
Ordered Dict Logger
-------------------
.. automodule:: archai.common.ordered_dict_logger
:members:
:undoc-members:
Ordered Dict Logger (Utilities)
-------------------------------
.. automodule:: archai.common.ordered_dict_logger_utils
:members:
:undoc-members:
Stopwatch
---------
.. automodule:: archai.common.stopwatch
:members:
:undoc-members:
Store
-----
.. automodule:: archai.common.store
:members:
:undoc-members:
Timing
------
.. automodule:: archai.common.timing
:members:
:undoc-members:
Utilities
---------
.. automodule:: archai.common.utils
:members:
:undoc-members:
YAML (Utilities)
----------------
.. automodule:: archai.common.yaml_utils
:members:
:undoc-members:
|
archai/docs/reference/api/archai.common.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.common.rst",
"repo_id": "archai",
"token_count": 793
}
| 339 |
Computer Vision
===============
.. toctree::
:maxdepth: 2
archai.discrete_search.search_spaces.cv.segmentation_dag
|
archai/docs/reference/api/archai.discrete_search.search_spaces.cv.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.discrete_search.search_spaces.cv.rst",
"repo_id": "archai",
"token_count": 45
}
| 340 |
Manual
======
Evaluater
---------
.. automodule:: archai.supergraph.algos.manual.manual_evaluater
:members:
:undoc-members:
Experiment Runner
-----------------
.. automodule:: archai.supergraph.algos.manual.manual_exp_runner
:members:
:undoc-members:
Searcher
--------
.. automodule:: archai.supergraph.algos.manual.manual_searcher
:members:
:undoc-members:
|
archai/docs/reference/api/archai.supergraph.algos.manual.rst/0
|
{
"file_path": "archai/docs/reference/api/archai.supergraph.algos.manual.rst",
"repo_id": "archai",
"token_count": 147
}
| 341 |
Changelog
=========
This section of the documentation is designed to provide a clear and concise overview of the changes that have been made to Archai, allowing users to stay informed about the latest developments and improvements.
The changelog is organized by version, with the most recent changes appearing at the top of the list. Each entry in the changelog includes the date of the change, a brief description of the change, and any relevant notes or details.
.. attention::
If a specific release is not depicted here, please check at `GitHub <https://github.com/microsoft/archai/releases>`_.
|
archai/docs/reference/changelog.rst/0
|
{
"file_path": "archai/docs/reference/changelog.rst",
"repo_id": "archai",
"token_count": 140
}
| 342 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import torch
from transformers.generation.stopping_criteria import StoppingCriteria
class MultipleTokenStoppingCriteria(StoppingCriteria):
def __init__(self, stop_tokens: torch.LongTensor) -> None:
self.stop_tokens = stop_tokens
self.max_stop_tokens = stop_tokens.shape[-1]
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
# Only gathers the maximum number of inputs compatible with stop tokens
# and checks whether generated inputs are equal to stop_tokens
generated_inputs = input_ids[0, -self.max_stop_tokens :]
equal_generated_inputs = torch.all(torch.eq(generated_inputs, self.stop_tokens), dim=1)
return torch.any(equal_generated_inputs)
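# Example usage (editor's sketch, not part of the original module): plug the criteria
# into Hugging Face `generate`. The "gpt2" checkpoint and the "\n\n" stop sequence are
# illustrative assumptions; `stop_tokens` must be a 2D LongTensor of shape
# (num_stop_sequences, max_stop_sequence_length).
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer
    from transformers.generation.stopping_criteria import StoppingCriteriaList
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    stop_tokens = torch.LongTensor([tokenizer.encode("\n\n")])
    criteria = StoppingCriteriaList([MultipleTokenStoppingCriteria(stop_tokens)])
    input_ids = tokenizer("Hello, my name is", return_tensors="pt").input_ids
    output = model.generate(input_ids, max_new_tokens=32, stopping_criteria=criteria)
    print(tokenizer.decode(output[0]))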
|
archai/research/lm_eval_harness/lm_eval_harness/utils/multiple_token_stopping_criteria.py/0
|
{
"file_path": "archai/research/lm_eval_harness/lm_eval_harness/utils/multiple_token_stopping_criteria.py",
"repo_id": "archai",
"token_count": 299
}
| 343 |
import json
from archai.common.common import common_init, expdir_abspath
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.utils.augmented_trainer import train_and_eval
logger = get_global_logger()
if __name__ == "__main__":
conf = common_init(
config_filepath="confs/aug/aug_train_cifar.yaml",
param_args=["--autoaug.loader.aug", "fa_reduced_cifar10", "--common.experiment_name", "autoaug_train"],
)
import time
t = time.time()
save_path = expdir_abspath("model.pth")
# result = train_and_eval(conf, val_ratio=conf['val_ratio'], val_fold=conf['val_fold'],
# save_path=save_path, only_eval=conf['only_eval'], metric='test')
# TODO: Will fail if val_ratio=0 since we are not using latest training infrastructure
# TODO: Move val_ratio, val_fold, metric to config file
result = train_and_eval(conf, val_ratio=0.2, val_fold=0, save_path=save_path, only_eval=False, metric="test")
elapsed = time.time() - t
logger.info("training done.")
logger.info("model: %s" % conf["autoaug"]["model"])
logger.info("augmentation: %s" % conf["autoaug"]["loader"]["aug"])
logger.info("\n" + json.dumps(result, indent=4))
logger.info("elapsed time: %.3f Hours" % (elapsed / 3600.0))
logger.info("top1 error in testset: %.4f" % (1.0 - result["top1_test"]))
logger.info("Save path: %s" % save_path)
|
archai/scripts/supergraph/augmented_train.py/0
|
{
"file_path": "archai/scripts/supergraph/augmented_train.py",
"repo_id": "archai",
"token_count": 582
}
| 344 |
# Experiment: {exp_name}
Results dir: {results_dir}
Report dir {out_dir}
Job count: {job_count}
{summary_text}
{details_text}
|
archai/scripts/supergraph/reports/details.md/0
|
{
"file_path": "archai/scripts/supergraph/reports/details.md",
"repo_id": "archai",
"token_count": 50
}
| 345 |
# Face Segmentation
The purpose of this example/tutorial is to demonstrate how to perform multi-objective NAS for image segmentation models
using Archai. This approach allows us to optimize the model's performance with respect to multiple objectives, such as
Intersection Over Union (IOU) and inference time for various deployment targets. Specifically, we will focus on
performing the search for both regular CPU targets and Qualcomm's Snapdragon processor, enabling us to optimize the
models for deployment on mobile devices as well.
## Dataset
We will use the [Face Synthetics dataset](https://github.com/microsoft/FaceSynthetics) for this example. The dataset
comprises 100,000 512x512 synthetic face images, each annotated with 19 per-pixel semantic segmentation labels. These
labels cover various aspects of the face, including background, skin, eyes, ears, nose, lips, clothing, and headwear.

## Search Space
The search space used for this example is based on the [Stacked HourglassNet
architecture](https://arxiv.org/abs/1603.06937). The search space consists of 3 different block types: `downsample`,
`upsample` and `skip`, with each block type containing one or more possible operations. Additionally, `downsample`
blocks also control the number of channels.

## Neural Architecture Search
To run a search job, use the following command
```shell
python3 search.py --dataset_dir [face_synthetics_dir] --output_dir [output_dir] --search_config [search_config_file]
```
Use `--search_config` to specify the search config file with the desired search settings. We provide two basic search
configurations based on the desired target (CPU or Snapdragon processor); `search.py` will use
[cpu_search.yaml](confs/cpu_search.yaml) if no search config file is passed.
* [cpu_search.yaml](confs/cpu_search.yaml)
* [snp_search.yaml](confs/snp_search.yaml)
By default, `search.py` will run multiple partial training jobs using Ray (2 jobs per GPU). To change the number of GPUs
per job, set `--gpus_per_job`, or use the `--serial_training` flag to disable parallel training jobs altogether.
The pareto architecture files selected by the search algorithm can be found under `[output_dir]/pareto_models_iter_XX`.
A table with the partial training performance and other objectives can be found in the
`[output_dir]/search_state_XX.csv` file.
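For a quick look at that table, the sketch below (not part of this repository) loads it with pandas; the exact objective column names depend on your search config, so inspect `df.columns` first.
```python
import pandas as pd
# Adjust the path and iteration number to match your run.
df = pd.read_csv("output/search_state_15.csv")
# List the objective columns produced by your search config, then sort by one of them.
print(df.columns.tolist())
print(df.sort_values(df.columns[-1]).head())
```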
## Running the Search on Azure ML
You can run the `aml.py` script to start the search on Azure ML and perform the
partial training in parallel also on Azure ML.
See [Azure ML setup instructions](aml/readme.md).
Note: to use `snp_search.yaml` you will also need to follow these instructions.
## Final Training
To fully train one of the selected architectures by the NAS algorithm use the following command
```shell
python3 train.py [path_to_final_architecture.json] --dataset_dir [face_synthetics_dir] --output_dir [output_dir] --epochs [n_epochs]
```
## NAS Results (CPU Target)
### Search

The selected architectures for the search with the `cpu_search.yaml` config file can be found in the
[archs/cpu_target/](archs/cpu_target/) directory or in the table below.
### Final Training
The table below shows the final results after fully training the final pareto architectures for 30 epochs using the
[train.py](./train.py) script.
| Architecture | Search iteration | ONNX Latency (s) | Full training Validation mIOU |
|:------------------------------------------------------------------------------------------------|---------------------:|---------------------:|--------------------------------:|
| [d650d48bdc83e75ae6ace9f20c17caa65fb5048a](archs/cpu_target/d650d48bdc83e75ae6ace9f20c17caa65fb5048a.json) | 9 | 0.070 | 0.88 |
| [07d670b8f76d8b9ca1e39379798d8b0046695b6a](archs/cpu_target/07d670b8f76d8b9ca1e39379798d8b0046695b6a.json) | 6 | 0.035 | 0.87 |
| [0cf2105627cd8ef8b86bdafd4987714dc2519827](archs/cpu_target/0cf2105627cd8ef8b86bdafd4987714dc2519827.json) | 8 | 0.027 | 0.85 |
| [1531903d654ecc930a0659e31b42c3efe6fe6ef3](archs/cpu_target/1531903d654ecc930a0659e31b42c3efe6fe6ef3.json) | 6 | 0.022 | 0.85 |
| [f22f089ae8c618117f4869f20213b344189bab9a](archs/cpu_target/f22f089ae8c618117f4869f20213b344189bab9a.json) | 4 | 0.025 | 0.84 |
| [b049ce7b41268d956af5410a3e838a2992d29232](archs/cpu_target/b049ce7b41268d956af5410a3e838a2992d29232.json) | 4 | 0.026 | 0.84 |
| [31cc57fe423f06a0f4d6ba000fe1e3decd3a442c](archs/cpu_target/31cc57fe423f06a0f4d6ba000fe1e3decd3a442c.json) | 8 | 0.019 | 0.84 |
| [1f1a7d04c4925d17f0575418cc974327ab71a93a](archs/cpu_target/1f1a7d04c4925d17f0575418cc974327ab71a93a.json) | 8 | 0.015 | 0.83 |
| [0c74d6d48a3514be3e80a84593c5f6b3f656fb3c](archs/cpu_target/0c74d6d48a3514be3e80a84593c5f6b3f656fb3c.json) | 8 | 0.016 | 0.82 |
| [1ab34d5fb31ef986650b5b112cfa3eca104b8107](archs/cpu_target/1ab34d5fb31ef986650b5b112cfa3eca104b8107.json) | 8 | 0.018 | 0.82 |
| [e6b8640bd2b83212e3256907a2382ae9bb799b65](archs/cpu_target/e6b8640bd2b83212e3256907a2382ae9bb799b65.json) | 5 | 0.012 | 0.82 |
| [82419a2ad358a34c508444c86db261616cf45ec3](archs/cpu_target/82419a2ad358a34c508444c86db261616cf45ec3.json) | 3 | 0.011 | 0.81 |
| [15914e86631373b2d9c823873ba6a88a1dc548c7](archs/cpu_target/15914e86631373b2d9c823873ba6a88a1dc548c7.json) | 9 | 0.010 | 0.77 |
| [de9067fa95074057353c67f62036a5b395a2d6a2](archs/cpu_target/de9067fa95074057353c67f62036a5b395a2d6a2.json) | 8 | 0.009 | 0.76 |
| [be543f6a3d1eadc9a42496f0b40871d82d4931df](archs/cpu_target/be543f6a3d1eadc9a42496f0b40871d82d4931df.json) | 8 | 0.007 | 0.73 |
## NAS Results (Snapdragon Target)
### Search

The selected architectures for the search with the `snp_search.yaml` config file can be found in the
[archs/snp_target/](archs/snp_target/) directory or in the table below.
### Final Training
The table below shows the final results after fully training the final pareto architectures for 30 epochs using the
[train.py](./train.py) script.
| Architecture | Search iteration | SNP Quantized Latency (s) | Partial Training Val. IOU | Full training Validation mIOU |
|:-----------------------------------------------------------------------------------------------------------|--------------------:|-----------------------------:|-----------------------------:|------------------:|
| [b14a1f0a3d17ea0f62022c2cf61da032fd7c9971](archs/snp_target/b14a1f0a3d17ea0f62022c2cf61da032fd7c9971.json) | 5 | 0.007 | 0.769 | 0.88 |
| [946fb0e27ef6ab9659b128006697a1b5a90e674c](archs/snp_target/946fb0e27ef6ab9659b128006697a1b5a90e674c.json) | 13 | 0.007 | 0.768 | 0.87 |
| [69f28a4c45aef58a67e2e2e0ce2d087b60b03173](archs/snp_target/69f28a4c45aef58a67e2e2e0ce2d087b60b03173.json) | 12 | 0.008 | 0.783 | 0.87 |
| [7bd6a76ec04e9f85c27d69a48557f689b0af2037](archs/snp_target/7bd6a76ec04e9f85c27d69a48557f689b0af2037.json) | 5 | 0.006 | 0.761 | 0.87 |
| [fb5511d6bee3bf52abed1527850c829cc4293098](archs/snp_target/fb5511d6bee3bf52abed1527850c829cc4293098.json) | 7 | 0.005 | 0.758 | 0.86 |
| [4fca939c89bf725f9efa47606c640e302f8ae9cc](archs/snp_target/4fca939c89bf725f9efa47606c640e302f8ae9cc.json) | 10 | 0.004 | 0.752 | 0.86 |
| [0ef9945b08c953586848a8507bc5d057fab7278d](archs/snp_target/0ef9945b08c953586848a8507bc5d057fab7278d.json) | 14 | 0.004 | 0.749 | 0.85 |
| [81f407d6f62de129e917c6b4f58021143a5df050](archs/snp_target/81f407d6f62de129e917c6b4f58021143a5df050.json) | 7 | 0.003 | 0.703 | 0.84 |
| [d47fc530a155c9c182773fc918fc3f17ed27a9d5](archs/snp_target/d47fc530a155c9c182773fc918fc3f17ed27a9d5.json) | 13 | 0.003 | 0.712 | 0.84 |
| [2aa378e5fad84ecc2114f8855a2cd8b02658cbdc](archs/snp_target/2aa378e5fad84ecc2114f8855a2cd8b02658cbdc.json) | 14 | 0.003 | 0.709 | 0.84 |
| [a223144f3b12adf3144478e5060bd99ef2a64ae9](archs/snp_target/a223144f3b12adf3144478e5060bd99ef2a64ae9.json) | 13 | 0.003 | 0.693 | 0.83 |
| [115fc8c962797a6dfd9c3f24fd5ccb4b60df95df](archs/snp_target/115fc8c962797a6dfd9c3f24fd5ccb4b60df95df.json) | 10 | 0.003 | 0.682 | 0.83 |
| [206e6e499eca01389b46c46989588ff04a2f3a42](archs/snp_target/206e6e499eca01389b46c46989588ff04a2f3a42.json) | 14 | 0.003 | 0.688 | 0.83 |
| [230f1fe115fac89432f5bccad7a01c65e3bb2918](archs/snp_target/230f1fe115fac89432f5bccad7a01c65e3bb2918.json) | 10 | 0.003 | 0.666 | 0.82 |
| [78c76774f378e083c788e56e86978f6d1d9f267c](archs/snp_target/78c76774f378e083c788e56e86978f6d1d9f267c.json) | 10 | 0.003 | 0.659 | 0.82 |
| [604ee54bcc767722bbdd3a610246aadca5a32214](archs/snp_target/604ee54bcc767722bbdd3a610246aadca5a32214.json) | 11 | 0.003 | 0.657 | 0.82 |
| [c570e333fd94f2d514eb1955fafc9eeeb012e750](archs/snp_target/c570e333fd94f2d514eb1955fafc9eeeb012e750.json) | 9 | 0.003 | 0.636 | 0.80 |
| [4786c03a18be281ad2fed235c86a5fe952fb4b0a](archs/snp_target/4786c03a18be281ad2fed235c86a5fe952fb4b0a.json) | 9 | 0.002 | 0.562 | 0.79 |
|
archai/tasks/face_segmentation/README.md/0
|
{
"file_path": "archai/tasks/face_segmentation/README.md",
"repo_id": "archai",
"token_count": 6121
}
| 346 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import sys
from archai.common.store import ArchaiStore
CONNECTION_NAME = 'MODEL_STORAGE_CONNECTION_STRING'
def status(con_str, experiment_name):
parser = argparse.ArgumentParser(description='Print status in .csv format')
parser.add_argument('--status', help='Optional match for the status column (default None).')
parser.add_argument('--name', help='Optional name of single status row to return (default None).')
parser.add_argument('--not_equal', '-ne', help='Switch the match to a not-equal comparison.', action="store_true")
parser.add_argument('--locked', help='Find entities that are locked by a node.', action="store_true")
parser.add_argument('--cols', help='Comma separated list of columns to report (default is to print all)')
args = parser.parse_args()
storage_account_name, storage_account_key = ArchaiStore.parse_connection_string(con_str)
store = ArchaiStore(storage_account_name, storage_account_key, table_name=experiment_name)
entities = store.get_all_status_entities(args.status, args.not_equal)
if args.locked:
entities = [e for e in entities if 'node' in e and e['node']]
if args.name:
entities = [e for e in entities if 'name' in e and e['name'] == args.name]
columns = None
if args.cols:
columns = [x.strip() for x in args.cols.split(',')]
store.print_entities(entities, columns)
if __name__ == '__main__':
experiment_name = os.getenv("EXPERIMENT_NAME", "facesynthetics")
con_str = os.getenv(CONNECTION_NAME)
if not con_str:
print(f"Please specify your {CONNECTION_NAME} environment variable.")
sys.exit(1)
status(con_str, experiment_name)
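# Example invocations (editor's sketch, not part of the original script). They assume
# the MODEL_STORAGE_CONNECTION_STRING environment variable is set; the column names
# passed to --cols depend on what your jobs write to the status table:
#   python status.py --status complete --cols name,status
#   python status.py --name my_model_001
#   python status.py --locked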
|
archai/tasks/face_segmentation/aml/azure/status.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/azure/status.py",
"repo_id": "archai",
"token_count": 610
}
| 347 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from test_snpe import download_results
from shutil import rmtree
SNPE_OUTPUT_DIR = 'snpe_output'
files = [x for x in os.listdir('data/test') if x.endswith(".bin")]
files.sort()
output_dir = SNPE_OUTPUT_DIR
if os.path.isdir(output_dir):
rmtree(output_dir)
os.makedirs(output_dir)
print("Found {} input files".format(len(files)))
download_results(files, 0, output_dir)
|
archai/tasks/face_segmentation/aml/snpe/fetch_results.py/0
|
{
"file_path": "archai/tasks/face_segmentation/aml/snpe/fetch_results.py",
"repo_id": "archai",
"token_count": 167
}
| 348 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import numpy as np
import sys
import tempfile
from pathlib import Path
from archai.discrete_search.api import ArchaiModel
from archai.discrete_search.search_spaces.config import ArchConfig
from archai.common.config import Config
from aml.util.pareto import calc_pareto_frontier
from search_space.hgnet import StackedHourglass
from aml.training.aml_training_evaluator import AmlPartialTrainingEvaluator
from aml.util.setup import configure_store
def main():
# input and output arguments
parser = argparse.ArgumentParser(description="Fully trains the final pareto curve models in parallel on Azure ML.")
parser.add_argument("--config", type=str, help="location of the aml_search.yaml file", required=True)
parser.add_argument("--output", type=str, help="location of local output files", default='output')
parser.add_argument('--epochs', type=float, help='number of epochs to train (default 1)', default=30)
parser.add_argument('--timeout', type=int, help='Timeout for training (in seconds)(default 28800 seconds = 8 hours)', default=28800)
args = parser.parse_args()
config = Config(args.config, resolve_env_vars=True)
aml_config = config['aml']
store = configure_store(aml_config)
output_path = Path(os.path.realpath(args.output))
evaluator = AmlPartialTrainingEvaluator(config, output_path, args.epochs, args.timeout)
store = evaluator.store
experiment_name = aml_config['experiment_name']
training = config['training']
metric_key = training['metric_key']
search_config = config['search']
target_metric_key = search_config['target']['metric_key']
ss_config = search_config['search_space']
ss_config_params = ss_config.get('params', {})
num_classes = ss_config_params.get('num_classes', 18)
points = []
for e in store.get_all_status_entities(status='complete'):
if metric_key in e and target_metric_key in e:
y = float(e[metric_key])
x = float(e[target_metric_key])
points += [[x, y, e]]
if len(points) == 0:
print(f"No models found with required metrics '{metric_key}' and '{target_metric_key}'")
sys.exit(1)
sorted, pareto = calc_pareto_frontier(points)
print(f'Found {len(pareto)} models on pareto frontier')
# change the key so the evaluator updates a different field this time and
# does not think training is already complete.
evaluator.metric_key = 'final_val_iou'
training['metric_key'] = 'final_val_iou'
models = []
with tempfile.TemporaryDirectory() as tempdir:
for i in pareto:
x, y, e = sorted[i]
id = e['name']
iteration = int(e['iteration']) if 'iteration' in e else 0
training_metric = y
target_metric = x
file_name = f'{id}.json'
print(f'downloading {file_name} with {metric_key}={training_metric} and {target_metric_key}={target_metric} from iteration {iteration} ...')
found = store.download(f'{experiment_name}/{id}', tempdir, specific_file=file_name)
if len(found) == 1:
arch_config = ArchConfig.from_file(os.path.join(tempdir, file_name))
model = StackedHourglass(arch_config, num_classes=num_classes)
models += [ArchaiModel(model, archid=id[3:], metadata={'config' : arch_config, 'entity': e})]
else:
print("Skipping model {id} because the .json arch config file was not found in the store.")
# Ok, now fully train these models!
print(f'Kicking off full training on {len(models)} models...')
for model in models:
e = model.metadata['entity']
e = store.get_status(e['name'])
e['status'] = 'preparing'
store.merge_status_entity(e)
evaluator.send(model)
evaluator.fetch_all()
if __name__ == "__main__":
main()
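# Example invocation (editor's sketch, not part of the original script); the config
# path is an assumption and should point at your aml_search.yaml:
#   python train_pareto.py --config confs/aml_search.yaml --output output --epochs 30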
|
archai/tasks/face_segmentation/train_pareto.py/0
|
{
"file_path": "archai/tasks/face_segmentation/train_pareto.py",
"repo_id": "archai",
"token_count": 1536
}
| 349 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""Search space for facial landmark detection task"""
import copy
import json
import math
import random
import re
import sys
from hashlib import sha1
from typing import List
import numpy as np
import pandas as pd
import torch
from overrides.overrides import overrides
from pathlib import Path
from archai.common.common import logger
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.search_space import (
BayesOptSearchSpace,
DiscreteSearchSpace,
EvolutionarySearchSpace,
)
from quantizable_model import CustomQuantizableMobileNetV2, CustomQuantizableInvertedResidual
from model import CustomMobileNetV2
def _gen_tv_mobilenet(arch_def,
channel_multiplier=1.0,
depth_multiplier=1.0,
num_classes=1000,
qat: bool = False,
qat_skip_layers: int = 0):
"""generate mobilenet v2 from torchvision. Adapted from timm source code"""
assert len(arch_def) > 0, "arch_def is empty"
ir_setting = []
for block_def in arch_def:
parts = block_def[0].split("_")
t = 1
c = 32
n = 1
s = 1
k = 3
ds_block = False
for part in parts:
if part.startswith("ds"):
t = 1
ds_block = True
elif part.startswith("r"):
n = int(part[1:])
elif part.startswith("s"):
s = int(part[1:])
elif part.startswith("e"):
t = int(part[1:])
elif part.startswith("c"):
c = int(part[1:])
elif part.startswith("k"):
k = int(part[1:])
elif part.startswith("ir"):
pass
else:
raise Exception(f"Invalid block definition part {part}")
def make_divisible(v, divisor=8, min_value=None, round_limit=0.9):
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < round_limit * v:
new_v += divisor
return new_v
if not ds_block:
n = math.ceil(n * depth_multiplier)
c = make_divisible(c * channel_multiplier)
ir_setting.append([t, c, n, s, k])
if qat:
model = CustomQuantizableMobileNetV2(block=CustomQuantizableInvertedResidual, num_skip_qat_layers=qat_skip_layers, inverted_residual_setting=ir_setting, dropout=0, num_classes=num_classes)
else:
model = CustomMobileNetV2(inverted_residual_setting=ir_setting, dropout=0, num_classes=num_classes)
return model
def create_model_from_search_results(archid, csv_file: str, num_classes: int, qat: bool = False, qat_skip_layers: int = 0) :
csv_path = Path(csv_file)
assert csv_path.exists()
df = pd.read_csv(csv_path)
row = df[df["archid"] == archid]
cfg = json.loads(row["config"].to_list()[0])
assert len(cfg) != 0, "cfg is empty. Make sure the cvs file is valid"
model = _gen_tv_mobilenet(
cfg["arch_def"],
channel_multiplier=cfg["channel_multiplier"],
depth_multiplier=cfg["depth_multiplier"],
num_classes=num_classes,
qat=qat,
qat_skip_layers=qat_skip_layers,
)
return model
class DiscreteSearchSpaceMobileNetV2(DiscreteSearchSpace):
    def __init__(self, args, num_classes=140):
        """Default MobileNetV2 setting in a more readable format
        t - exp factor, c - channels, n - number of block repeats, s - stride
        t, c, n, s
        [1, 16, 1, 1],
        [6, 24, 2, 2],
        [6, 32, 3, 2],
        [6, 64, 4, 2],
        [6, 96, 3, 1],
        [6, 160, 3, 2],
        [6, 320, 1, 1]"""
        super().__init__()
# set up a few models configs with variable depth and width
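        # (the same default MobileNetV2 block stack at channel/depth multipliers 1.0, 0.75, 0.5 and 1.25)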
self.cfgs_orig = [
{
"arch_def": [
["ds_r1_k3_s1_c16"],
["ir_r2_k3_s2_e6_c24"],
["ir_r3_k3_s2_e6_c32"],
["ir_r4_k3_s2_e6_c64"],
["ir_r3_k3_s1_e6_c96"],
["ir_r3_k3_s2_e6_c160"],
["ir_r1_k3_s1_e6_c320"],
],
"channel_multiplier": 1.00,
"depth_multiplier": 1.00,
},
{
"arch_def": [
["ds_r1_k3_s1_c16"],
["ir_r2_k3_s2_e6_c24"],
["ir_r3_k3_s2_e6_c32"],
["ir_r4_k3_s2_e6_c64"],
["ir_r3_k3_s1_e6_c96"],
["ir_r3_k3_s2_e6_c160"],
["ir_r1_k3_s1_e6_c320"],
],
"channel_multiplier": 0.75,
"depth_multiplier": 0.75,
},
{
"arch_def": [
["ds_r1_k3_s1_c16"],
["ir_r2_k3_s2_e6_c24"],
["ir_r3_k3_s2_e6_c32"],
["ir_r4_k3_s2_e6_c64"],
["ir_r3_k3_s1_e6_c96"],
["ir_r3_k3_s2_e6_c160"],
["ir_r1_k3_s1_e6_c320"],
],
"channel_multiplier": 0.5,
"depth_multiplier": 0.5,
},
{
"arch_def": [
["ds_r1_k3_s1_c16"],
["ir_r2_k3_s2_e6_c24"],
["ir_r3_k3_s2_e6_c32"],
["ir_r4_k3_s2_e6_c64"],
["ir_r3_k3_s1_e6_c96"],
["ir_r3_k3_s2_e6_c160"],
["ir_r1_k3_s1_e6_c320"],
],
"channel_multiplier": 1.25,
"depth_multiplier": 1.25,
},
]
self.config_all = {}
self.arch_counter = 0
self.num_classes = num_classes
self.r_range = tuple(args.r_range)
self.e_range = tuple(args.e_range)
self.k_range = tuple(args.k_range)
self.channel_mult_range = tuple(args.channel_mult_range)
self.depth_mult_range = tuple(args.depth_mult_range)
# make each run deterministic
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
@overrides
def random_sample(self) -> ArchaiModel:
"""Uniform random sample an architecture, always start with a few seed models"""
if self.arch_counter >= 0 and self.arch_counter <= 3:
cfg = copy.deepcopy(self.cfgs_orig[self.arch_counter])
arch = self._create_uniq_arch(cfg)
else:
arch = None
while arch is None:
cfg = self._rand_modify_config(
self.cfgs_orig[0],
len(self.e_range),
len(self.r_range),
len(self.k_range),
len(self.channel_mult_range),
len(self.depth_mult_range),
)
arch = self._create_uniq_arch(cfg)
assert arch is not None
logger.info(f"{sys._getframe(0).f_code.co_name} return archid = {arch.archid} with config = {arch.metadata}")
return arch
@overrides
def save_arch(self, model: ArchaiModel, file: str):
with open(file, "w") as fp:
cfg = model.metadata["config"]
json.dump({"config": cfg}, fp)
@overrides
def load_arch(self, file: str):
        with open(file) as fp:
            metadata = json.load(fp)
config = json.loads(metadata["config"])
arch = self._create_uniq_arch(config)
return arch
@overrides
def save_model_weights(self, model: ArchaiModel, file: str):
        state_dict = model.arch.state_dict()
torch.save(state_dict, file)
@overrides
def load_model_weights(self, model: ArchaiModel, file: str):
model.arch.load_state_dict(torch.load(file))
def _mod_block_cfg(self, cfg, type: str, block: int, delta: int, curr_range) -> str:
"""modify the cfg of a particular block"""
block_cfg = cfg["arch_def"][block][0]
res = re.search(rf"_{type}(\d)_", block_cfg)
if res is not None:
curr = res.group(1)
curr_idx = curr_range.index(int(curr))
mod_range = curr_range[max(0, curr_idx - delta) : min(len(curr_range), curr_idx + delta + 1)]
modified = random.choice(mod_range)
block_cfg = block_cfg[0 : res.start() + 2] + str(modified) + block_cfg[res.end() - 1 :]
return block_cfg
    def _mod_multiplier(self, cfg, type: str, delta: int, curr_range) -> float:
        """Modify either the channel or depth multiplier."""
curr = cfg[f"{type}_multiplier"]
curr_idx = curr_range.index(curr)
mod_range = curr_range[max(0, curr_idx - delta) : min(len(curr_range), curr_idx + delta + 1)]
modified = random.choice(mod_range)
return modified
def _rand_modify_config(self, cfgs_orig, delta_e, delta_r, delta_k, delta_ch_mult, delta_depth_mult):
"""randomly choice a block and modify the corresponding config"""
cfg = copy.deepcopy(cfgs_orig)
block_e = random.choice(range(2, 7))
block_cfg_e = self._mod_block_cfg(cfg, "e", block_e, delta_e, self.e_range)
cfg["arch_def"][block_e][0] = block_cfg_e
block_r = random.choice(range(2, 6))
block_cfg_r = self._mod_block_cfg(cfg, "r", block_r, delta_r, self.r_range)
cfg["arch_def"][block_r][0] = block_cfg_r
block_k = random.choice(range(1, 7))
block_cfg_k = self._mod_block_cfg(cfg, "k", block_k, delta_k, self.k_range)
cfg["arch_def"][block_k][0] = block_cfg_k
cfg["channel_multiplier"] = self._mod_multilier(cfg, "channel", delta_ch_mult, self.channel_mult_range)
cfg["depth_multiplier"] = self._mod_multilier(cfg, "depth", delta_depth_mult, self.depth_mult_range)
return cfg
def _create_uniq_arch(self, cfg):
"""create a unique arch from the config"""
cfg_str = json.dumps(cfg)
archid = sha1(cfg_str.encode("ascii")).hexdigest()[0:8]
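        # archid is the first 8 hex chars of the SHA1 of the JSON config, so identical configs
        # map to the same id; duplicates are only logged and are returned unchanged.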
if cfg_str in list(self.config_all.values()):
print(f"Creating duplicated model: {cfg_str} ")
else:
self.config_all[archid] = cfg_str
self.arch_counter += 1
logger.info(f"adding model to search space config_all, archid = {archid}, config = {cfg_str}")
model = _gen_tv_mobilenet(
cfg["arch_def"],
channel_multiplier=cfg["channel_multiplier"],
depth_multiplier=cfg["depth_multiplier"],
num_classes=self.num_classes,
)
arch = ArchaiModel(model, archid, metadata={"config": cfg_str})
return arch
class ConfigSearchSpaceExt(DiscreteSearchSpaceMobileNetV2, EvolutionarySearchSpace, BayesOptSearchSpace):
"""Search space for config search"""
@overrides
def mutate(self, model_1: ArchaiModel) -> ArchaiModel:
cfg_1 = json.loads(model_1.metadata["config"])
arch = None
while arch is None:
cfg = self._rand_modify_config(
cfg_1,
len(self.e_range),
len(self.r_range),
len(self.k_range),
len(self.channel_mult_range),
len(self.depth_mult_range),
)
arch = self._create_uniq_arch(cfg)
assert arch is not None
logger.info(f"{sys._getframe(0).f_code.co_name} return archid = {arch.archid} with config = {arch.metadata}")
return arch
@overrides
def crossover(self, model_list: List[ArchaiModel]) -> ArchaiModel:
model_1, model_2 = model_list[:2]
cfg_1 = json.loads(model_1.metadata["config"])
cfg_2 = json.loads(model_2.metadata["config"])
cfg = copy.deepcopy(cfg_1)
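        # Mix the two parents: every block from index 2 onwards and both multipliers are
        # taken at random from either parent, then registered as a new architecture.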
arch = None
while arch is None:
for block in range(2, len(cfg["arch_def"])):
cfg["arch_def"][block] = random.choice((cfg_1["arch_def"][block], cfg_2["arch_def"][block]))
cfg["channel_multiplier"] = random.choice((cfg_1["channel_multiplier"], cfg_2["channel_multiplier"]))
cfg["depth_multiplier"] = random.choice((cfg_1["depth_multiplier"], cfg_2["depth_multiplier"]))
arch = self._create_uniq_arch(cfg)
assert arch is not None
logger.info(f"{sys._getframe(0).f_code.co_name} return archid = {arch.archid} with config = {arch.metadata}")
return arch
@overrides
def encode(self, model: ArchaiModel) -> np.ndarray:
# TBD, this is not needed for this implementation
assert False
if __name__ == "__main__":
"""unit test for this module"""
from torchinfo import summary
img_size = 128
def create_random_model(ss):
arch = ss.random_sample()
model = arch.arch
model.to("cpu").eval()
pred = model(torch.randn(1, 3, img_size, img_size))
assert pred is not None
model_summary = summary(
model,
input_size=(1, 3, img_size, img_size),
col_names=["input_size", "output_size", "num_params", "kernel_size", "mult_adds"],
device="cpu",
)
print(model_summary)
return arch
    from argparse import Namespace

    # DiscreteSearchSpaceMobileNetV2 needs an `args` object carrying the search ranges;
    # the values below are illustrative placeholders, not the ranges used by the real task.
    args = Namespace(r_range=(1, 2, 3, 4, 5), e_range=(2, 4, 6), k_range=(3, 5, 7),
                     channel_mult_range=(0.5, 0.75, 1.0, 1.25), depth_mult_range=(0.5, 0.75, 1.0, 1.25))
    ss = DiscreteSearchSpaceMobileNetV2(args)
for i in range(0, 2):
archai_model = create_random_model(ss)
print(archai_model.metadata["config"])
|
archai/tasks/facial_landmark_detection/search_space.py/0
|
{
"file_path": "archai/tasks/facial_landmark_detection/search_space.py",
"repo_id": "archai",
"token_count": 7081
}
| 350 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Any
from unittest.mock import MagicMock
from overrides import overrides
from archai.api.dataset_provider import DatasetProvider
class MyDatasetProvider(DatasetProvider):
def __init__(self) -> None:
super().__init__()
@overrides
def get_train_dataset(self) -> Any:
return MagicMock()
@overrides
def get_val_dataset(self) -> Any:
return MagicMock()
@overrides
def get_test_dataset(self) -> Any:
return MagicMock()
def test_my_dataset_provider():
dataset_provider = MyDatasetProvider()
train_dataset = dataset_provider.get_train_dataset()
val_dataset = dataset_provider.get_val_dataset()
test_dataset = dataset_provider.get_test_dataset()
# Assert that mocked datasets are returned
assert train_dataset is not None
assert val_dataset is not None
assert test_dataset is not None
|
archai/tests/api/test_dataset_provider.py/0
|
{
"file_path": "archai/tests/api/test_dataset_provider.py",
"repo_id": "archai",
"token_count": 370
}
| 351 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
import shutil
from archai.datasets.nlp.nvidia_dataset_provider import NvidiaDatasetProvider
def test_nvidia_dataset_provider():
# make sure tests can run in parallel and not clobber each other's dataroot.
unique_data_root = 'test_nvidia_dataset_provider_dataroot'
os.makedirs(f"{unique_data_root}/olx_tmp")
with open(f"{unique_data_root}/olx_tmp/train.txt", "w") as f:
f.write("train")
with open(f"{unique_data_root}/olx_tmp/valid.txt", "w") as f:
f.write("valid")
with open(f"{unique_data_root}/olx_tmp/test.txt", "w") as f:
f.write("test")
# Assert that we can individually load training, validation and test datasets
dataset_provider = NvidiaDatasetProvider("olx_tmp", dataset_dir=f"{unique_data_root}/olx_tmp", refresh_cache=True)
train_dataset = dataset_provider.get_train_dataset()
assert len(train_dataset) == 7
val_dataset = dataset_provider.get_val_dataset()
assert len(val_dataset) == 7
test_dataset = dataset_provider.get_test_dataset()
assert len(test_dataset) == 6
shutil.rmtree("cache")
shutil.rmtree(unique_data_root)
|
archai/tests/datasets/nlp/test_nvidia_dataset_provider.py/0
|
{
"file_path": "archai/tests/datasets/nlp/test_nvidia_dataset_provider.py",
"repo_id": "archai",
"token_count": 489
}
| 352 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import torch
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.api.search_results import SearchResults
from archai.discrete_search.evaluators.pt_profiler import TorchNumParameters
from archai.discrete_search.search_spaces.nlp.transformer_flex.search_space import (
TransformerFlexSearchSpace,
)
def test_search_results():
search_space = TransformerFlexSearchSpace("gpt2")
objectives = SearchObjectives()
search_results = SearchResults(search_space, objectives)
# Assert that attributes are set correctly
assert search_results.search_space == search_space
assert search_results.objectives == objectives
assert search_results.iteration_num == 0
assert len(search_results.search_walltimes) == 0
assert len(search_results.results) == 0
def test_add_iteration_results():
search_space = TransformerFlexSearchSpace("gpt2")
objectives = SearchObjectives()
objectives.add_objective("n_parameters", TorchNumParameters(), False)
search_results = SearchResults(search_space, objectives)
models = [ArchaiModel(torch.nn.Linear(10, 1), "archid")]
obj_name = objectives.objective_names[0]
evaluation_results = {obj_name: np.array([0.5], dtype=np.float32)}
search_results.add_iteration_results(models, evaluation_results)
# Assert that attributes are set correctly after calling `add_iteration_results`
assert search_results.iteration_num == 1
assert len(search_results.search_walltimes) == 1
assert len(search_results.results) == 1
assert len(search_results.results[0]["models"]) == 1
assert search_results.results[0][obj_name][0] == 0.5
|
archai/tests/discrete_search/api/test_search_results.py/0
|
{
"file_path": "archai/tests/discrete_search/api/test_search_results.py",
"repo_id": "archai",
"token_count": 582
}
| 353 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import torch
from archai.discrete_search.api.archai_model import ArchaiModel
from archai.discrete_search.api.search_objectives import SearchObjectives
from archai.discrete_search.evaluators.functional import EvaluationFunction
from archai.discrete_search.utils.multi_objective import (
get_non_dominated_sorting,
get_pareto_frontier,
)
def test_get_pareto_frontier():
models = [ArchaiModel(torch.nn.Linear(10, 1), "archid") for _ in range(5)]
evaluation_results = {
"obj1": np.array([1, 2, 3, 4, 5]),
"obj2": np.array([5, 4, 3, 2, 1]),
}
objectives = SearchObjectives()
objectives.add_objective("obj1", EvaluationFunction(lambda m, b: b), higher_is_better=True)
objectives.add_objective("obj2", EvaluationFunction(lambda m, b: b), higher_is_better=False)
result = get_pareto_frontier(models, evaluation_results, objectives)
    # Assert that the result is a dictionary
    assert isinstance(result, dict)
    # Assert that the dictionary has the required keys
    assert "models" in result
    assert "evaluation_results" in result
    assert "indices" in result
# Assert that the length of each list is the same
assert len(result["models"]) == len(result["evaluation_results"]["obj1"]) == len(result["indices"])
def test_get_non_dominated_sorting():
models = [ArchaiModel(torch.nn.Linear(10, 1), "archid") for _ in range(5)]
evaluation_results = {
"obj1": np.array([1, 2, 3, 4, 5]),
"obj2": np.array([5, 4, 3, 2, 1]),
}
objectives = SearchObjectives()
objectives.add_objective("obj1", EvaluationFunction(lambda m, b: b), higher_is_better=True)
objectives.add_objective("obj2", EvaluationFunction(lambda m, b: b), higher_is_better=False)
result = get_non_dominated_sorting(models, evaluation_results, objectives)
# Assert that the result is a list of dictionaries
assert isinstance(result, list)
assert all(isinstance(r, dict) for r in result)
# Assert that each dictionary has the required keys
assert all("models" in r for r in result)
assert all("evaluation_results" in r for r in result)
assert all("indices" in r for r in result)
    # Assert that each model ends up in its own non-dominated front
    assert len(result) == 5
    # Assert that the lengths are consistent within each front
    assert all(len(r["models"]) == len(r["evaluation_results"]["obj1"]) == len(r["indices"]) for r in result)
|
archai/tests/discrete_search/utils/test_multi_objective.py/0
|
{
"file_path": "archai/tests/discrete_search/utils/test_multi_objective.py",
"repo_id": "archai",
"token_count": 867
}
| 354 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import pytest
import torch
from archai.quantization.modules import FakeDynamicQuantLinear
from archai.quantization.qat import (
DYNAMIC_QAT_MODULE_MAP,
ONNX_DYNAMIC_QAT_MODULE_MAP,
float_to_qat_modules,
prepare_with_qat,
qat_to_float_modules,
)
@pytest.fixture
def model():
class DummyModel(torch.nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10)
return DummyModel()
def test_float_to_qat_modules(model):
# Assert that the QAT linear layer is an instance of `FakeDynamicQuantLinear`
float_to_qat_modules(model, qconfig=torch.quantization.get_default_qat_qconfig("qnnpack"))
assert isinstance(model.linear, FakeDynamicQuantLinear)
def test_qat_to_float_modules(model):
# Assert that the converted model linear layer is an instance of `torch.nn.Linear`
float_to_qat_modules(model, qconfig=torch.quantization.get_default_qat_qconfig("qnnpack"))
qat_to_float_modules(model)
assert isinstance(model.linear, torch.nn.Linear)
def test_prepare_with_qat(model):
# Assert normal QAT preparation
model_copy = copy.deepcopy(model)
prepare_with_qat(model_copy)
assert isinstance(model_copy.linear, DYNAMIC_QAT_MODULE_MAP[torch.nn.Linear])
# Assert normal QAT preparation without `inplace`
prepared_model = prepare_with_qat(model, inplace=False)
assert isinstance(prepared_model.linear, DYNAMIC_QAT_MODULE_MAP[torch.nn.Linear])
# Assert ONNX-compatible QAT preparation
model_copy = copy.deepcopy(model)
prepare_with_qat(model_copy, onnx_compatible=True)
assert isinstance(model_copy.linear, ONNX_DYNAMIC_QAT_MODULE_MAP[torch.nn.Linear])
# Assert ONNX-compatible QAT preparation without `inplace`
prepared_model = prepare_with_qat(model, inplace=False, onnx_compatible=True)
assert isinstance(prepared_model.linear, ONNX_DYNAMIC_QAT_MODULE_MAP[torch.nn.Linear])
|
archai/tests/quantization/test_qat.py/0
|
{
"file_path": "archai/tests/quantization/test_qat.py",
"repo_id": "archai",
"token_count": 788
}
| 355 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import pytest
import torch
from torch.optim import SGD
from archai.trainers.cyclic_cosine_scheduler import CyclicCosineDecayLR
INITIAL_LR = 1.0
@pytest.fixture
def optimizer():
return SGD([torch.randn(2, 2, requires_grad=True)], INITIAL_LR)
@pytest.fixture
def scheduler(optimizer):
return CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1)
def test_cyclic_cosine_decay_lr_init(optimizer):
# Assert for invalid init_decay_epochs input
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=0, min_decay_lr=0.1)
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=-1, min_decay_lr=0.1)
# Assert for invalid min_decay_lr input
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=[0.1, 0.2])
# Assert for invalid restart_interval input
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, restart_interval=0)
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, restart_interval=-1)
# Assert for invalid restart_interval_multiplier input
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, restart_interval_multiplier=0)
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, restart_interval_multiplier=-1)
# Assert for invalid restart_lr input
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, restart_lr=[0.1, 0.2])
# Assert for invalid warmup_epochs input
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, warmup_epochs=0)
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, warmup_epochs=-1)
    # Assert for missing or invalid warmup_start_lr input
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, warmup_epochs=1)
with pytest.raises(ValueError):
CyclicCosineDecayLR(optimizer, init_decay_epochs=5, min_decay_lr=0.1, warmup_epochs=1, warmup_start_lr=1)
with pytest.raises(ValueError):
CyclicCosineDecayLR(
optimizer, init_decay_epochs=5, min_decay_lr=0.1, warmup_epochs=1, warmup_start_lr=[0.1, 0.2]
)
def test_cyclic_cosine_decay_lr_step(scheduler, optimizer):
# Assert that the learning rate decreases after each step
lrs = []
for _ in range(15):
scheduler.step()
lrs.append([param_group["lr"] for param_group in optimizer.param_groups])
assert lrs[-1] < lrs[0]
# Assert that the learning rate restarts after the specified number of epochs
lrs = []
scheduler = CyclicCosineDecayLR(optimizer, init_decay_epochs=10, min_decay_lr=0.1, restart_interval=5)
for _ in range(15):
scheduler.step()
lrs.append([param_group["lr"] for param_group in optimizer.param_groups])
assert lrs[-1] == [INITIAL_LR]
# Assert that the learning rate follows a warmup schedule
lrs = []
scheduler = CyclicCosineDecayLR(
optimizer, init_decay_epochs=10, min_decay_lr=0.1, warmup_epochs=5, warmup_start_lr=0.01
)
for _ in range(10):
scheduler.step()
lrs.append([param_group["lr"] for param_group in optimizer.param_groups])
assert lrs[-1] > lrs[0]
|
archai/tests/trainers/test_cyclic_cosine_scheduler.py/0
|
{
"file_path": "archai/tests/trainers/test_cyclic_cosine_scheduler.py",
"repo_id": "archai",
"token_count": 1591
}
| 356 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class TaskAgentClient(Client):
"""TaskAgent
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(TaskAgentClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = 'a85b8835-c1a1-4aac-ae97-1c3d0ba72dbd'
def add_agent_cloud(self, agent_cloud):
"""AddAgentCloud.
[Preview API]
:param :class:`<TaskAgentCloud> <azure.devops.v7_1.task_agent.models.TaskAgentCloud>` agent_cloud:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v7_1.task_agent.models.TaskAgentCloud>`
"""
content = self._serialize.body(agent_cloud, 'TaskAgentCloud')
response = self._send(http_method='POST',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='7.1-preview.1',
content=content)
return self._deserialize('TaskAgentCloud', response)
def delete_agent_cloud(self, agent_cloud_id):
"""DeleteAgentCloud.
[Preview API]
:param int agent_cloud_id:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v7_1.task_agent.models.TaskAgentCloud>`
"""
route_values = {}
if agent_cloud_id is not None:
route_values['agentCloudId'] = self._serialize.url('agent_cloud_id', agent_cloud_id, 'int')
response = self._send(http_method='DELETE',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('TaskAgentCloud', response)
def get_agent_cloud(self, agent_cloud_id):
"""GetAgentCloud.
[Preview API]
:param int agent_cloud_id:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v7_1.task_agent.models.TaskAgentCloud>`
"""
route_values = {}
if agent_cloud_id is not None:
route_values['agentCloudId'] = self._serialize.url('agent_cloud_id', agent_cloud_id, 'int')
response = self._send(http_method='GET',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('TaskAgentCloud', response)
def get_agent_clouds(self):
"""GetAgentClouds.
[Preview API]
:rtype: [TaskAgentCloud]
"""
response = self._send(http_method='GET',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='7.1-preview.1')
return self._deserialize('[TaskAgentCloud]', self._unwrap_collection(response))
def update_agent_cloud(self, updated_cloud, agent_cloud_id):
"""UpdateAgentCloud.
[Preview API]
:param :class:`<TaskAgentCloud> <azure.devops.v7_1.task_agent.models.TaskAgentCloud>` updated_cloud:
:param int agent_cloud_id:
:rtype: :class:`<TaskAgentCloud> <azure.devops.v7_1.task_agent.models.TaskAgentCloud>`
"""
route_values = {}
if agent_cloud_id is not None:
route_values['agentCloudId'] = self._serialize.url('agent_cloud_id', agent_cloud_id, 'int')
content = self._serialize.body(updated_cloud, 'TaskAgentCloud')
response = self._send(http_method='PATCH',
location_id='bfa72b3d-0fc6-43fb-932b-a7f6559f93b9',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskAgentCloud', response)
def get_agent_cloud_types(self):
"""GetAgentCloudTypes.
[Preview API] Get agent cloud types.
:rtype: [TaskAgentCloudType]
"""
response = self._send(http_method='GET',
location_id='5932e193-f376-469d-9c3e-e5588ce12cb5',
version='7.1-preview.1')
return self._deserialize('[TaskAgentCloudType]', self._unwrap_collection(response))
def add_agent(self, agent, pool_id):
"""AddAgent.
[Preview API] Adds an agent to a pool. You probably don't want to call this endpoint directly. Instead, [configure an agent](https://docs.microsoft.com/azure/devops/pipelines/agents/agents) using the agent download package.
:param :class:`<TaskAgent> <azure.devops.v7_1.task_agent.models.TaskAgent>` agent: Details about the agent being added
:param int pool_id: The agent pool in which to add the agent
:rtype: :class:`<TaskAgent> <azure.devops.v7_1.task_agent.models.TaskAgent>`
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
content = self._serialize.body(agent, 'TaskAgent')
response = self._send(http_method='POST',
location_id='e298ef32-5878-4cab-993c-043836571f42',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskAgent', response)
def delete_agent(self, pool_id, agent_id):
"""DeleteAgent.
[Preview API] Delete an agent. You probably don't want to call this endpoint directly. Instead, [use the agent configuration script](https://docs.microsoft.com/azure/devops/pipelines/agents/agents) to remove an agent from your organization.
:param int pool_id: The pool ID to remove the agent from
:param int agent_id: The agent ID to remove
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
if agent_id is not None:
route_values['agentId'] = self._serialize.url('agent_id', agent_id, 'int')
self._send(http_method='DELETE',
location_id='e298ef32-5878-4cab-993c-043836571f42',
version='7.1-preview.1',
route_values=route_values)
def get_agent(self, pool_id, agent_id, include_capabilities=None, include_assigned_request=None, include_last_completed_request=None, property_filters=None):
"""GetAgent.
[Preview API] Get information about an agent.
:param int pool_id: The agent pool containing the agent
:param int agent_id: The agent ID to get information about
:param bool include_capabilities: Whether to include the agent's capabilities in the response
:param bool include_assigned_request: Whether to include details about the agent's current work
:param bool include_last_completed_request: Whether to include details about the agents' most recent completed work
:param [str] property_filters: Filter which custom properties will be returned
:rtype: :class:`<TaskAgent> <azure.devops.v7_1.task_agent.models.TaskAgent>`
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
if agent_id is not None:
route_values['agentId'] = self._serialize.url('agent_id', agent_id, 'int')
query_parameters = {}
if include_capabilities is not None:
query_parameters['includeCapabilities'] = self._serialize.query('include_capabilities', include_capabilities, 'bool')
if include_assigned_request is not None:
query_parameters['includeAssignedRequest'] = self._serialize.query('include_assigned_request', include_assigned_request, 'bool')
if include_last_completed_request is not None:
query_parameters['includeLastCompletedRequest'] = self._serialize.query('include_last_completed_request', include_last_completed_request, 'bool')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
response = self._send(http_method='GET',
location_id='e298ef32-5878-4cab-993c-043836571f42',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('TaskAgent', response)
def get_agents(self, pool_id, agent_name=None, include_capabilities=None, include_assigned_request=None, include_last_completed_request=None, property_filters=None, demands=None):
"""GetAgents.
[Preview API] Get a list of agents.
:param int pool_id: The agent pool containing the agents
:param str agent_name: Filter on agent name
:param bool include_capabilities: Whether to include the agents' capabilities in the response
:param bool include_assigned_request: Whether to include details about the agents' current work
:param bool include_last_completed_request: Whether to include details about the agents' most recent completed work
:param [str] property_filters: Filter which custom properties will be returned
:param [str] demands: Filter by demands the agents can satisfy
:rtype: [TaskAgent]
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
query_parameters = {}
if agent_name is not None:
query_parameters['agentName'] = self._serialize.query('agent_name', agent_name, 'str')
if include_capabilities is not None:
query_parameters['includeCapabilities'] = self._serialize.query('include_capabilities', include_capabilities, 'bool')
if include_assigned_request is not None:
query_parameters['includeAssignedRequest'] = self._serialize.query('include_assigned_request', include_assigned_request, 'bool')
if include_last_completed_request is not None:
query_parameters['includeLastCompletedRequest'] = self._serialize.query('include_last_completed_request', include_last_completed_request, 'bool')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
if demands is not None:
demands = ",".join(demands)
query_parameters['demands'] = self._serialize.query('demands', demands, 'str')
response = self._send(http_method='GET',
location_id='e298ef32-5878-4cab-993c-043836571f42',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TaskAgent]', self._unwrap_collection(response))
def replace_agent(self, agent, pool_id, agent_id):
"""ReplaceAgent.
[Preview API] Replace an agent. You probably don't want to call this endpoint directly. Instead, [use the agent configuration script](https://docs.microsoft.com/azure/devops/pipelines/agents/agents) to remove and reconfigure an agent from your organization.
:param :class:`<TaskAgent> <azure.devops.v7_1.task_agent.models.TaskAgent>` agent: Updated details about the replacing agent
:param int pool_id: The agent pool to use
:param int agent_id: The agent to replace
:rtype: :class:`<TaskAgent> <azure.devops.v7_1.task_agent.models.TaskAgent>`
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
if agent_id is not None:
route_values['agentId'] = self._serialize.url('agent_id', agent_id, 'int')
content = self._serialize.body(agent, 'TaskAgent')
response = self._send(http_method='PUT',
location_id='e298ef32-5878-4cab-993c-043836571f42',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskAgent', response)
def update_agent(self, agent, pool_id, agent_id):
"""UpdateAgent.
[Preview API] Update agent details.
:param :class:`<TaskAgent> <azure.devops.v7_1.task_agent.models.TaskAgent>` agent: Updated details about the agent
:param int pool_id: The agent pool to use
:param int agent_id: The agent to update
:rtype: :class:`<TaskAgent> <azure.devops.v7_1.task_agent.models.TaskAgent>`
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
if agent_id is not None:
route_values['agentId'] = self._serialize.url('agent_id', agent_id, 'int')
content = self._serialize.body(agent, 'TaskAgent')
response = self._send(http_method='PATCH',
location_id='e298ef32-5878-4cab-993c-043836571f42',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskAgent', response)
def add_deployment_group(self, deployment_group, project):
"""AddDeploymentGroup.
[Preview API] Create a deployment group.
:param :class:`<DeploymentGroupCreateParameter> <azure.devops.v7_1.task_agent.models.DeploymentGroupCreateParameter>` deployment_group: Deployment group to create.
:param str project: Project ID or project name
:rtype: :class:`<DeploymentGroup> <azure.devops.v7_1.task_agent.models.DeploymentGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(deployment_group, 'DeploymentGroupCreateParameter')
response = self._send(http_method='POST',
location_id='083c4d89-ab35-45af-aa11-7cf66895c53e',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('DeploymentGroup', response)
def delete_deployment_group(self, project, deployment_group_id):
"""DeleteDeploymentGroup.
[Preview API] Delete a deployment group.
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group to be deleted.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if deployment_group_id is not None:
route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
self._send(http_method='DELETE',
location_id='083c4d89-ab35-45af-aa11-7cf66895c53e',
version='7.1-preview.1',
route_values=route_values)
def get_deployment_group(self, project, deployment_group_id, action_filter=None, expand=None):
"""GetDeploymentGroup.
[Preview API] Get a deployment group by its ID.
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group.
:param str action_filter: Get the deployment group only if this action can be performed on it.
:param str expand: Include these additional details in the returned object.
:rtype: :class:`<DeploymentGroup> <azure.devops.v7_1.task_agent.models.DeploymentGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if deployment_group_id is not None:
route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
query_parameters = {}
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='083c4d89-ab35-45af-aa11-7cf66895c53e',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('DeploymentGroup', response)
def get_deployment_groups(self, project, name=None, action_filter=None, expand=None, continuation_token=None, top=None, ids=None):
"""GetDeploymentGroups.
[Preview API] Get a list of deployment groups by name or IDs.
:param str project: Project ID or project name
:param str name: Name of the deployment group.
:param str action_filter: Get only deployment groups on which this action can be performed.
:param str expand: Include these additional details in the returned objects.
:param str continuation_token: Get deployment groups with names greater than this continuationToken lexicographically.
:param int top: Maximum number of deployment groups to return. Default is **1000**.
:param [int] ids: Comma separated list of IDs of the deployment groups.
:rtype: :class:`<[DeploymentGroup]> <azure.devops.v7_1.task_agent.models.[DeploymentGroup]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if ids is not None:
ids = ",".join(map(str, ids))
query_parameters['ids'] = self._serialize.query('ids', ids, 'str')
response = self._send(http_method='GET',
location_id='083c4d89-ab35-45af-aa11-7cf66895c53e',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[DeploymentGroup]', self._unwrap_collection(response))
def update_deployment_group(self, deployment_group, project, deployment_group_id):
"""UpdateDeploymentGroup.
[Preview API] Update a deployment group.
:param :class:`<DeploymentGroupUpdateParameter> <azure.devops.v7_1.task_agent.models.DeploymentGroupUpdateParameter>` deployment_group: Deployment group to update.
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group.
:rtype: :class:`<DeploymentGroup> <azure.devops.v7_1.task_agent.models.DeploymentGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if deployment_group_id is not None:
route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
content = self._serialize.body(deployment_group, 'DeploymentGroupUpdateParameter')
response = self._send(http_method='PATCH',
location_id='083c4d89-ab35-45af-aa11-7cf66895c53e',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('DeploymentGroup', response)
def get_environment_deployment_execution_records(self, project, environment_id, continuation_token=None, top=None):
"""GetEnvironmentDeploymentExecutionRecords.
[Preview API] Get environment deployment execution history
:param str project: Project ID or project name
:param int environment_id:
:param str continuation_token:
:param int top:
:rtype: :class:`<[EnvironmentDeploymentExecutionRecord]> <azure.devops.v7_1.task_agent.models.[EnvironmentDeploymentExecutionRecord]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
query_parameters = {}
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if top is not None:
query_parameters['top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='51bb5d21-4305-4ea6-9dbb-b7488af73334',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[EnvironmentDeploymentExecutionRecord]', self._unwrap_collection(response))
def add_environment(self, environment_create_parameter, project):
"""AddEnvironment.
[Preview API] Create an environment.
:param :class:`<EnvironmentCreateParameter> <azure.devops.v7_1.task_agent.models.EnvironmentCreateParameter>` environment_create_parameter: Environment to create.
:param str project: Project ID or project name
:rtype: :class:`<EnvironmentInstance> <azure.devops.v7_1.task_agent.models.EnvironmentInstance>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(environment_create_parameter, 'EnvironmentCreateParameter')
response = self._send(http_method='POST',
location_id='8572b1fc-2482-47fa-8f74-7e3ed53ee54b',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('EnvironmentInstance', response)
def delete_environment(self, project, environment_id):
"""DeleteEnvironment.
[Preview API] Delete the specified environment.
:param str project: Project ID or project name
:param int environment_id: ID of the environment.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
self._send(http_method='DELETE',
location_id='8572b1fc-2482-47fa-8f74-7e3ed53ee54b',
version='7.1-preview.1',
route_values=route_values)
def get_environment_by_id(self, project, environment_id, expands=None):
"""GetEnvironmentById.
[Preview API] Get an environment by its ID.
:param str project: Project ID or project name
:param int environment_id: ID of the environment.
:param str expands: Include these additional details in the returned objects.
:rtype: :class:`<EnvironmentInstance> <azure.devops.v7_1.task_agent.models.EnvironmentInstance>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
query_parameters = {}
if expands is not None:
query_parameters['expands'] = self._serialize.query('expands', expands, 'str')
response = self._send(http_method='GET',
location_id='8572b1fc-2482-47fa-8f74-7e3ed53ee54b',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('EnvironmentInstance', response)
def get_environments(self, project, name=None, continuation_token=None, top=None):
"""GetEnvironments.
[Preview API] Get all environments.
:param str project: Project ID or project name
:param str name:
:param str continuation_token:
:param int top:
:rtype: :class:`<[EnvironmentInstance]> <azure.devops.v7_1.task_agent.models.[EnvironmentInstance]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='8572b1fc-2482-47fa-8f74-7e3ed53ee54b',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[EnvironmentInstance]', self._unwrap_collection(response))
def update_environment(self, environment_update_parameter, project, environment_id):
"""UpdateEnvironment.
[Preview API] Update the specified environment.
:param :class:`<EnvironmentUpdateParameter> <azure.devops.v7_1.task_agent.models.EnvironmentUpdateParameter>` environment_update_parameter: Environment data to update.
:param str project: Project ID or project name
:param int environment_id: ID of the environment.
:rtype: :class:`<EnvironmentInstance> <azure.devops.v7_1.task_agent.models.EnvironmentInstance>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
content = self._serialize.body(environment_update_parameter, 'EnvironmentUpdateParameter')
response = self._send(http_method='PATCH',
location_id='8572b1fc-2482-47fa-8f74-7e3ed53ee54b',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('EnvironmentInstance', response)
def add_kubernetes_resource(self, create_parameters, project, environment_id):
"""AddKubernetesResource.
[Preview API]
:param :class:`<KubernetesResourceCreateParameters> <azure.devops.v7_1.task_agent.models.KubernetesResourceCreateParameters>` create_parameters:
:param str project: Project ID or project name
:param int environment_id:
:rtype: :class:`<KubernetesResource> <azure.devops.v7_1.task_agent.models.KubernetesResource>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
content = self._serialize.body(create_parameters, 'KubernetesResourceCreateParameters')
response = self._send(http_method='POST',
location_id='73fba52f-15ab-42b3-a538-ce67a9223a04',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('KubernetesResource', response)
def delete_kubernetes_resource(self, project, environment_id, resource_id):
"""DeleteKubernetesResource.
[Preview API]
:param str project: Project ID or project name
:param int environment_id:
:param int resource_id:
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
if resource_id is not None:
route_values['resourceId'] = self._serialize.url('resource_id', resource_id, 'int')
self._send(http_method='DELETE',
location_id='73fba52f-15ab-42b3-a538-ce67a9223a04',
version='7.1-preview.1',
route_values=route_values)
def get_kubernetes_resource(self, project, environment_id, resource_id):
"""GetKubernetesResource.
[Preview API]
:param str project: Project ID or project name
:param int environment_id:
:param int resource_id:
:rtype: :class:`<KubernetesResource> <azure.devops.v7_1.task_agent.models.KubernetesResource>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
if resource_id is not None:
route_values['resourceId'] = self._serialize.url('resource_id', resource_id, 'int')
response = self._send(http_method='GET',
location_id='73fba52f-15ab-42b3-a538-ce67a9223a04',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('KubernetesResource', response)
def add_agent_pool(self, pool):
"""AddAgentPool.
[Preview API] Create an agent pool.
:param :class:`<TaskAgentPool> <azure.devops.v7_1.task_agent.models.TaskAgentPool>` pool: Details about the new agent pool
:rtype: :class:`<TaskAgentPool> <azure.devops.v7_1.task_agent.models.TaskAgentPool>`
"""
content = self._serialize.body(pool, 'TaskAgentPool')
response = self._send(http_method='POST',
location_id='a8c47e17-4d56-4a56-92bb-de7ea7dc65be',
version='7.1-preview.1',
content=content)
return self._deserialize('TaskAgentPool', response)
def delete_agent_pool(self, pool_id):
"""DeleteAgentPool.
[Preview API] Delete an agent pool.
:param int pool_id: ID of the agent pool to delete
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
self._send(http_method='DELETE',
location_id='a8c47e17-4d56-4a56-92bb-de7ea7dc65be',
version='7.1-preview.1',
route_values=route_values)
def get_agent_pool(self, pool_id, properties=None, action_filter=None):
"""GetAgentPool.
[Preview API] Get information about an agent pool.
:param int pool_id: An agent pool ID
:param [str] properties: Agent pool properties (comma-separated)
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: :class:`<TaskAgentPool> <azure.devops.v7_1.task_agent.models.TaskAgentPool>`
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
query_parameters = {}
if properties is not None:
properties = ",".join(properties)
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='a8c47e17-4d56-4a56-92bb-de7ea7dc65be',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('TaskAgentPool', response)
def get_agent_pools(self, pool_name=None, properties=None, pool_type=None, action_filter=None):
"""GetAgentPools.
[Preview API] Get a list of agent pools.
:param str pool_name: Filter by name
:param [str] properties: Filter by agent pool properties (comma-separated)
:param str pool_type: Filter by pool type
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: [TaskAgentPool]
"""
query_parameters = {}
if pool_name is not None:
query_parameters['poolName'] = self._serialize.query('pool_name', pool_name, 'str')
if properties is not None:
properties = ",".join(properties)
query_parameters['properties'] = self._serialize.query('properties', properties, 'str')
if pool_type is not None:
query_parameters['poolType'] = self._serialize.query('pool_type', pool_type, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='a8c47e17-4d56-4a56-92bb-de7ea7dc65be',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[TaskAgentPool]', self._unwrap_collection(response))
def get_agent_pools_by_ids(self, pool_ids, action_filter=None):
"""GetAgentPoolsByIds.
[Preview API] Get a list of agent pools.
:param [int] pool_ids: pool Ids to fetch
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: [TaskAgentPool]
"""
query_parameters = {}
if pool_ids is not None:
pool_ids = ",".join(map(str, pool_ids))
query_parameters['poolIds'] = self._serialize.query('pool_ids', pool_ids, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='a8c47e17-4d56-4a56-92bb-de7ea7dc65be',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('[TaskAgentPool]', self._unwrap_collection(response))
def update_agent_pool(self, pool, pool_id):
"""UpdateAgentPool.
[Preview API] Update properties on an agent pool
:param :class:`<TaskAgentPool> <azure.devops.v7_1.task_agent.models.TaskAgentPool>` pool: Updated agent pool details
:param int pool_id: The agent pool to update
:rtype: :class:`<TaskAgentPool> <azure.devops.v7_1.task_agent.models.TaskAgentPool>`
"""
route_values = {}
if pool_id is not None:
route_values['poolId'] = self._serialize.url('pool_id', pool_id, 'int')
content = self._serialize.body(pool, 'TaskAgentPool')
response = self._send(http_method='PATCH',
location_id='a8c47e17-4d56-4a56-92bb-de7ea7dc65be',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskAgentPool', response)
def add_agent_queue(self, queue, project=None, authorize_pipelines=None):
"""AddAgentQueue.
[Preview API] Create a new agent queue to connect a project to an agent pool.
:param :class:`<TaskAgentQueue> <azure.devops.v7_1.task_agent.models.TaskAgentQueue>` queue: Details about the queue to create
:param str project: Project ID or project name
:param bool authorize_pipelines: Automatically authorize this queue when using YAML
:rtype: :class:`<TaskAgentQueue> <azure.devops.v7_1.task_agent.models.TaskAgentQueue>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if authorize_pipelines is not None:
query_parameters['authorizePipelines'] = self._serialize.query('authorize_pipelines', authorize_pipelines, 'bool')
content = self._serialize.body(queue, 'TaskAgentQueue')
response = self._send(http_method='POST',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters,
content=content)
return self._deserialize('TaskAgentQueue', response)
def delete_agent_queue(self, queue_id, project=None):
"""DeleteAgentQueue.
[Preview API] Removes an agent queue from a project.
:param int queue_id: The agent queue to remove
:param str project: Project ID or project name
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if queue_id is not None:
route_values['queueId'] = self._serialize.url('queue_id', queue_id, 'int')
self._send(http_method='DELETE',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='7.1-preview.1',
route_values=route_values)
def get_agent_queue(self, queue_id, project=None, action_filter=None):
"""GetAgentQueue.
[Preview API] Get information about an agent queue.
:param int queue_id: The agent queue to get information about
:param str project: Project ID or project name
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: :class:`<TaskAgentQueue> <azure.devops.v7_1.task_agent.models.TaskAgentQueue>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if queue_id is not None:
route_values['queueId'] = self._serialize.url('queue_id', queue_id, 'int')
query_parameters = {}
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('TaskAgentQueue', response)
def get_agent_queues(self, project=None, queue_name=None, action_filter=None):
"""GetAgentQueues.
[Preview API] Get a list of agent queues.
:param str project: Project ID or project name
:param str queue_name: Filter on the agent queue name
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: [TaskAgentQueue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if queue_name is not None:
query_parameters['queueName'] = self._serialize.query('queue_name', queue_name, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))
def get_agent_queues_by_ids(self, queue_ids, project=None, action_filter=None):
"""GetAgentQueuesByIds.
[Preview API] Get a list of agent queues by their IDs
:param [int] queue_ids: A comma-separated list of agent queue IDs to retrieve
:param str project: Project ID or project name
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: [TaskAgentQueue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if queue_ids is not None:
queue_ids = ",".join(map(str, queue_ids))
query_parameters['queueIds'] = self._serialize.query('queue_ids', queue_ids, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))
def get_agent_queues_by_names(self, queue_names, project=None, action_filter=None):
"""GetAgentQueuesByNames.
[Preview API] Get a list of agent queues by their names
:param [str] queue_names: A comma-separated list of agent names to retrieve
:param str project: Project ID or project name
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: [TaskAgentQueue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if queue_names is not None:
queue_names = ",".join(queue_names)
query_parameters['queueNames'] = self._serialize.query('queue_names', queue_names, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))
def get_agent_queues_for_pools(self, pool_ids, project=None, action_filter=None):
"""GetAgentQueuesForPools.
[Preview API] Get a list of agent queues by pool ids
:param [int] pool_ids: A comma-separated list of pool ids to get the corresponding queues for
:param str project: Project ID or project name
:param str action_filter: Filter by whether the calling user has use or manage permissions
:rtype: [TaskAgentQueue]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if pool_ids is not None:
pool_ids = ",".join(map(str, pool_ids))
query_parameters['poolIds'] = self._serialize.query('pool_ids', pool_ids, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
response = self._send(http_method='GET',
location_id='900fa995-c559-4923-aae7-f8424fe4fbea',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TaskAgentQueue]', self._unwrap_collection(response))
def get_agent_cloud_requests(self, agent_cloud_id):
"""GetAgentCloudRequests.
[Preview API]
:param int agent_cloud_id:
:rtype: [TaskAgentCloudRequest]
"""
route_values = {}
if agent_cloud_id is not None:
route_values['agentCloudId'] = self._serialize.url('agent_cloud_id', agent_cloud_id, 'int')
response = self._send(http_method='GET',
location_id='20189bd7-5134-49c2-b8e9-f9e856eea2b2',
version='7.1-preview.1',
route_values=route_values)
return self._deserialize('[TaskAgentCloudRequest]', self._unwrap_collection(response))
def delete_deployment_target(self, project, deployment_group_id, target_id):
"""DeleteDeploymentTarget.
[Preview API] Delete a deployment target in a deployment group. This deletes the agent from associated deployment pool too.
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group in which deployment target is deleted.
:param int target_id: ID of the deployment target to delete.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if deployment_group_id is not None:
route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
if target_id is not None:
route_values['targetId'] = self._serialize.url('target_id', target_id, 'int')
self._send(http_method='DELETE',
location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6',
version='7.1-preview.1',
route_values=route_values)
def get_deployment_target(self, project, deployment_group_id, target_id, expand=None):
"""GetDeploymentTarget.
[Preview API] Get a deployment target by its ID in a deployment group
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group to which deployment target belongs.
:param int target_id: ID of the deployment target to return.
:param str expand: Include these additional details in the returned objects.
:rtype: :class:`<DeploymentMachine> <azure.devops.v7_1.task_agent.models.DeploymentMachine>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if deployment_group_id is not None:
route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
if target_id is not None:
route_values['targetId'] = self._serialize.url('target_id', target_id, 'int')
query_parameters = {}
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('DeploymentMachine', response)
def get_deployment_targets(self, project, deployment_group_id, tags=None, name=None, partial_name_match=None, expand=None, agent_status=None, agent_job_result=None, continuation_token=None, top=None, enabled=None, property_filters=None):
"""GetDeploymentTargets.
[Preview API] Get a list of deployment targets in a deployment group.
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group.
:param [str] tags: Get only the deployment targets that contain all of these tags (comma-separated list).
:param str name: Name pattern of the deployment targets to return.
:param bool partial_name_match: When set to true, treats **name** as pattern. Else treats it as absolute match. Default is **false**.
:param str expand: Include these additional details in the returned objects.
:param str agent_status: Get only deployment targets that have this status.
:param str agent_job_result: Get only deployment targets that have this last job result.
:param str continuation_token: Get deployment targets with names greater than this continuationToken lexicographically.
:param int top: Maximum number of deployment targets to return. Default is **1000**.
:param bool enabled: Get only deployment targets that are enabled or disabled. Default is 'null' which returns all the targets.
:param [str] property_filters:
:rtype: :class:`<[DeploymentMachine]> <azure.devops.v7_1.task_agent.models.[DeploymentMachine]>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if deployment_group_id is not None:
route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
query_parameters = {}
if tags is not None:
tags = ",".join(tags)
query_parameters['tags'] = self._serialize.query('tags', tags, 'str')
if name is not None:
query_parameters['name'] = self._serialize.query('name', name, 'str')
if partial_name_match is not None:
query_parameters['partialNameMatch'] = self._serialize.query('partial_name_match', partial_name_match, 'bool')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
if agent_status is not None:
query_parameters['agentStatus'] = self._serialize.query('agent_status', agent_status, 'str')
if agent_job_result is not None:
query_parameters['agentJobResult'] = self._serialize.query('agent_job_result', agent_job_result, 'str')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if enabled is not None:
query_parameters['enabled'] = self._serialize.query('enabled', enabled, 'bool')
if property_filters is not None:
property_filters = ",".join(property_filters)
query_parameters['propertyFilters'] = self._serialize.query('property_filters', property_filters, 'str')
response = self._send(http_method='GET',
location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[DeploymentMachine]', self._unwrap_collection(response))
def update_deployment_targets(self, machines, project, deployment_group_id):
"""UpdateDeploymentTargets.
[Preview API] Update tags of a list of deployment targets in a deployment group.
:param [DeploymentTargetUpdateParameter] machines: Deployment targets with tags to update.
:param str project: Project ID or project name
:param int deployment_group_id: ID of the deployment group in which deployment targets are updated.
:rtype: [DeploymentMachine]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if deployment_group_id is not None:
route_values['deploymentGroupId'] = self._serialize.url('deployment_group_id', deployment_group_id, 'int')
content = self._serialize.body(machines, '[DeploymentTargetUpdateParameter]')
response = self._send(http_method='PATCH',
location_id='2f0aa599-c121-4256-a5fd-ba370e0ae7b6',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('[DeploymentMachine]', self._unwrap_collection(response))
def add_task_group(self, task_group, project):
"""AddTaskGroup.
[Preview API] Create a task group.
:param :class:`<TaskGroupCreateParameter> <azure.devops.v7_1.task_agent.models.TaskGroupCreateParameter>` task_group: Task group object to create.
:param str project: Project ID or project name
:rtype: :class:`<TaskGroup> <azure.devops.v7_1.task_agent.models.TaskGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
content = self._serialize.body(task_group, 'TaskGroupCreateParameter')
response = self._send(http_method='POST',
location_id='6c08ffbf-dbf1-4f9a-94e5-a1cbd47005e7',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskGroup', response)
def delete_task_group(self, project, task_group_id, comment=None):
"""DeleteTaskGroup.
[Preview API] Delete a task group.
:param str project: Project ID or project name
:param str task_group_id: Id of the task group to be deleted.
:param str comment: Comments to delete.
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if task_group_id is not None:
route_values['taskGroupId'] = self._serialize.url('task_group_id', task_group_id, 'str')
query_parameters = {}
if comment is not None:
query_parameters['comment'] = self._serialize.query('comment', comment, 'str')
self._send(http_method='DELETE',
location_id='6c08ffbf-dbf1-4f9a-94e5-a1cbd47005e7',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
def get_task_groups(self, project, task_group_id=None, expanded=None, task_id_filter=None, deleted=None, top=None, continuation_token=None, query_order=None):
"""GetTaskGroups.
[Preview API] List task groups.
:param str project: Project ID or project name
:param str task_group_id: Id of the task group.
:param bool expanded: 'true' to recursively expand task groups. Default is 'false'.
:param str task_id_filter: Guid of the taskId to filter.
:param bool deleted: 'true' to include deleted task groups. Default is 'false'.
:param int top: Number of task groups to get.
:param datetime continuation_token: Gets the task groups after the continuation token provided.
:param str query_order: Gets the results in the defined order. Default is 'CreatedOnDescending'.
:rtype: [TaskGroup]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if task_group_id is not None:
route_values['taskGroupId'] = self._serialize.url('task_group_id', task_group_id, 'str')
query_parameters = {}
if expanded is not None:
query_parameters['expanded'] = self._serialize.query('expanded', expanded, 'bool')
if task_id_filter is not None:
query_parameters['taskIdFilter'] = self._serialize.query('task_id_filter', task_id_filter, 'str')
if deleted is not None:
query_parameters['deleted'] = self._serialize.query('deleted', deleted, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'iso-8601')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
response = self._send(http_method='GET',
location_id='6c08ffbf-dbf1-4f9a-94e5-a1cbd47005e7',
version='7.1-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TaskGroup]', self._unwrap_collection(response))
def update_task_group(self, task_group, project, task_group_id=None):
"""UpdateTaskGroup.
[Preview API] Update a task group.
:param :class:`<TaskGroupUpdateParameter> <azure.devops.v7_1.task_agent.models.TaskGroupUpdateParameter>` task_group: Task group to update.
:param str project: Project ID or project name
:param str task_group_id: Id of the task group to update.
:rtype: :class:`<TaskGroup> <azure.devops.v7_1.task_agent.models.TaskGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if task_group_id is not None:
route_values['taskGroupId'] = self._serialize.url('task_group_id', task_group_id, 'str')
content = self._serialize.body(task_group, 'TaskGroupUpdateParameter')
response = self._send(http_method='PUT',
location_id='6c08ffbf-dbf1-4f9a-94e5-a1cbd47005e7',
version='7.1-preview.1',
route_values=route_values,
content=content)
return self._deserialize('TaskGroup', response)
def add_variable_group(self, variable_group_parameters):
"""AddVariableGroup.
[Preview API] Add a variable group.
:param :class:`<VariableGroupParameters> <azure.devops.v7_1.task_agent.models.VariableGroupParameters>` variable_group_parameters:
:rtype: :class:`<VariableGroup> <azure.devops.v7_1.task_agent.models.VariableGroup>`
"""
content = self._serialize.body(variable_group_parameters, 'VariableGroupParameters')
response = self._send(http_method='POST',
location_id='ef5b7057-ffc3-4c77-bbad-c10b4a4abcc7',
version='7.1-preview.2',
content=content)
return self._deserialize('VariableGroup', response)
def delete_variable_group(self, group_id, project_ids):
"""DeleteVariableGroup.
[Preview API] Delete a variable group
:param int group_id: Id of the variable group.
:param [str] project_ids:
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'int')
query_parameters = {}
if project_ids is not None:
project_ids = ",".join(project_ids)
query_parameters['projectIds'] = self._serialize.query('project_ids', project_ids, 'str')
self._send(http_method='DELETE',
location_id='ef5b7057-ffc3-4c77-bbad-c10b4a4abcc7',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
def share_variable_group(self, variable_group_project_references, variable_group_id):
"""ShareVariableGroup.
[Preview API] Add a variable group.
:param [VariableGroupProjectReference] variable_group_project_references:
:param int variable_group_id:
"""
query_parameters = {}
if variable_group_id is not None:
query_parameters['variableGroupId'] = self._serialize.query('variable_group_id', variable_group_id, 'int')
content = self._serialize.body(variable_group_project_references, '[VariableGroupProjectReference]')
self._send(http_method='PATCH',
location_id='ef5b7057-ffc3-4c77-bbad-c10b4a4abcc7',
version='7.1-preview.2',
query_parameters=query_parameters,
content=content)
def update_variable_group(self, variable_group_parameters, group_id):
"""UpdateVariableGroup.
[Preview API] Update a variable group.
:param :class:`<VariableGroupParameters> <azure.devops.v7_1.task_agent.models.VariableGroupParameters>` variable_group_parameters:
:param int group_id: Id of the variable group to update.
:rtype: :class:`<VariableGroup> <azure.devops.v7_1.task_agent.models.VariableGroup>`
"""
route_values = {}
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'int')
content = self._serialize.body(variable_group_parameters, 'VariableGroupParameters')
response = self._send(http_method='PUT',
location_id='ef5b7057-ffc3-4c77-bbad-c10b4a4abcc7',
version='7.1-preview.2',
route_values=route_values,
content=content)
return self._deserialize('VariableGroup', response)
def get_variable_group(self, project, group_id):
"""GetVariableGroup.
[Preview API] Get a variable group.
:param str project: Project ID or project name
:param int group_id: Id of the variable group.
:rtype: :class:`<VariableGroup> <azure.devops.v7_1.task_agent.models.VariableGroup>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if group_id is not None:
route_values['groupId'] = self._serialize.url('group_id', group_id, 'int')
response = self._send(http_method='GET',
location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
version='7.1-preview.2',
route_values=route_values)
return self._deserialize('VariableGroup', response)
def get_variable_groups(self, project, group_name=None, action_filter=None, top=None, continuation_token=None, query_order=None):
"""GetVariableGroups.
[Preview API] Get variable groups.
:param str project: Project ID or project name
:param str group_name: Name of variable group.
:param str action_filter: Action filter for the variable group. It specifies the action which can be performed on the variable groups.
:param int top: Number of variable groups to get.
:param int continuation_token: Gets the variable groups after the continuation token provided.
:param str query_order: Gets the results in the defined order. Default is 'IdDescending'.
:rtype: [VariableGroup]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if group_name is not None:
query_parameters['groupName'] = self._serialize.query('group_name', group_name, 'str')
if action_filter is not None:
query_parameters['actionFilter'] = self._serialize.query('action_filter', action_filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'int')
if query_order is not None:
query_parameters['queryOrder'] = self._serialize.query('query_order', query_order, 'str')
response = self._send(http_method='GET',
location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[VariableGroup]', self._unwrap_collection(response))
def get_variable_groups_by_id(self, project, group_ids):
"""GetVariableGroupsById.
[Preview API] Get variable groups by ids.
:param str project: Project ID or project name
:param [int] group_ids: Comma separated list of Ids of variable groups.
:rtype: [VariableGroup]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if group_ids is not None:
group_ids = ",".join(map(str, group_ids))
query_parameters['groupIds'] = self._serialize.query('group_ids', group_ids, 'str')
response = self._send(http_method='GET',
location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
version='7.1-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[VariableGroup]', self._unwrap_collection(response))
def get_yaml_schema(self, validate_task_names=None):
"""GetYamlSchema.
[Preview API] GET the Yaml schema used for Yaml file validation.
:param bool validate_task_names: Whether the schema should validate that tasks are actually installed (useful for offline tools where you don't want validation).
:rtype: object
"""
query_parameters = {}
if validate_task_names is not None:
query_parameters['validateTaskNames'] = self._serialize.query('validate_task_names', validate_task_names, 'bool')
response = self._send(http_method='GET',
location_id='1f9990b9-1dba-441f-9c2e-6485888c42b6',
version='7.1-preview.1',
query_parameters=query_parameters)
return self._deserialize('object', response)
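# Usage sketch (illustrative only, not part of the generated client). It assumes
# the standard `azure-devops` Connection/BasicAuthentication pattern and that the
# client factory exposes a `get_task_agent_client` accessor (the usual
# get_<area>_client convention); the organization URL, project name, and PAT are placeholders.
if __name__ == "__main__":
    from azure.devops.connection import Connection
    from msrest.authentication import BasicAuthentication

    # Authenticate with a personal access token (PAT).
    credentials = BasicAuthentication("", "<personal-access-token>")
    connection = Connection(base_url="https://dev.azure.com/<organization>", creds=credentials)
    client = connection.clients.get_task_agent_client()

    # List the agent queues of a project and print their IDs and names.
    for queue in client.get_agent_queues(project="<project-name>"):
        print(queue.id, queue.name)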
|
azure-devops-python-api/azure-devops/azure/devops/v7_1/task_agent/task_agent_client.py/0
|
{
"file_path": "azure-devops-python-api/azure-devops/azure/devops/v7_1/task_agent/task_agent_client.py",
"repo_id": "azure-devops-python-api",
"token_count": 30516
}
| 357 |

# Azure Quantum #
[](https://dev.azure.com/ms-quantum-public/Microsoft%20Quantum%20(public)/_build/latest?definitionId=32&branchName=main) [](https://badge.fury.io/py/azure-quantum)
Azure Quantum is Microsoft's cloud service for running Quantum Computing programs and circuits with our quantum partners and technologies. The `azure-quantum` package for Python provides functionality for interacting with Azure Quantum workspaces, including creating jobs, listing jobs, and retrieving job results. For more information, view the [Azure Quantum Documentation](https://learn.microsoft.com/en-us/azure/quantum/).
This package supports submitting quantum programs or circuits written with Python. To submit quantum programs written with Q#, Microsoft's Domain-specific language for Quantum Programming, view [Submit Q# Jobs to Azure Quantum](https://learn.microsoft.com/azure/quantum/how-to-submit-jobs).
## Installation ##
The package is released on PyPI and can be installed via `pip`:
```bash
pip install azure-quantum
```
To use `azure-quantum` for submitting quantum circuits expressed with [Qiskit](https://pypi.org/project/qiskit), install with optional dependencies:
```bash
pip install azure-quantum[qiskit]
```
To use `azure-quantum` for submitting quantum circuits expressed with [Cirq](https://pypi.org/project/cirq), install with optional dependencies:
```bash
pip install azure-quantum[cirq]
```
## Getting started and Quickstart guides ##
To work in Azure Quantum, you need an Azure subscription. If you don't have an Azure subscription, create a [free account](https://azure.microsoft.com/free/). Follow the [Create an Azure Quantum workspace](https://learn.microsoft.com/azure/quantum/how-to-create-workspace) how-to guide to set up your Workspace and enable your preferred providers.
To get started, visit the following Quickstart guides:
- [Quickstart: Submit a circuit with Qiskit](https://learn.microsoft.com/azure/quantum/quickstart-microsoft-qiskit)
- [Quickstart: Submit a circuit with Cirq](https://learn.microsoft.com/azure/quantum/quickstart-microsoft-cirq)
- [Quickstart: Submit a circuit with a provider-specific format](https://learn.microsoft.com/azure/quantum/quickstart-microsoft-provider-format).
## General usage ##
To connect to your Azure Quantum Workspace, go to the [Azure Portal](https://portal.azure.com), navigate to your Workspace and copy-paste the resource ID and location into the code snippet below.
```python
from azure.quantum import Workspace
# Enter your Workspace details (resource ID and location) below
workspace = Workspace(
resource_id="",
location=""
)
```
### List all targets ###
To list all targets that are available to your workspace, run
```python
workspace.get_targets()
```
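If you only need targets from a specific provider, `get_targets` also accepts a filter. A minimal sketch, assuming the `provider_id` keyword argument available in recent `azure-quantum` releases and that the IonQ provider is enabled in your workspace:
```python
# List only the IonQ targets available to this workspace
for target in workspace.get_targets(provider_id="ionq"):
    print(target.name)
```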
### Submit a quantum program or circuit ###
First, define a quantum program or circuit, and create a job by submitting it to one of the available targets:
```python
# Enter target name below
target = workspace.get_targets("mytarget")
# Submit quantum program or circuit
job = target.submit(my_quantum_program)
# Wait for job to complete and fetch results
result = job.get_results()
```
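Jobs are persisted in the workspace, so you can also retrieve a job later by its ID (for example, from a different Python session). A minimal sketch, assuming you saved the ID of the job submitted above:
```python
# Look up a previously submitted job by its ID and wait for it to finish
job = workspace.get_job("<job-id>")
job.wait_until_completed()
print(job.details.status)
print(job.get_results())
```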
## Examples ##
You can find example Python scripts that use the Azure Quantum Python API in the [examples](https://github.com/microsoft/qdk-python/tree/main/azure-quantum/examples) directory.
## Contributing ##
For details on contributing to this package, see the [contributing guide](https://github.com/microsoft/qdk-python/blob/main/CONTRIBUTING.md).
This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit
https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
## Support ##
If you run into any problems or bugs using this package, please head over to the [issues](https://github.com/microsoft/qdk-python/issues) page and open a new issue, if it does not already exist.
|
azure-quantum-python/azure-quantum/README.md/0
|
{
"file_path": "azure-quantum-python/azure-quantum/README.md",
"repo_id": "azure-quantum-python",
"token_count": 1362
}
| 358 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._operations import JobsOperations
from ._operations import ProvidersOperations
from ._operations import StorageOperations
from ._operations import QuotasOperations
from ._operations import SessionsOperations
from ._operations import TopLevelItemsOperations
from ._patch import __all__ as _patch_all
from ._patch import * # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
"JobsOperations",
"ProvidersOperations",
"StorageOperations",
"QuotasOperations",
"SessionsOperations",
"TopLevelItemsOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
azure-quantum-python/azure-quantum/azure/quantum/_client/operations/__init__.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/_client/operations/__init__.py",
"repo_id": "azure-quantum-python",
"token_count": 291
}
| 359 |
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
"""Defines Azure Quantum job model"""
from azure.quantum._client.models import JobDetails
from .base_job import BaseJob
from .filtered_job import FilteredJob
from .job import Job, ContentType
from .job_failed_with_results_error import JobFailedWithResultsError
from .workspace_item import WorkspaceItem
from .workspace_item_factory import WorkspaceItemFactory
from .session import Session, SessionHost, SessionDetails, SessionStatus, SessionJobFailurePolicy
__all__ = [
"Job",
"JobDetails",
"ContentType",
"BaseJob",
"FilteredJob",
"WorkspaceItem",
"Session",
"SessionHost",
"SessionDetails",
"SessionStatus",
"SessionJobFailurePolicy",
"JobFailedWithResultsError"
]
|
azure-quantum-python/azure-quantum/azure/quantum/job/__init__.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/job/__init__.py",
"repo_id": "azure-quantum-python",
"token_count": 253
}
| 360 |
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
from collections import defaultdict
from typing import Any, Dict, List, Union
import numpy as np
try:
from qiskit.providers import JobV1, JobStatus
from qiskit.result import Result
except ImportError:
raise ImportError(
"Missing optional 'qiskit' dependencies. \
To install run: pip install azure-quantum[qiskit]"
)
import ast
import json
import re
from azure.quantum import Job
from azure.quantum.qiskit.results.resource_estimator import make_estimator_result
import logging
logger = logging.getLogger(__name__)
AzureJobStatusMap = {
"Succeeded": JobStatus.DONE,
"Waiting": JobStatus.QUEUED,
"Executing": JobStatus.RUNNING,
"Failed": JobStatus.ERROR,
"Cancelled": JobStatus.CANCELLED,
"Finishing": JobStatus.RUNNING
}
# Constants for output data format:
MICROSOFT_OUTPUT_DATA_FORMAT = "microsoft.quantum-results.v1"
MICROSOFT_OUTPUT_DATA_FORMAT_V2 = "microsoft.quantum-results.v2"
IONQ_OUTPUT_DATA_FORMAT = "ionq.quantum-results.v1"
QUANTINUUM_OUTPUT_DATA_FORMAT = "honeywell.quantum-results.v1"
RESOURCE_ESTIMATOR_OUTPUT_DATA_FORMAT = "microsoft.resource-estimates.v1"
class AzureQuantumJob(JobV1):
def __init__(
self,
backend,
azure_job=None,
**kwargs
) -> None:
"""
A Job running on Azure Quantum
"""
if azure_job is None:
azure_job = Job.from_input_data(
workspace=backend.provider().get_workspace(),
session_id=backend.get_latest_session_id(),
**kwargs
)
self._azure_job = azure_job
self._workspace = backend.provider().get_workspace()
super().__init__(backend, self._azure_job.id, **kwargs)
def job_id(self):
""" This job's id."""
return self._azure_job.id
def id(self):
""" This job's id."""
return self._azure_job.id
def refresh(self):
""" Refreshes the job metadata from the server."""
return self._azure_job.refresh()
def submit(self):
""" Submits the job for execution. """
self._azure_job.submit()
return
def result(self, timeout=None, sampler_seed=None):
"""Return the results of the job."""
self._azure_job.wait_until_completed(timeout_secs=timeout)
success = self._azure_job.details.status == "Succeeded"
results = self._format_results(sampler_seed=sampler_seed)
result_dict = {
"results" : results if isinstance(results, list) else [results],
"job_id" : self._azure_job.details.id,
"backend_name" : self._backend.name(),
"backend_version" : self._backend.version,
"qobj_id" : self._azure_job.details.name,
"success" : success,
"error_data" : None if self._azure_job.details.error_data is None else self._azure_job.details.error_data.as_dict()
}
if self._azure_job.details.output_data_format == RESOURCE_ESTIMATOR_OUTPUT_DATA_FORMAT:
return make_estimator_result(result_dict)
else:
return Result.from_dict(result_dict)
def cancel(self):
"""Attempt to cancel the job."""
self._workspace.cancel_job(self._azure_job)
def status(self):
"""Return the status of the job, among the values of ``JobStatus``."""
self._azure_job.refresh()
status = AzureJobStatusMap[self._azure_job.details.status]
return status
def queue_position(self):
"""Return the position of the job in the queue. Currently not supported."""
return None
def _shots_count(self):
# Some providers use 'count', others use 'shots'; give preference to 'count':
input_params = self._azure_job.details.input_params
options = self.backend().options
shots = \
input_params["count"] if "count" in input_params else \
input_params["shots"] if "shots" in input_params else \
options.get("count") if "count" in vars(options) else \
options.get("shots")
return shots
def _format_results(self, sampler_seed=None) -> Union[List[Dict[str, Any]], Dict[str, Any]]:
""" Populates the results datastructures in a format that is compatible with qiskit libraries. """
if (self._azure_job.details.output_data_format == MICROSOFT_OUTPUT_DATA_FORMAT_V2):
return self._format_microsoft_v2_results()
success = self._azure_job.details.status == "Succeeded"
job_result = {
"data": {},
"success": success,
"header": {},
}
if success:
if (self._azure_job.details.output_data_format == MICROSOFT_OUTPUT_DATA_FORMAT):
job_result["data"] = self._format_microsoft_results(sampler_seed=sampler_seed)
elif (self._azure_job.details.output_data_format == IONQ_OUTPUT_DATA_FORMAT):
job_result["data"] = self._format_ionq_results(sampler_seed=sampler_seed)
elif (self._azure_job.details.output_data_format == QUANTINUUM_OUTPUT_DATA_FORMAT):
job_result["data"] = self._format_quantinuum_results()
else:
job_result["data"] = self._format_unknown_results()
job_result["header"] = self._azure_job.details.metadata
if "metadata" in job_result["header"]:
job_result["header"]["metadata"] = json.loads(job_result["header"]["metadata"])
job_result["shots"] = self._shots_count()
return job_result
def _draw_random_sample(self, sampler_seed, probabilities, shots):
_norm = sum(probabilities.values())
if _norm != 1:
if np.isclose(_norm, 1.0, rtol=1e-4):
probabilities = {k: v/_norm for k, v in probabilities.items()}
else:
raise ValueError(f"Probabilities do not add up to 1: {probabilities}")
if not sampler_seed:
import hashlib
id = self.job_id()
sampler_seed = int(hashlib.sha256(id.encode('utf-8')).hexdigest(), 16) % (2**32 - 1)
rand = np.random.RandomState(sampler_seed)
rand_values = rand.choice(list(probabilities.keys()), shots, p=list(probabilities.values()))
return dict(zip(*np.unique(rand_values, return_counts=True)))
@staticmethod
def _to_bitstring(k, num_qubits, meas_map):
# flip bitstring to convert to little Endian
bitstring = format(int(k), f"0{num_qubits}b")[::-1]
# flip bitstring to convert back to big Endian
return "".join([bitstring[n] for n in meas_map])[::-1]
def _format_ionq_results(self, sampler_seed=None):
""" Translate IonQ's histogram data into a format that can be consumed by qiskit libraries. """
az_result = self._azure_job.get_results()
shots = self._shots_count()
if "num_qubits" not in self._azure_job.details.metadata:
raise ValueError(f"Job with ID {self.id()} does not have the required metadata (num_qubits) to format IonQ results.")
meas_map = json.loads(self._azure_job.details.metadata.get("meas_map")) if "meas_map" in self._azure_job.details.metadata else None
num_qubits = self._azure_job.details.metadata.get("num_qubits")
if not 'histogram' in az_result:
raise "Histogram missing from IonQ Job results"
counts = defaultdict(int)
probabilities = defaultdict(int)
for key, value in az_result['histogram'].items():
bitstring = self._to_bitstring(key, num_qubits, meas_map) if meas_map else key
probabilities[bitstring] += value
if self.backend().configuration().simulator:
counts = self._draw_random_sample(sampler_seed, probabilities, shots)
else:
counts = {bitstring: np.round(shots * value) for bitstring, value in probabilities.items()}
return {"counts": counts, "probabilities": probabilities}
@staticmethod
def _qir_to_qiskit_bitstring(obj):
"""Convert the data structure from Azure into the "schema" used by Qiskit """
if isinstance(obj, str) and not re.match(r"[\d\s]+$", obj):
obj = ast.literal_eval(obj)
if isinstance(obj, tuple):
# the outermost implied container is a tuple, and each item is
# associated with a classical register. Azure and Qiskit order the
# registers in opposite directions, so reverse here to match.
return " ".join([AzureQuantumJob._qir_to_qiskit_bitstring(term) for term in reversed(obj)])
elif isinstance(obj, list):
# a list is for an individual classical register
return "".join([str(bit) for bit in obj])
else:
return str(obj)
def _format_microsoft_results(self, sampler_seed=None):
""" Translate Microsoft's job results histogram into a format that can be consumed by qiskit libraries. """
histogram = self._azure_job.get_results()
shots = self._shots_count()
counts = {}
probabilities = {}
for key in histogram.keys():
bitstring = AzureQuantumJob._qir_to_qiskit_bitstring(key)
value = histogram[key]
probabilities[bitstring] = value
if self.backend().configuration().simulator:
counts = self._draw_random_sample(sampler_seed, probabilities, shots)
else:
counts = {bitstring: np.round(shots * value) for bitstring, value in probabilities.items()}
return {"counts": counts, "probabilities": probabilities}
def _format_quantinuum_results(self):
""" Translate Quantinuum's histogram data into a format that can be consumed by qiskit libraries. """
az_result = self._azure_job.get_results()
all_bitstrings = [
bitstrings for classical_register, bitstrings
in az_result.items() if classical_register != "access_token"
]
counts = {}
combined_bitstrings = ["".join(bitstrings) for bitstrings in zip(*all_bitstrings)]
shots = len(combined_bitstrings)
for bitstring in set(combined_bitstrings):
counts[bitstring] = combined_bitstrings.count(bitstring)
histogram = {bitstring: count/shots for bitstring, count in counts.items()}
return {"counts": counts, "probabilities": histogram}
def _format_unknown_results(self):
""" This method is called to format Job results data when the job output is in an unknown format."""
az_result = self._azure_job.get_results()
return az_result
def _translate_microsoft_v2_results(self):
""" Translate Microsoft's batching job results histograms into a format that can be consumed by qiskit libraries. """
az_result = self._azure_job.get_results()
if not "DataFormat" in az_result:
raise ValueError("DataFormat missing from Job results")
if not "Results" in az_result:
raise ValueError("Results missing from Job results")
histograms = []
results = az_result["Results"]
for circuit_results in results:
counts = {}
probabilities = {}
if not "TotalCount" in circuit_results:
raise ValueError("TotalCount missing from Job results")
total_count = circuit_results["TotalCount"]
if total_count <= 0:
raise ValueError("TotalCount must be a positive non-zero integer")
if not "Histogram" in circuit_results:
raise ValueError("Histogram missing from Job results")
histogram = circuit_results["Histogram"]
for result in histogram:
if not "Display" in result:
raise ValueError("Dispaly missing from histogram result")
if not "Count" in result:
raise ValueError("Count missing from histogram result")
bitstring = AzureQuantumJob._qir_to_qiskit_bitstring(result["Display"])
count = result["Count"]
probability = count / total_count
counts[bitstring] = count
probabilities[bitstring] = probability
histograms.append((total_count, {"counts": counts, "probabilities": probabilities}))
return histograms
def _get_entry_point_names(self):
input_params = self._azure_job.details.input_params
# All V2 output is a list of entry points
entry_points = input_params["items"]
entry_point_names = []
for entry_point in entry_points:
if not "entryPoint" in entry_point:
raise ValueError("Entry point input_param is missing an 'entryPoint' field")
entry_point_names.append(entry_point["entryPoint"])
return entry_point_names if len(entry_point_names) > 0 else ["main"]
def _format_microsoft_v2_results(self) -> List[Dict[str, Any]]:
success = self._azure_job.details.status == "Succeeded"
if not success:
return [{
"data": {},
"success": False,
"header": {},
"shots": 0,
}]
entry_point_names = self._get_entry_point_names()
results = self._translate_microsoft_v2_results()
if len(results) != len(entry_point_names):
raise ValueError("The number of experiment results does not match the number of experiment names")
status = self.status()
return [{
"data": result,
"success": success,
"shots": total_count,
"name": name,
"status": status,
"header": {
"name": name
}
} for name, (total_count, result) in zip(entry_point_names, results)]
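# Usage sketch (illustrative only): `AzureQuantumJob` objects are normally created
# by the Qiskit provider via `backend.run` rather than constructed directly. The
# resource ID and location below are placeholders.
if __name__ == "__main__":
    from qiskit import QuantumCircuit
    from azure.quantum import Workspace
    from azure.quantum.qiskit import AzureQuantumProvider

    workspace = Workspace(resource_id="<resource-id>", location="<location>")
    provider = AzureQuantumProvider(workspace)
    backend = provider.get_backend("ionq.simulator")

    # Two-qubit Bell-state circuit
    circuit = QuantumCircuit(2, 2)
    circuit.h(0)
    circuit.cx(0, 1)
    circuit.measure([0, 1], [0, 1])

    # backend.run returns an AzureQuantumJob; result() converts the provider's
    # output format (handled by the _format_* methods above) into a qiskit Result.
    job = backend.run(circuit=circuit, shots=100)
    print(job.status())
    print(job.result().get_counts())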
|
azure-quantum-python/azure-quantum/azure/quantum/qiskit/job.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/qiskit/job.py",
"repo_id": "azure-quantum-python",
"token_count": 5979
}
| 361 |
"""Defines targets and helper functions for the Pasqal provider"""
##
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
##
__all__ = [
"InputParams",
"Result",
"Pasqal",
"PasqalTarget",
]
from .result import Result
from .target import InputParams, Pasqal, PasqalTarget
|
azure-quantum-python/azure-quantum/azure/quantum/target/pasqal/__init__.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/azure/quantum/target/pasqal/__init__.py",
"repo_id": "azure-quantum-python",
"token_count": 111
}
| 362 |
# Resource estimator examples
This directory contains several standalone Python scripts that use the Azure
Quantum Resource Estimator through the `azure-quantum` Python API.
## Prerequisites
These scripts require access to an Azure Quantum workspace. Read [our
documentation](https://learn.microsoft.com/azure/quantum/how-to-create-workspace)
to learn how to set up an Azure Quantum workspace. Once the Azure Quantum
workspace is created, you can retrieve the _resource id_ and _location_ from
the _Overview_ page of your workspace.
Also, you need to install the `azure-quantum` Python package:
```shell
python -m pip install azure-quantum
```
## Example scripts
* **[cli.py](https://github.com/microsoft/qdk-python/blob/main/azure-quantum/examples/resource_estimation/cli.py): A resource estimation CLI that can execute resource estimation jobs from various input formats and generate JSON output.**
The input type is determined by file extension:
* `.qs`: Q# snippet (without `namespace` declaration)
* `.qasm`: OpenQASM file
* `.ll`: QIR in ASCII format
* `.qir`, `.bc`: QIR bitcode
Usage:
Resource estimation from an OpenQASM file:
```shell
python cli.py -r "resource id" -l "location" cli_test_files/rqft_multiplier.qasm
```
Resource estimation from a Q# file with job parameters:
```shell
python cli.py -r "resource id" -l "location" cli_test_files/multiplier.qs \
-p cli_test_files/multiplier.json
```
Writing output into JSON file:
```shell
python cli.py -r "resource id" -l "location" cli_test_files/multiplier.qs \
-p cli_test_files/multiplier.json \
-o output.json
```
* **[rsa.py](https://github.com/microsoft/qdk-python/blob/main/azure-quantum/examples/resource_estimation/rsa.py): Physical resource estimation for RSA using a pre-compiled QIR code.**
You can change the parameters to the factoring algorithm, e.g., the prime product, inside the code.
Usage:
```shell
python rsa.py -r "resource_id" -l "location"
```
* **[ecc.py](https://github.com/microsoft/qdk-python/blob/main/azure-quantum/examples/resource_estimation/ecc.py): Physical resource estimation for Elliptic Curve Cryptography starting from logical resource estimates.**
The possible key sizes are 256, 384, and 521.
Usage:
```shell
python ecc.py -k 256 -r "resource_id" -l "location"
```
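All of these scripts take the workspace coordinates on the command line (`-r`/`-l`) and connect the same way as any other `azure-quantum` program. A minimal sketch of that connection, assuming the Resource Estimator target is enabled in the workspace and listed under a name containing `estimator`:
```python
from azure.quantum import Workspace

# Placeholders: use the same values you would pass to -r and -l above
workspace = Workspace(resource_id="<resource-id>", location="<location>")

# Confirm that a Resource Estimator target is available in this workspace
print([t.name for t in workspace.get_targets() if "estimator" in t.name])
```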
|
azure-quantum-python/azure-quantum/examples/resource_estimation/README.md/0
|
{
"file_path": "azure-quantum-python/azure-quantum/examples/resource_estimation/README.md",
"repo_id": "azure-quantum-python",
"token_count": 755
}
| 363 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
<#
.SYNOPSIS
Test: Run unit tests for given packages/environments
#>
param (
[bool] $SkipInstall
)
# For debug, print all relevant environment variables:
Get-ChildItem env:AZURE*, env:*VERSION, env:*OUTDIR | Format-Table | Out-String | Write-Host
$PackageDir = Split-Path -parent $PSScriptRoot;
$PackageName = $PackageDir | Split-Path -Leaf;
$RootDir = Split-Path -parent $PackageDir;
Import-Module (Join-Path $RootDir "build" "conda-utils.psm1");
Import-Module (Join-Path $RootDir "build" "package-utils.psm1");
if ($True -eq $SkipInstall) {
Write-Host "##[info]Skipping install."
} else {
& (Join-Path $PSScriptRoot Install-Artifacts.ps1)
}
Enable-Conda
# Try activating the azurequantum conda environment
if ([string]::IsNullOrEmpty($PackageName) -or ($PackageName -eq "azure-quantum")) {
try {
$EnvExists = conda env list | Select-String -Pattern "azurequantum " | Measure-Object | Select-Object -Exp Count
if ($EnvExists) {
conda activate azurequantum
}
}
catch {
Write-Host "##[warning]Failed to active conda environment."
}
}
$EnvName = GetEnvName -PackageName $PackageName
Use-CondaEnv $EnvName
function PyTestMarkExpr() {
param (
[string[]] $AzureQuantumCapabilities
)
$MarkExpr = "live_test";
if ($AzureQuantumCapabilities -notcontains "submit.ionq") {
$MarkExpr += " and not ionq"
}
if ($AzureQuantumCapabilities -notcontains "submit.rigetti") {
$MarkExpr += " and not rigetti"
}
if ($AzureQuantumCapabilities -notcontains "submit.pasqal") {
$MarkExpr += " and not pasqal"
}
if ($AzureQuantumCapabilities -notcontains "submit.quantinuum") {
$MarkExpr += " and not quantinuum"
}
if ($AzureQuantumCapabilities -notcontains "submit.microsoft-qc") {
$MarkExpr += " and not microsoft_qc"
}
if ($AzureQuantumCapabilities -notcontains "submit.microsoft-elements") {
$MarkExpr += " and not microsoft_elements_dft"
}
return $MarkExpr
}
if (Test-Path Env:AZURE_QUANTUM_CAPABILITIES) {
Write-Host "##[info]Using AZURE_QUANTUM_CAPABILITIES env variable: $Env:AZURE_QUANTUM_CAPABILITIES"
$AzureQuantumCapabilities = $Env:AZURE_QUANTUM_CAPABILITIES -Split ";" | ForEach-Object { $_.trim().ToLower() }
# Create marks based on capabilities in test environment
$MarkExpr = PyTestMarkExpr -AzureQuantumCapabilities $AzureQuantumCapabilities;
} else {
Write-Host "##[info]Missing AZURE_QUANTUM_CAPABILITIES env variable. Will run all live tests."
$MarkExpr = "live_test"
}
pip install pytest | Write-Host
$logs = Join-Path $env:BUILD_ARTIFACTSTAGINGDIRECTORY "logs" "qdk-python.txt"
" ==> Generating logs to $logs" | Write-Host
# Copy unit tests without recordings and run Pytest
Write-Host "##[info]Copy unit test files from $PackageDir to $PSScriptRoot"
Copy-Item -Path (Join-Path $PackageDir "tests" "unit" "*.py") -Destination $PSScriptRoot
if ($PackageDir -Match "azure-quantum") {
Write-Host "##[info]Copy auxiliary bitcode files from $PackageDir to $PSScriptRoot/qir"
New-Item -ItemType Directory -Path $PSScriptRoot -Name qir
# Copies auxiliary bitcode files that are used by unit tests in azure_quantum
Copy-Item -Path (Join-Path $PackageDir "tests" "unit" "qir" "*.bc") -Destination (Join-Path $PSScriptRoot "qir")
Write-Host "##[info]Copy auxiliary Q# test files from $PackageDir to $PSScriptRoot"
Copy-Item -Path (Join-Path $PackageDir "tests" "unit" "*.qs") -Destination $PSScriptRoot
Write-Host "##[info]Copy auxiliary DFT test files from $PackageDir to $PSScriptRoot"
Copy-Item -Path (Join-Path $PackageDir "tests" "unit" "*.xyz") -Destination $PSScriptRoot
}
python -m pytest -v `
--junitxml=junit/test-results.xml `
--log-level=INFO `
--log-file-format="%(asctime)s %(levelname)s %(message)s" `
--log-file=$logs `
-m $MarkExpr
|
azure-quantum-python/azure-quantum/tests.live/Run.ps1/0
|
{
"file_path": "azure-quantum-python/azure-quantum/tests.live/Run.ps1",
"repo_id": "azure-quantum-python",
"token_count": 1514
}
| 364 |
##
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
import abc
import pytest
import unittest
from typing import TYPE_CHECKING, Protocol, Tuple, runtime_checkable
from import_qsharp import skip_if_no_qsharp
if TYPE_CHECKING:
import cirq
import qiskit
@runtime_checkable
class QirInputData(Protocol):
_name: str
@abc.abstractmethod
def _repr_qir_(self, **kwargs) -> bytes:
raise NotImplementedError
@abc.abstractmethod
def __str__(self) -> str:
raise NotImplementedError
class JobPayloadFactory():
@staticmethod
def get_cirq_circuit_bell_state() -> "cirq.Circuit":
import cirq
q0 = cirq.LineQubit(0)
q1 = cirq.LineQubit(1)
circuit = cirq.Circuit(
cirq.H(q0),
cirq.CNOT(q0, q1),
cirq.measure(q0, key='q0'),
cirq.measure(q1, key='q1')
)
return circuit
@staticmethod
def get_qsharp_inline_code_bell_state() -> Tuple[str, str]:
return ("""
open Microsoft.Quantum.Intrinsic;
operation BellState_Inline() : (Result,Result) {
use q0 = Qubit();
use q1 = Qubit();
H(q0);
CNOT(q0, q1);
return (M(q0), M(q1));
}
""", "BellState_Inline()")
qsharp_inline_callable_bell_state: QirInputData = None
@staticmethod
def get_qsharp_inline_callable_bell_state() -> QirInputData:
if not JobPayloadFactory.qsharp_inline_callable_bell_state:
(qsharp_code, entrypoint) = JobPayloadFactory.get_qsharp_inline_code_bell_state()
import qsharp
qsharp.eval(qsharp_code)
JobPayloadFactory.qsharp_inline_callable_bell_state = qsharp.compile(entrypoint)
return JobPayloadFactory.qsharp_inline_callable_bell_state
@staticmethod
def get_qsharp_inline_qir_bitcode_bell_state() -> bytes:
qirInputData = JobPayloadFactory.get_qsharp_inline_callable_bell_state()
qir_bitcode = qirInputData._repr_qir_()
return qir_bitcode
qsharp_file_callable_bell_state: QirInputData = None
@staticmethod
def get_qsharp_file_callable_bell_state() -> QirInputData:
if not JobPayloadFactory.qsharp_file_callable_bell_state:
import qsharp
with open('QSharpBellState.qs') as file:
qsharp.eval(file.read())
JobPayloadFactory.qsharp_file_callable_bell_state = qsharp.compile("QSharpBellState.BellState_File()")
return JobPayloadFactory.qsharp_file_callable_bell_state
qsharp_file_qir_bitcode_bell_state: bytes = None
@staticmethod
def get_qsharp_file_qir_bitcode_bell_state() -> bytes:
if not JobPayloadFactory.qsharp_file_qir_bitcode_bell_state:
qirInputData = JobPayloadFactory.get_qsharp_file_callable_bell_state()
qir_bitcode = qirInputData._repr_qir_()
JobPayloadFactory.qsharp_file_qir_bitcode_bell_state = qir_bitcode
return JobPayloadFactory.qsharp_file_qir_bitcode_bell_state
@staticmethod
def get_qiskit_circuit_bell_state() -> "qiskit.QuantumCircuit":
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2, 2)
circuit.name = "BellState"
circuit.h(0)
circuit.cx(0, 1)
circuit.measure([0, 1], [0, 1])
return circuit
class TestJobPayloadFactory(unittest.TestCase):
@pytest.mark.cirq
def test_get_cirq_circuit_bell_state(self):
import cirq
self.assertIsInstance(JobPayloadFactory.get_cirq_circuit_bell_state(), cirq.Circuit)
@pytest.mark.qiskit
def test_get_qiskit_circuit_bell_state(self):
import qiskit
self.assertIsInstance(JobPayloadFactory.get_qiskit_circuit_bell_state(), qiskit.QuantumCircuit)
@pytest.mark.qsharp
@skip_if_no_qsharp
def test_get_qsharp_inline_callable_bell_state(self):
result = JobPayloadFactory.get_qsharp_inline_callable_bell_state()
self.assertIsInstance(result, QirInputData)
@pytest.mark.qsharp
@pytest.mark.qir
@skip_if_no_qsharp
def test_get_qsharp_inline_qir_bell_state(self):
result = JobPayloadFactory.get_qsharp_inline_qir_bitcode_bell_state()
self.assertIsInstance(result, bytes)
@pytest.mark.qsharp
@skip_if_no_qsharp
def test_get_qsharp_file_callable_bell_state(self):
result = JobPayloadFactory.get_qsharp_file_callable_bell_state()
self.assertIsInstance(result, QirInputData)
@pytest.mark.qsharp
@pytest.mark.qir
@skip_if_no_qsharp
def test_get_qsharp_file_qir_bell_state(self):
result = JobPayloadFactory.get_qsharp_file_qir_bitcode_bell_state()
self.assertIsInstance(result, bytes)
|
azure-quantum-python/azure-quantum/tests/unit/test_job_payload_factory.py/0
|
{
"file_path": "azure-quantum-python/azure-quantum/tests/unit/test_job_payload_factory.py",
"repo_id": "azure-quantum-python",
"token_count": 2155
}
| 365 |
<jupyter_start><jupyter_text>👋🌍 Hello, world: Submit a Q# job to IonQIn this notebook, we'll review the basics of Azure Quantum by submitting a simple *job*, or quantum program, to [IonQ](https://ionq.com/). We will use [Q#](https://learn.microsoft.com/azure/quantum/user-guide/) to express the quantum job. Submit a simple job to IonQ using Azure QuantumAzure Quantum provides several ways to express quantum programs. In this example we are using Q#, but note that Qiskit and Cirq are also supported. All code in this example will be written in Python and Q#.Let's begin. When you see a code block, hover over it and click the triangle play-button to execute it. To avoid any compilation issues, this should be done in order from top to bottom. 1. Connect to the Azure Quantum workspaceTo connect to the Azure Quantum service, initialize the `Workspace` as seen below.<jupyter_code>from azure.quantum import Workspace
workspace = Workspace (
resource_id = "",
location = ""
)<jupyter_output><empty_output><jupyter_text>We can use the resulting object to see which _targets_ are available for submission.<jupyter_code>print("This workspace's targets:")
for target in workspace.get_targets():
print("-", target.name)<jupyter_output><empty_output><jupyter_text>❕ Do you see `ionq.simulator` in your list of targets? If so, you're ready to keep going.Don't see it? You may need to add IonQ to your workspace to run this sample. Navigate to the **Providers** page in the portal and click **+Add** to add the IonQ provider. Don't worry, there's a free credits plan available. IonQ: The quantum providerAzure Quantum partners with third-party companies to deliver solutions to quantum jobs. These company offerings are called *providers*. Each provider can offer multiple *targets* with different capabilities. See the table below for IonQ's targets.| Target name | Target ID | Number of qubits | Description || --- | --- | --- | --- || Quantum simulator | `ionq.simulator` | 29 qubits | IonQ's cloud-based idealized simulator. Free of cost. || Aria 1 | `ionq.qpu.aria-1` | 23 qubits | IonQ's Aria 1 trapped-ion quantum computer. This is real quantum hardware, not a simulation. || Quantum computer | `ionq.qpu` | 11 qubits | IonQ's trapped-ion quantum computer. This is real quantum hardware, not a simulation. |For this example, we will use `ionq.simulator`. To learn more about IonQ's targets, check out our [documentation](https://learn.microsoft.com/azure/quantum/provider-ionq). 2. Build the quantum programLet's create a simple Q program to run.First, let's initialize the Q environment and set the target profile to Base Profile. Today, Azure Quantum targets only support the Base Profile, a subset of all Q commands.<jupyter_code>import qsharp
qsharp.init(target_profile=qsharp.TargetProfile.Base)
%%qsharp
open Microsoft.Quantum.Measurement;
open Microsoft.Quantum.Arrays;
open Microsoft.Quantum.Convert;
operation GenerateRandomBit() : Result {
use target = Qubit();
// Apply an H-gate and measure.
H(target);
return M(target);
}
# Compile the qsharp operation
operation = qsharp.compile("GenerateRandomBit()")<jupyter_output><empty_output><jupyter_text>The program you built is a simple quantum random bit generator. With IonQ's idealized simulator, we will be able to calculate the probability of measuring a `1` or `0`. 3. Submit the quantum program to IonQWe will use the `target.submit` function to run the quantum program above on IonQ's `ionq.simulator` target. This may take a minute or so ⏳. Your job will be packaged and sent to IonQ, where it will wait its turn to be run.<jupyter_code># Set the target to ionq.simulator
target = workspace.get_targets("ionq.simulator")
# Execute the job. We'll use 100 shots (simulated runs).
job = target.submit(operation, "Generate one random bit", shots=100)
print("Job Id:" + job.id)
result = job.get_results()<jupyter_output><empty_output><jupyter_text>The job ID can be used to retrieve the results later using the [get_job method](https://learn.microsoft.com/python/azure-quantum/azure.quantum.workspace?azure-quantum-workspace-get-job) or by viewing it under the **Job management** section of the portal. 4. Visualize job results You can also view a histogram of the results using [`pyplot`](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html):<jupyter_code>from matplotlib import pyplot
pyplot.bar(result.keys(), result.values())
pyplot.title("Result")
pyplot.xlabel("Measurement")
pyplot.ylabel("Probability")
pyplot.show()<jupyter_output><empty_output>
|
azure-quantum-python/samples/hello-world/HW-ionq-qsharp.ipynb/0
|
{
"file_path": "azure-quantum-python/samples/hello-world/HW-ionq-qsharp.ipynb",
"repo_id": "azure-quantum-python",
"token_count": 1311
}
| 366 |
<jupyter_start><jupyter_text>Introduction to SessionsIn this notebook, we'll get used to working with sessions in Azure Quantum by using a session to run multiple Qiskit jobs on a target. What is a session?A session is a logical grouping of one or more jobs submitted to a single target (backend). Each session has a unique ID that is attached to all jobs in that session. While sessions can be used broadly in Azure Quantum, they are particularly helpful for complex hybrid algorithms where classical code is interwoven with a large number of quantum jobs. With sessions you can focus on your algorithm as a whole instead of managing individual quantum jobs.We'll use Qiskit to submit jobs to the IonQ simulator backend for this example, but you can use sessions with Q# + Python and Cirq, and a variety of backends. See the [sessions documentation](https://aka.ms/AQ/Hybrid/Sessions/Docs) for details. 1. Connect to Azure Quantum and build a quantum circuitBefore we create a session, we'll construct an instance of the `AzureQuantumProvider` and select a backend.<jupyter_code># Connect to the Azure Quantum workspace
from qiskit import QuantumCircuit
from azure.quantum import Workspace
from azure.quantum.qiskit import AzureQuantumProvider
workspace = Workspace(
resource_id = "",
location = "",
)
provider = AzureQuantumProvider(workspace)
ionq_sim = provider.get_backend('ionq.simulator')
quantinuum_sim = provider.get_backend('quantinuum.sim.h1-1e')
rigetti_sim = provider.get_backend('rigetti.sim.qvm')
# Set the backend you want to use here.
# WARNING: Quantinuum simulator usage is not unlimited. Running this sample against it could consume a significant amount of your eHQC quota.
backend = ionq_sim<jupyter_output><empty_output><jupyter_text>For this example, we'll create a simple 2-qubit circuit that we will run multiple times in a session.<jupyter_code># Create a quantum circuit acting on two qubits
circuit = QuantumCircuit(2, 2)
circuit.name = "GenerateRandomBit"
circuit.h(0)
circuit.cx(0,1)
circuit.measure([0,1], [0,1])
# Print out the circuit
circuit.draw()<jupyter_output><empty_output><jupyter_text>2. Run quantum jobs in a sessionNow it's time to create a session! We open a session using a ```with``` statement so the session is automatically closed upon completion of our program. Then we run as many individual quantum jobs within the session as we need to, in this case we'll submit our circuit three times. You can also add classical code between individual quantum job submissions within the session.<jupyter_code>with backend.open_session(name="Qiskit Session") as session:
job1 = backend.run(circuit=circuit, shots=100, job_name="Job 1") # First job submission
# Classical code could go here
job2 = backend.run(circuit=circuit, shots=100, job_name="Job 2") # Second job submission
# Classical code could go here
job3 = backend.run(circuit=circuit, shots=100, job_name="Job 3") # Third job submission<jupyter_output><empty_output><jupyter_text>And that's it! You've run your first program with sessions! 🥳🎉🎊Running our simple program within a session lets us better organize and manage the jobs. For example, we can list all jobs in the session:<jupyter_code>session_jobs = session.list_jobs()
[session_job.details.name for session_job in session_jobs]<jupyter_output><empty_output>
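<jupyter_text>Since each submission above returns a standard Qiskit job object, you can also wait for any job in the session and read its measurement counts. The illustrative sketch below assumes the usual Qiskit `result()`/`get_counts()` interface exposed by the Azure Quantum backend wrappers.<jupyter_code># Illustrative sketch: block until the first job finishes and print its counts
counts = job1.result().get_counts()
print(counts)<jupyter_output><empty_output>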
|
azure-quantum-python/samples/sessions/introduction-to-sessions.ipynb/0
|
{
"file_path": "azure-quantum-python/samples/sessions/introduction-to-sessions.ipynb",
"repo_id": "azure-quantum-python",
"token_count": 943
}
| 367 |
const path = require('path')
const { CleanWebpackPlugin } = require('clean-webpack-plugin')
module.exports = {
entry: './src/index.js',
output: {
filename: 'index.js',
path: path.resolve(__dirname, 'dist'),
},
plugins: [new CleanWebpackPlugin()],
resolve: {
extensions: ['.ts', '.tsx', '.js']
},
module: {
rules: [
{
test: /\.(js|jsx)$/,
exclude: /node_modules/,
use: ['babel-loader']
},
{
test: /\.(ts|tsx)$/,
exclude: /node_modules/,
resolve: {
extensions: ['.ts', '.tsx', '.js', '.jsx', '.json'],
},
use: 'ts-loader'
},
{
test: /\.css$/,
use: ['css-loader'],
exclude: /node_modules/,
}
]
}
}
|
azure-quantum-python/visualization/js-lib/webpack.config.js/0
|
{
"file_path": "azure-quantum-python/visualization/js-lib/webpack.config.js",
"repo_id": "azure-quantum-python",
"token_count": 380
}
| 368 |
export { default as SpaceDiagram } from "./SpaceDiagram";
export { default as TimeDiagram } from "./TimeDiagram";
|
azure-quantum-python/visualization/react-lib/src/components/resource-estimator/index.ts/0
|
{
"file_path": "azure-quantum-python/visualization/react-lib/src/components/resource-estimator/index.ts",
"repo_id": "azure-quantum-python",
"token_count": 32
}
| 369 |
Introduction
============
Many operations commonly performed on text strings are destructive; that is, they lose some information about the original string.
Systems that deal with text will commonly perform many of these operations on their input, whether it's changing case, performing unicode normalization, collapsing whitespace, stripping punctuation, etc.
This helps systems behave in a more uniform manner regarding the many different ways you or I might express the same thing.
But the consequence is that when handling parts of this processed text, it may be hard to know what exactly the user originally wrote.
Sometimes those details can be very important to the user.
Consider an AI personal assistant, for example, that is helping a user send a text message to a friend.
The user writes,
send jane a text that says, "Hey! How are you? Haven't seen you in a while, what's up 😀"
The system may perform some normalization on that text, such that it ends up looking like this, with casing and punctuation gone:
send jane a text that says hey how are you havent seen you in a while whats up emoji
The AI may then identify that the body of the message should be:
hey how are you havent seen you in a while whats up emoji
However, that message wouldn't make much sense as-is.
If the assistant uses `bistring` though, it's easy for it to match that with the original text the user intended:
>>> from bistring import bistr
>>> query = bistr(
... 'send jane a text that says, '
... '"Hey! How are you? Haven\'t seen you in a while, what\'s up 😀"'
... )
>>> # Get rid of upper-/lower-case distinctions
>>> query = query.casefold()
>>> print(query.modified)
send jane a text that says, "hey! how are you? haven't seen you in a while, what's up 😀"
>>> import regex
>>> # Remove all punctuation
>>> query = query.sub(regex.compile(r'\pP'), '')
>>> # Replace all symbols with 'emoji'
>>> query = query.sub(regex.compile(r'\pS'), 'emoji')
>>> print(query.modified)
send jane a text that says hey how are you havent seen you in a while whats up emoji
>>> # Extract the substring we care about, the message body
>>> message = query[27:84]
>>> print(message.modified)
hey how are you havent seen you in a while whats up emoji
>>> print(message.original)
Hey! How are you? Haven't seen you in a while, what's up 😀
Every `bistr` keeps track of the original string it started with, and maintains a sequence alignment between the original and the modified strings.
This alignment means that it knows exactly what substring of the original text is associated with every chunk of the modified text.
So when you slice a `bistr`, you get the matching slice of original text automatically!
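The same trick works for any other chunk of the processed query. As a further illustrative example, slicing out the recipient's name maps straight back to the original as well:
>>> recipient = query[5:9]
>>> print(recipient.modified)
jane
>>> print(recipient.original)
jane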
|
bistring/docs/Introduction.rst/0
|
{
"file_path": "bistring/docs/Introduction.rst",
"repo_id": "bistring",
"token_count": 767
}
| 370 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
from pathlib import Path
import subprocess
# -- Project information -----------------------------------------------------
project = 'bistring'
copyright = '2022, Microsoft'
author = 'Tavian Barnes'
# The full version, including alpha/beta/rc tags
release = '0.5.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx_autodoc_typehints',
'sphinx_js',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
'node_modules',
'_build',
'Thumbs.db',
'.DS_Store',
]
# -- Intersphinx configuration -----------------------------------------------
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
}
# -- Autodoc configuration ---------------------------------------------------
autoclass_content = 'both'
autodoc_default_options = {
'members': True,
'member-order': 'bysource',
'show-inheritance': True,
'special-members': '__getitem__',
}
autodoc_inherit_docstrings = False
# -- sphinx-js configuration -------------------------------------------------
parent = Path(__file__).parent.resolve()
npm_bin = parent/'node_modules/.bin'
os.environ["PATH"] = str(npm_bin) + ":" + os.environ["PATH"]
js_language = 'typescript'
js_source_path = '../js/src'
jsdoc_config_path = '../js/tsconfig.json'
root_for_relative_js_paths = '..'
def npm_install(app, config):
node_modules = parent/'node_modules'
if not node_modules.exists():
subprocess.run(['npm', '--prefix=' + str(parent), 'install'])
def setup(app):
app.connect('config-inited', npm_install)
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
|
bistring/docs/conf.py/0
|
{
"file_path": "bistring/docs/conf.py",
"repo_id": "bistring",
"token_count": 891
}
| 371 |
/*!
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT license.
*/
export { default as Alignment } from "./alignment";
export { default as BiString } from "./bistring";
export { default as BiStringBuilder } from "./builder";
export * from "./token";
export { default } from "./bistring";
|
bistring/js/src/index.ts/0
|
{
"file_path": "bistring/js/src/index.ts",
"repo_id": "bistring",
"token_count": 91
}
| 372 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from __future__ import annotations
__all__ = ['Alignment']
import bisect
from typing import Any, Callable, Iterable, Iterator, List, Optional, Sequence, Tuple, TypeVar, Union, cast, overload
from ._typing import AnyBounds, BiIndex, Bounds, Index, Range
T = TypeVar('T')
U = TypeVar('U')
Real = Union[int, float]
CostFn = Callable[[Optional[T], Optional[U]], Real]
class Alignment:
r"""
An alignment between two related sequences.
Consider this alignment between two strings:
.. code-block:: text
|it's| |aligned!|
| \ \ |
|it is| |aligned|
An alignment stores all the indices that are known to correspond between the original and modified sequences. For
the above example, it would be
>>> a = Alignment([
... (0, 0),
... (4, 5),
... (5, 6),
... (13, 13),
... ])
Alignments can be used to answer questions like, "what's the smallest range of the original sequence that is
guaranteed to contain this part of the modified sequence?" For example, the range ``(0, 5)`` ("it is") is known to
match the range ``(0, 4)`` ("it's") of the original sequence:
>>> a.original_bounds(0, 5)
(0, 4)
    Results may be imprecise if the alignment is too coarse to match the exact inputs:
>>> a.original_bounds(0, 2)
(0, 4)
A more granular alignment like this:
.. code-block:: text
|i|t|'s| |a|l|i|g|n|e|d|!|
| | | \ \ \ \ \ \ \ \ \ /
|i|t| is| |a|l|i|g|n|e|d|
.. doctest::
>>> a = Alignment([
... (0, 0), (1, 1), (2, 2), (4, 5), (5, 6), (6, 7), (7, 8),
... (8, 9), (9, 10), (10, 11), (11, 12), (12, 13), (13, 13),
... ])
Can be more precise:
>>> a.original_bounds(0, 2)
(0, 2)
"""
__slots__ = ('_original', '_modified')
_original: List[int]
_modified: List[int]
def __init__(self, values: Iterable[BiIndex]):
"""
:param values:
The sequence of aligned indices. Each element should be a tuple ``(x, y)``, where `x` is the original
sequence position and `y` is the modified sequence position.
"""
self._original = []
self._modified = []
for i, j in values:
if self._original:
if i < self._original[-1]:
raise ValueError('Original sequence position moved backwards')
elif j < self._modified[-1]:
raise ValueError('Modified sequence position moved backwards')
elif i == self._original[-1] and j == self._modified[-1]:
continue
self._original.append(i)
self._modified.append(j)
if not self._original:
raise ValueError('No sequence positions to align')
@classmethod
def _create(cls, original: List[int], modified: List[int]) -> Alignment:
result: Alignment = super().__new__(cls)
result._original = original
result._modified = modified
return result
def __str__(self) -> str:
i, j = self._original[0], self._original[-1]
k, l = self._modified[0], self._modified[-1]
if self._original == list(range(i, j + 1)) and self._modified == list(range(k, l + 1)):
return f'[{i}:{j}⇋{k}:{l}]'
else:
return '[' + ', '.join(f'{i}⇋{j}' for i, j in self) + ']'
def __repr__(self) -> str:
i, j = self._original[0], self._original[-1]
if self._original == list(range(i, j + 1)) and self._modified == list(range(i, j + 1)):
if i == 0:
return f'Alignment.identity({j})'
else:
return f'Alignment.identity({i}, {j})'
else:
return 'Alignment([' + ', '.join(map(repr, self)) + '])'
def __eq__(self, other: Any) -> bool:
if isinstance(other, Alignment):
return (self._original, self._modified) == (other._original, other._modified)
else:
return NotImplemented
@classmethod
def _parse_bounds(cls, args: Tuple[AnyBounds, ...]) -> Bounds:
l = len(args)
if l == 0:
raise TypeError('Not enough arguments')
elif l == 1:
arg = args[0]
if isinstance(arg, range):
return arg.start, arg.stop
elif isinstance(arg, slice):
if arg.start is None or arg.stop is None:
raise ValueError('slice with unspecified bounds')
return arg.start, arg.stop
elif isinstance(arg, tuple):
return arg
else:
return 0, arg
elif l == 2:
return cast(Bounds, args)
else:
raise TypeError('Too many arguments')
@classmethod
def _parse_optional_bounds(cls, args: Tuple[AnyBounds, ...]) -> Union[Bounds, Tuple[None, None]]:
if len(args) == 0:
return None, None
else:
return cls._parse_bounds(args)
@overload
@classmethod
def identity(cls, __length: int) -> Alignment: ...
@overload
@classmethod
def identity(cls, __start: int, __stop: int) -> Alignment: ...
@overload
@classmethod
def identity(cls, __bounds: Range) -> Alignment: ...
@classmethod
def identity(cls, *args: Union[int, range, slice, Bounds]) -> Alignment:
"""
Create an identity alignment, which maps all intervals to themselves. You can pass the size of the sequence:
>>> Alignment.identity(5)
Alignment.identity(5)
or the start and end positions:
>>> Alignment.identity(1, 5)
Alignment.identity(1, 5)
or a range-like object (:class:`range`, :class:`slice`, or ``Tuple[int, int]``):
>>> Alignment.identity(range(1, 5))
Alignment.identity(1, 5)
"""
start, stop = cls._parse_bounds(args)
values = list(range(start, stop + 1))
return cls._create(values, values)
@classmethod
def _infer_costs(cls, original: Sequence[T], modified: Sequence[U], cost_fn: CostFn[T, U]) -> List[Real]:
"""
The Needleman–Wunsch or Wagner–Fischer algorithm. Here we use it in a way that only computes the final row of
costs, without finding the alignment itself. Hirschberg's algorithm uses it as a subroutine to find the optimal
alignment in less than O(N*M) space.
https://en.wikipedia.org/wiki/Needleman%E2%80%93Wunsch_algorithm
https://en.wikipedia.org/wiki/Wagner%E2%80%93Fischer_algorithm
"""
row: List[Real] = [0]
for i, m in enumerate(modified):
cost = row[i] + cost_fn(None, m)
row.append(cost)
prev: List[Real] = [0] * len(row)
for o in original:
prev, row = row, prev
row[0] = prev[0] + cost_fn(o, None)
for i, m in enumerate(modified):
sub_cost = prev[i] + cost_fn(o, m)
del_cost = prev[i + 1] + cost_fn(o, None)
ins_cost = row[i] + cost_fn(None, m)
row[i + 1] = min(sub_cost, del_cost, ins_cost)
return row
@classmethod
def _infer_matrix(cls, original: Sequence[T], modified: Sequence[U], cost_fn: CostFn[T, U]) -> List[Bounds]:
"""
The Needleman–Wunsch or Wagner–Fischer algorithm, using the entire matrix to compute the optimal alignment.
"""
row: List[Tuple[Real, int, int]] = [(0, -1, -1)]
for j, m in enumerate(modified):
cost = row[j][0] + cost_fn(None, m)
row.append((cost, 0, j))
matrix = [row]
for i, o in enumerate(original):
prev = matrix[i]
cost = prev[0][0] + cost_fn(o, None)
row = [(cost, i, 0)]
for j, m in enumerate(modified):
cost = prev[j][0] + cost_fn(o, m)
x, y = i, j
del_cost = prev[j + 1][0] + cost_fn(o, None)
if del_cost < cost:
cost = del_cost
x, y = i, j + 1
ins_cost = row[j][0] + cost_fn(None, m)
if ins_cost < cost:
cost = ins_cost
x, y = i + 1, j
row.append((cost, x, y))
matrix.append(row)
result = []
i = len(matrix) - 1
j = len(matrix[i]) - 1
while i >= 0:
result.append((i, j))
_, i, j = matrix[i][j]
result.reverse()
return result
@classmethod
def _infer_recursive(cls, original: Sequence[T], modified: Sequence[U], cost_fn: CostFn[T, U]) -> List[BiIndex]:
"""
Hirschberg's algorithm for computing optimal alignments in linear space.
https://en.wikipedia.org/wiki/Hirschberg's_algorithm
"""
if len(original) <= 1 or len(modified) <= 1:
return cls._infer_matrix(original, modified, cost_fn)
omid = len(original) // 2
oleft = original[:omid]
oright = original[omid:]
lcosts = cls._infer_costs(oleft, modified, cost_fn)
rcosts = cls._infer_costs(oright[::-1], modified[::-1], cost_fn)[::-1]
mmid = min(range(len(lcosts)), key=lambda i: lcosts[i] + rcosts[i])
mleft = modified[:mmid]
mright = modified[mmid:]
left = cls._infer_recursive(oleft, mleft, cost_fn)
right = cls._infer_recursive(oright, mright, cost_fn)
for (o, m) in right:
left.append((o + omid, m + mmid))
return left
@classmethod
def infer(cls, original: Sequence[T], modified: Sequence[U], cost_fn: Optional[CostFn[T, U]] = None) -> Alignment:
"""
Infer the alignment between two sequences with the lowest edit distance.
>>> Alignment.infer('color', 'color')
Alignment.identity(5)
>>> a = Alignment.infer('color', 'colour')
>>> # 'ou' -> 'o'
>>> a.original_bounds(3, 5)
(3, 4)
Warning: this operation has time complexity ``O(N*M)``, where `N` and `M` are the lengths of the original and
modified sequences, and so should only be used for relatively short sequences.
:param original:
The original sequence.
:param modified:
The modified sequence.
:param cost_fn:
A function returning the cost of performing an edit. ``cost_fn(a, b)`` returns the cost of replacing `a`
with `b`. ``cost_fn(a, None)`` returns the cost of deleting `a`, and ``cost_fn(None, b)`` returns the cost
of inserting `b`. By default, all operations have cost 1 except replacing identical elements, which has
cost 0.
:returns:
The inferred alignment.
"""
if cost_fn is None:
real_cost_fn: CostFn[T, U] = lambda a, b: int(a != b)
else:
real_cost_fn = cost_fn
if len(original) < len(modified):
swapped_cost_fn = lambda a, b: real_cost_fn(b, a)
result = cls._infer_recursive(modified, original, swapped_cost_fn)
return Alignment(result).inverse()
else:
result = cls._infer_recursive(original, modified, real_cost_fn)
return Alignment(result)
def __iter__(self) -> Iterator[BiIndex]:
return zip(self._original, self._modified)
def __len__(self) -> int:
return len(self._original)
@overload
def __getitem__(self, index: int) -> BiIndex: ...
@overload
def __getitem__(self, index: slice) -> Alignment: ...
def __getitem__(self, index: Index) -> Union[BiIndex, Alignment]:
"""
Indexing an alignment returns the nth pair of aligned positions:
>>> a = Alignment.identity(5)
>>> a[3]
(3, 3)
Slicing an alignment returns a new alignment with a subrange of its values:
>>> a[1:5]
Alignment.identity(1, 4)
"""
if isinstance(index, slice):
start, stop, stride = index.indices(len(self))
if stride != 1:
raise ValueError('Non-unit strides not supported')
return self._create(self._original[index], self._modified[index])
else:
return (self._original[index], self._modified[index])
def shift(self, delta_o: int, delta_m: int) -> Alignment:
"""
Shift this alignment.
:param delta_o:
The distance to shift the original sequence.
:param delta_m:
The distance to shift the modified sequence.
:returns:
An alignment with all the positions shifted by the given amounts.
"""
return self._create(
[o + delta_o for o in self._original],
[m + delta_m for m in self._modified],
)
def _search(self, source: List[int], start: int, stop: int) -> Bounds:
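        # Find the index of the last aligned position <= `start` and the index of the
        # first aligned position >= `stop`; together they bracket the requested range.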
first = bisect.bisect_right(source, start)
if first == 0:
raise IndexError('range start too small')
first -= 1
last = bisect.bisect_left(source, stop, first)
if last == len(source):
raise IndexError('range end too big')
return first, last
def _bounds(self, source: List[int], target: List[int], args: Tuple[AnyBounds, ...]) -> Bounds:
start, stop = self._parse_optional_bounds(args)
if start is None or stop is None:
i, j = 0, -1
else:
i, j = self._search(source, start, stop)
return (target[i], target[j])
def original_bounds(self, *args: AnyBounds) -> Bounds:
"""
Maps a subrange of the modified sequence to the original sequence. Can be called with either two arguments:
>>> a = Alignment.identity(5).shift(1, 0)
>>> a.original_bounds(1, 3)
(2, 4)
or with a range-like object:
>>> a.original_bounds(range(1, 3))
(2, 4)
With no arguments, returns the bounds of the entire original sequence:
>>> a.original_bounds()
(1, 6)
:returns:
The corresponding bounds in the original sequence.
"""
return self._bounds(self._modified, self._original, args)
def original_range(self, *args: AnyBounds) -> range:
"""
Like :meth:`original_bounds`, but returns a :class:`range`.
"""
return range(*self.original_bounds(*args))
def original_slice(self, *args: AnyBounds) -> slice:
"""
Like :meth:`original_bounds`, but returns a :class:`slice`.
"""
return slice(*self.original_bounds(*args))
def modified_bounds(self, *args: AnyBounds) -> Bounds:
"""
Maps a subrange of the original sequence to the modified sequence. Can be called with either two arguments:
>>> a = Alignment.identity(5).shift(1, 0)
>>> a.modified_bounds(2, 4)
(1, 3)
or with a range-like object:
>>> a.modified_bounds(range(2, 4))
(1, 3)
With no arguments, returns the bounds of the entire modified sequence:
>>> a.modified_bounds()
(0, 5)
:returns:
The corresponding bounds in the modified sequence.
"""
return self._bounds(self._original, self._modified, args)
def modified_range(self, *args: AnyBounds) -> range:
"""
Like :meth:`modified_bounds`, but returns a :class:`range`.
"""
return range(*self.modified_bounds(*args))
def modified_slice(self, *args: AnyBounds) -> slice:
"""
        Like :meth:`modified_bounds`, but returns a :class:`slice`.
"""
return slice(*self.modified_bounds(*args))
def slice_by_original(self, *args: AnyBounds) -> Alignment:
"""
Slice this alignment by a span of the original sequence.
>>> a = Alignment.identity(5).shift(1, 0)
>>> a.slice_by_original(2, 4)
Alignment([(2, 1), (3, 2), (4, 3)])
:returns:
The slice of this alignment that corresponds with the given span of the original sequence.
"""
start, stop = self._parse_bounds(args)
first, last = self._search(self._original, start, stop)
original = self._original[first:last+1]
original = [min(max(i, start), stop) for i in original]
modified = self._modified[first:last+1]
return self._create(original, modified)
def slice_by_modified(self, *args: AnyBounds) -> Alignment:
"""
Slice this alignment by a span of the modified sequence.
>>> a = Alignment.identity(5).shift(1, 0)
>>> a.slice_by_modified(1, 3)
Alignment([(2, 1), (3, 2), (4, 3)])
:returns:
The slice of this alignment that corresponds with the given span of the modified sequence.
"""
start, stop = self._parse_bounds(args)
first, last = self._search(self._modified, start, stop)
original = self._original[first:last+1]
modified = self._modified[first:last+1]
modified = [min(max(i, start), stop) for i in modified]
return self._create(original, modified)
def __add__(self, other: Any) -> Alignment:
"""
Concatenate two alignments.
"""
if not isinstance(other, Alignment):
return NotImplemented
o_orig = other._original
o_mod = other._modified
if o_orig[0] < self._original[-1]:
raise ValueError('Original sequence position moved backwards')
elif o_mod[0] < self._modified[-1]:
raise ValueError('Modified sequence position moved backwards')
elif o_orig[0] == self._original[-1] and o_mod[0] == self._modified[-1]:
o_orig = o_orig[1:]
o_mod = o_mod[1:]
return self._create(self._original + o_orig, self._modified + o_mod)
def compose(self, other: Alignment) -> Alignment:
"""
:returns:
A new alignment equivalent to applying this one first, then the `other`.
"""
if self.modified_bounds() != other.original_bounds():
raise ValueError('Incompatible alignments')
original = []
modified = []
i, i_max = 0, len(self)
j, j_max = 0, len(other)
while i < i_max:
# Map self._original[i] to its lower bound in other
while self._modified[i] > other._original[j]:
j += 1
while self._modified[i] < other._original[j] and self._modified[i + 1] <= other._original[j]:
i += 1
original.append(self._original[i])
modified.append(other._modified[j])
# Map self._original[i] to its upper bound in other (if it's different)
while i + 1 < i_max and self._original[i] == self._original[i + 1]:
i += 1
needs_upper = False
while j + 1 < j_max and self._modified[i] >= other._original[j + 1]:
needs_upper = True
j += 1
if needs_upper:
original.append(self._original[i])
modified.append(other._modified[j])
i += 1
return self._create(original, modified)
def inverse(self) -> Alignment:
"""
:returns:
The inverse of this alignment, from the modified to the original sequence.
"""
return self._create(self._modified, self._original)
|
bistring/python/bistring/_alignment.py/0
|
{
"file_path": "bistring/python/bistring/_alignment.py",
"repo_id": "bistring",
"token_count": 9075
}
| 373 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from bistring import bistr, Alignment, BistrBuilder
def test_chunk_words():
builder = BistrBuilder(' the quick brown fox ')
builder.discard(2)
builder.replace(3, 'the')
builder.skip(1)
builder.replace(5, 'quick')
builder.replace(2, ' ')
builder.replace(5, 'brown')
builder.skip(1)
builder.replace(3, 'fox')
builder.discard(1)
bs = builder.build()
assert bs.original == ' the quick brown fox '
assert bs.modified == 'the quick brown fox'
assert bs[0:1].original == 'the'
assert bs[1:2].original == 'the'
assert bs[2:3].original == 'the'
assert bs[0:3].original == 'the'
assert bs[1:3].original == 'the'
assert bs[0:4].original == 'the '
assert bs[1:4].original == 'the '
assert bs[3:4].original == ' '
assert bs[9:10].original == ' '
assert bs[4:15].original == 'quick brown'
assert bs[5:14].original == 'quick brown'
assert bs[0:0].original == ''
assert bs[10:10].original == ''
def test_chunk_chars():
builder = BistrBuilder(' the quick brown fox ')
builder.discard_match(r'\s+')
while not builder.is_complete:
builder.skip_match(r'\S+')
builder.replace_match(r'\s+(?=\S)', ' ')
builder.discard_match(r'\s+$')
bs = builder.build()
assert bs.original == ' the quick brown fox '
assert bs.modified == 'the quick brown fox'
assert bs[0:1].original == 't'
assert bs[1:2].original == 'h'
assert bs[2:3].original == 'e'
assert bs[0:3].original == 'the'
assert bs[1:3].original == 'he'
assert bs[0:4].original == 'the '
assert bs[1:4].original == 'he '
assert bs[3:4].original == ' '
assert bs[9:10].original == ' '
assert bs[4:15].original == 'quick brown'
assert bs[5:14].original == 'uick brow'
assert bs[0:0].original == ''
assert bs[10:10].original == ''
def test_empty_string():
builder = BistrBuilder('')
bs = builder.build()
assert bs.original == ''
assert bs.modified == ''
assert bs[0:0].original == ''
def test_iterative():
builder = BistrBuilder("I wish I wouldn't've spent one thousand dollars.")
builder.skip_match(r'[^.]*')
builder.discard_rest()
builder.rewind()
    builder.skip_match(r'I wish I ')
    builder.replace_match(r"wouldn't've", 'would not have')
    builder.skip_match(r' spent ')
    builder.replace_match(r'one thousand dollars', '$1,000')
bs = builder.build()
assert bs.original == "I wish I wouldn't've spent one thousand dollars."
assert bs.modified == 'I wish I would not have spent $1,000'
def test_replace_matches():
builder = BistrBuilder('the cheese that the mouse that the cat that the dog chased played with ate')
builder.replace_next(r'that', 'which')
builder.replace_all(r'that', 'whom')
bs = builder.build()
assert bs.original == 'the cheese that the mouse that the cat that the dog chased played with ate'
assert bs.modified == 'the cheese which the mouse whom the cat whom the dog chased played with ate'
def test_replace_backreference():
builder = BistrBuilder("it doesn't work and stuff doesn't get replaced")
builder.replace_all(r"\bdoesn't (\S+)", r'\1s')
bs = builder.build()
assert bs.original == "it doesn't work and stuff doesn't get replaced"
assert bs.modified == 'it works and stuff gets replaced'
def test_append():
builder = BistrBuilder('hello WORLD')
builder.append(bistr(builder.peek(5)).upper('en_US'))
builder.skip(1)
builder.append(bistr(builder.peek(5)).lower('en_US'))
bs = builder.build()
assert bs[1:4] == bistr('ell', 'ELL', Alignment.identity(3))
assert bs[7:10] == bistr('ORL', 'orl', Alignment.identity(3))
|
bistring/python/tests/test_builder.py/0
|
{
"file_path": "bistring/python/tests/test_builder.py",
"repo_id": "bistring",
"token_count": 1508
}
| 374 |
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
class DefaultConfig:
""" Bot Configuration """
PORT = 3978
APP_ID = os.environ.get("MicrosoftAppId", "")
APP_PASSWORD = os.environ.get("MicrosoftAppPassword", "")
APP_TYPE = os.environ.get("MicrosoftAppType", "MultiTenant")
APP_TENANTID = os.environ.get("MicrosoftAppTenantId", "")
LUIS_APP_ID = os.environ.get("LuisAppId", "")
LUIS_API_KEY = os.environ.get("LuisAPIKey", "")
# LUIS endpoint host name, ie "westus.api.cognitive.microsoft.com"
LUIS_API_HOST_NAME = os.environ.get("LuisAPIHostName", "")
|
botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/config.py/0
|
{
"file_path": "botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/config.py",
"repo_id": "botbuilder-python",
"token_count": 256
}
| 375 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.ai.luis import LuisApplication, LuisRecognizer
from botbuilder.core import Recognizer, RecognizerResult, TurnContext
from config import DefaultConfig
class FlightBookingRecognizer(Recognizer):
def __init__(self, configuration: DefaultConfig):
self._recognizer = None
luis_is_configured = (
configuration.LUIS_APP_ID
and configuration.LUIS_API_KEY
and configuration.LUIS_API_HOST_NAME
)
if luis_is_configured:
luis_application = LuisApplication(
configuration.LUIS_APP_ID,
configuration.LUIS_API_KEY,
"https://" + configuration.LUIS_API_HOST_NAME,
)
self._recognizer = LuisRecognizer(luis_application)
@property
def is_configured(self) -> bool:
        # Returns true if LUIS is configured in config.py and initialized.
return self._recognizer is not None
async def recognize(self, turn_context: TurnContext) -> RecognizerResult:
return await self._recognizer.recognize(turn_context)
|
botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/flight_booking_recognizer.py/0
|
{
"file_path": "botbuilder-python/generators/app/templates/core/{{cookiecutter.bot_name}}/flight_booking_recognizer.py",
"repo_id": "botbuilder-python",
"token_count": 467
}
| 376 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class SlackClientOptions:
"""
Defines the implementation of the SlackClient options.
"""
def __init__(
self,
slack_verification_token: str,
slack_bot_token: str,
slack_client_signing_secret: str,
):
"""
Initializes a new instance of SlackClientOptions.
:param slack_verification_token: A token for validating the origin of incoming webhooks.
:type slack_verification_token: str
:param slack_bot_token: A token for a bot to work on a single workspace.
:type slack_bot_token: str
:param slack_client_signing_secret: The token used to validate that incoming webhooks originated from Slack.
:type slack_client_signing_secret: str
"""
self.slack_verification_token = slack_verification_token
self.slack_bot_token = slack_bot_token
self.slack_client_signing_secret = slack_client_signing_secret
self.slack_client_id = None
self.slack_client_secret = None
self.slack_redirect_uri = None
        self.slack_scopes = []  # list of OAuth scope strings
|
botbuilder-python/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_client_options.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-adapters-slack/botbuilder/adapters/slack/slack_client_options.py",
"repo_id": "botbuilder-python",
"token_count": 458
}
| 377 |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license.
from botbuilder.core import BotTelemetryClient, NullTelemetryClient
class LuisPredictionOptions:
"""
Optional parameters for a LUIS prediction request.
"""
def __init__(
self,
bing_spell_check_subscription_key: str = None,
include_all_intents: bool = None,
include_instance_data: bool = None,
log: bool = None,
spell_check: bool = None,
staging: bool = None,
timeout: float = 100000,
timezone_offset: float = None,
telemetry_client: BotTelemetryClient = NullTelemetryClient(),
log_personal_information: bool = False,
):
self.bing_spell_check_subscription_key: str = bing_spell_check_subscription_key
self.include_all_intents: bool = include_all_intents
self.include_instance_data: bool = include_instance_data
self.log: bool = log
self.spell_check: bool = spell_check
self.staging: bool = staging
self.timeout: float = timeout
self.timezone_offset: float = timezone_offset
self.telemetry_client: BotTelemetryClient = telemetry_client
self.log_personal_information: bool = log_personal_information
|
botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/luis/luis_prediction_options.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/luis/luis_prediction_options.py",
"repo_id": "botbuilder-python",
"token_count": 485
}
| 378 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from msrest.serialization import Model
class FeedbackRecords(Model):
"""Active learning feedback records."""
_attribute_map = {"records": {"key": "records", "type": "[FeedbackRecord]"}}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.records = kwargs.get("records", None)
|
botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/models/feedback_records.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/models/feedback_records.py",
"repo_id": "botbuilder-python",
"token_count": 137
}
| 379 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
from typing import Dict
from botbuilder.core import BotTelemetryClient, TurnContext
from .qnamaker_options import QnAMakerOptions
class QnAMakerTelemetryClient(ABC):
def __init__(
self, log_personal_information: bool, telemetry_client: BotTelemetryClient
):
        self.log_personal_information = log_personal_information
self.telemetry_client = telemetry_client
@abstractmethod
def get_answers(
self,
context: TurnContext,
options: QnAMakerOptions = None,
telemetry_properties: Dict[str, str] = None,
telemetry_metrics: Dict[str, float] = None,
):
raise NotImplementedError(
"QnAMakerTelemetryClient.get_answers(): is not implemented."
)
|
botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_telemetry_client.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/botbuilder/ai/qna/qnamaker_telemetry_client.py",
"repo_id": "botbuilder-python",
"token_count": 328
}
| 380 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict
from botbuilder.ai.luis import LuisRecognizer, LuisTelemetryConstants
from botbuilder.core import RecognizerResult, TurnContext
class OverrideFillRecognizer(LuisRecognizer):
def __init__(self, *args, **kwargs):
super(OverrideFillRecognizer, self).__init__(*args, **kwargs)
def on_recognizer_result(
self,
recognizer_result: RecognizerResult,
turn_context: TurnContext,
telemetry_properties: Dict[str, str] = None,
telemetry_metrics: Dict[str, float] = None,
):
properties = super(OverrideFillRecognizer, self).fill_luis_event_properties(
recognizer_result, turn_context, telemetry_properties
)
if "MyImportantProperty" not in properties:
properties["MyImportantProperty"] = "myImportantValue"
# Log event
self.telemetry_client.track_event(
LuisTelemetryConstants.luis_result, properties, telemetry_metrics
)
# Create second event.
second_event_properties: Dict[str, str] = {
"MyImportantProperty2": "myImportantValue2"
}
self.telemetry_client.track_event("MySecondEvent", second_event_properties)
|
botbuilder-python/libraries/botbuilder-ai/tests/luis/override_fill_recognizer.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/tests/luis/override_fill_recognizer.py",
"repo_id": "botbuilder-python",
"token_count": 502
}
| 381 |
{
"query": "Book a table on Friday or tomorrow at 5 or tomorrow at 4",
"topScoringIntent": {
"intent": "None",
"score": 0.8785189
},
"intents": [
{
"intent": "None",
"score": 0.8785189
}
],
"entities": [
{
"entity": "friday",
"type": "builtin.datetimeV2.date",
"startIndex": 16,
"endIndex": 21,
"resolution": {
"values": [
{
"timex": "XXXX-WXX-5",
"type": "date",
"value": "2018-07-13"
},
{
"timex": "XXXX-WXX-5",
"type": "date",
"value": "2018-07-20"
}
]
}
},
{
"entity": "tomorrow at 5",
"type": "builtin.datetimeV2.datetime",
"startIndex": 26,
"endIndex": 38,
"resolution": {
"values": [
{
"timex": "2018-07-19T05",
"type": "datetime",
"value": "2018-07-19 05:00:00"
},
{
"timex": "2018-07-19T17",
"type": "datetime",
"value": "2018-07-19 17:00:00"
}
]
}
},
{
"entity": "tomorrow at 4",
"type": "builtin.datetimeV2.datetime",
"startIndex": 43,
"endIndex": 55,
"resolution": {
"values": [
{
"timex": "2018-07-19T04",
"type": "datetime",
"value": "2018-07-19 04:00:00"
},
{
"timex": "2018-07-19T16",
"type": "datetime",
"value": "2018-07-19 16:00:00"
}
]
}
},
{
"entity": "5",
"type": "builtin.number",
"startIndex": 38,
"endIndex": 38,
"resolution": {
"value": "5"
}
},
{
"entity": "4",
"type": "builtin.number",
"startIndex": 55,
"endIndex": 55,
"resolution": {
"value": "4"
}
}
]
}
|
botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/MultipleDateTimeEntities.json/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/MultipleDateTimeEntities.json",
"repo_id": "botbuilder-python",
"token_count": 1825
}
| 382 |
{
"text": "3 inches long by 2 inches wide and 5% to 10% and are you between 6 years old and 8 years old and can i trade kb457 for kb922 and change 425-777-1212 to 206-666-4123 and did delta buy virgin and did the rain from hawaii get to redmond and http://foo.com changed to http://blah.com and i like between 68 degrees and 72 degrees and john likes mary and leave 3pm and arrive 5pm and pay between $400 and $500 and send [email protected] from [email protected]",
"intents": {
"Cancel": {
"score": 4.50860341e-7
},
"Delivery": {
"score": 0.00007978094
},
"EntityTests": {
"score": 0.0046325135
},
"Greeting": {
"score": 4.73494453e-7
},
"Help": {
"score": 7.622754e-7
},
"None": {
"score": 0.00093744183
},
"Roles": {
"score": 1
},
"search": {
"score": 0.07635335
},
"SpecifyName": {
"score": 0.00009136085
},
"Travel": {
"score": 0.00771805458
},
"Weather_GetForecast": {
"score": 0.0100867962
}
},
"entities": {
"$instance": {
"a": [
{
"endIndex": 309,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 299,
"text": "68 degrees",
"type": "builtin.temperature"
}
],
"arrive": [
{
"endIndex": 373,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 370,
"text": "5pm",
"type": "builtin.datetimeV2.time"
}
],
"b": [
{
"endIndex": 324,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 314,
"text": "72 degrees",
"type": "builtin.temperature"
}
],
"begin": [
{
"endIndex": 76,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 65,
"text": "6 years old",
"type": "builtin.age"
}
],
"buy": [
{
"endIndex": 124,
"modelType": "Regex Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 119,
"text": "kb922",
"type": "Part"
}
],
"Buyer": [
{
"endIndex": 178,
"modelType": "List Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 173,
"text": "delta",
"type": "Airline"
}
],
"Composite1": [
{
"endIndex": 172,
"modelType": "Composite Entity Extractor",
"recognitionSources": [
"model"
],
"score": 0.01107535,
"startIndex": 0,
"text": "3 inches long by 2 inches wide and 5% to 10% and are you between 6 years old and 8 years old and can i trade kb457 for kb922 and change 425-777-1212 to 206-666-4123 and did",
"type": "Composite1"
}
],
"Composite2": [
{
"endIndex": 283,
"modelType": "Composite Entity Extractor",
"recognitionSources": [
"model"
],
"score": 0.15191336,
"startIndex": 238,
"text": "http://foo.com changed to http://blah.com and",
"type": "Composite2"
}
],
"destination": [
{
"endIndex": 233,
"modelType": "Entity Extractor",
"recognitionSources": [
"model"
],
"score": 0.985884964,
"startIndex": 226,
"text": "redmond",
"type": "Weather.Location"
}
],
"dimension": [
{
"endIndex": 358,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 355,
"text": "3pm",
"type": "builtin.dimension"
},
{
"endIndex": 373,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 370,
"text": "5pm",
"type": "builtin.dimension"
}
],
"end": [
{
"endIndex": 92,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 81,
"text": "8 years old",
"type": "builtin.age"
}
],
"geographyV2": [
{
"endIndex": 218,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 212,
"text": "hawaii",
"type": "builtin.geographyV2.state"
},
{
"endIndex": 233,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 226,
"text": "redmond",
"type": "builtin.geographyV2.city"
}
],
"leave": [
{
"endIndex": 358,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 355,
"text": "3pm",
"type": "builtin.datetimeV2.time"
}
],
"length": [
{
"endIndex": 8,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 0,
"text": "3 inches",
"type": "builtin.dimension"
}
],
"likee": [
{
"endIndex": 344,
"modelType": "Entity Extractor",
"recognitionSources": [
"model"
],
"score": 0.9900547,
"startIndex": 340,
"text": "mary",
"type": "Name"
}
],
"liker": [
{
"endIndex": 333,
"modelType": "Entity Extractor",
"recognitionSources": [
"model"
],
"score": 0.992201567,
"startIndex": 329,
"text": "john",
"type": "Name"
}
],
"max": [
{
"endIndex": 403,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 399,
"text": "$500",
"type": "builtin.currency"
}
],
"maximum": [
{
"endIndex": 44,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 41,
"text": "10%",
"type": "builtin.percentage"
}
],
"min": [
{
"endIndex": 394,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 390,
"text": "$400",
"type": "builtin.currency"
}
],
"minimum": [
{
"endIndex": 37,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 35,
"text": "5%",
"type": "builtin.percentage"
}
],
"newPhone": [
{
"endIndex": 164,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"score": 0.9,
"startIndex": 152,
"text": "206-666-4123",
"type": "builtin.phonenumber"
}
],
"number": [
{
"endIndex": 301,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 299,
"text": "68",
"type": "builtin.number"
},
{
"endIndex": 316,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 314,
"text": "72",
"type": "builtin.number"
},
{
"endIndex": 394,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 391,
"text": "400",
"type": "builtin.number"
},
{
"endIndex": 403,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 400,
"text": "500",
"type": "builtin.number"
}
],
"old": [
{
"endIndex": 148,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"score": 0.9,
"startIndex": 136,
"text": "425-777-1212",
"type": "builtin.phonenumber"
}
],
"oldURL": [
{
"endIndex": 252,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 238,
"text": "http://foo.com",
"type": "builtin.url"
}
],
"personName": [
{
"endIndex": 333,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 329,
"text": "john",
"type": "builtin.personName"
},
{
"endIndex": 344,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 340,
"text": "mary",
"type": "builtin.personName"
}
],
"receiver": [
{
"endIndex": 431,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 413,
"text": "[email protected]",
"type": "builtin.email"
}
],
"sell": [
{
"endIndex": 114,
"modelType": "Regex Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 109,
"text": "kb457",
"type": "Part"
}
],
"Seller": [
{
"endIndex": 189,
"modelType": "List Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 183,
"text": "virgin",
"type": "Airline"
}
],
"sender": [
{
"endIndex": 451,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 437,
"text": "[email protected]",
"type": "builtin.email"
}
],
"source": [
{
"endIndex": 218,
"modelType": "Entity Extractor",
"recognitionSources": [
"model"
],
"score": 0.9713092,
"startIndex": 212,
"text": "hawaii",
"type": "Weather.Location"
}
],
"width": [
{
"endIndex": 25,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 17,
"text": "2 inches",
"type": "builtin.dimension"
}
]
},
"a": [
{
"number": 68,
"units": "Degree"
}
],
"arrive": [
{
"timex": [
"T17"
],
"type": "time"
}
],
"b": [
{
"number": 72,
"units": "Degree"
}
],
"begin": [
{
"number": 6,
"units": "Year"
}
],
"buy": [
"kb922"
],
"Buyer": [
[
"Delta"
]
],
"Composite1": [
{
"$instance": {
"datetime": [
{
"endIndex": 72,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 65,
"text": "6 years",
"type": "builtin.datetimeV2.duration"
},
{
"endIndex": 88,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 81,
"text": "8 years",
"type": "builtin.datetimeV2.duration"
}
],
"number": [
{
"endIndex": 1,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 0,
"text": "3",
"type": "builtin.number"
},
{
"endIndex": 18,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 17,
"text": "2",
"type": "builtin.number"
},
{
"endIndex": 36,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 35,
"text": "5",
"type": "builtin.number"
},
{
"endIndex": 43,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 41,
"text": "10",
"type": "builtin.number"
},
{
"endIndex": 66,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 65,
"text": "6",
"type": "builtin.number"
},
{
"endIndex": 82,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 81,
"text": "8",
"type": "builtin.number"
},
{
"endIndex": 139,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 136,
"text": "425",
"type": "builtin.number"
},
{
"endIndex": 143,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 140,
"text": "777",
"type": "builtin.number"
},
{
"endIndex": 148,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 144,
"text": "1212",
"type": "builtin.number"
},
{
"endIndex": 155,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 152,
"text": "206",
"type": "builtin.number"
},
{
"endIndex": 159,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 156,
"text": "666",
"type": "builtin.number"
},
{
"endIndex": 164,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 160,
"text": "4123",
"type": "builtin.number"
}
]
},
"datetime": [
{
"timex": [
"P6Y"
],
"type": "duration"
},
{
"timex": [
"P8Y"
],
"type": "duration"
}
],
"number": [
3,
2,
5,
10,
6,
8,
425,
777,
1212,
206,
666,
4123
]
}
],
"Composite2": [
{
"$instance": {
"url": [
{
"endIndex": 279,
"modelType": "Prebuilt Entity Extractor",
"recognitionSources": [
"model"
],
"startIndex": 264,
"text": "http://blah.com",
"type": "builtin.url"
}
]
},
"url": [
"http://blah.com"
]
}
],
"destination": [
"redmond"
],
"dimension": [
{
"number": 3,
"units": "Picometer"
},
{
"number": 5,
"units": "Picometer"
}
],
"end": [
{
"number": 8,
"units": "Year"
}
],
"geographyV2": [
{
"location": "hawaii",
"type": "state"
},
{
"location": "redmond",
"type": "city"
}
],
"leave": [
{
"timex": [
"T15"
],
"type": "time"
}
],
"length": [
{
"number": 3,
"units": "Inch"
}
],
"likee": [
"mary"
],
"liker": [
"john"
],
"max": [
{
"number": 500,
"units": "Dollar"
}
],
"maximum": [
10
],
"min": [
{
"number": 400,
"units": "Dollar"
}
],
"minimum": [
5
],
"newPhone": [
"206-666-4123"
],
"number": [
68,
72,
400,
500
],
"old": [
"425-777-1212"
],
"oldURL": [
"http://foo.com"
],
"personName": [
"john",
"mary"
],
"receiver": [
"[email protected]"
],
"sell": [
"kb457"
],
"Seller": [
[
"Virgin"
]
],
"sender": [
"[email protected]"
],
"source": [
"hawaii"
],
"width": [
{
"number": 2,
"units": "Inch"
}
]
},
"sentiment": {
"label": "neutral",
"score": 0.5
},
"v3": {
"response": {
"prediction": {
"entities": {
"$instance": {
"a": [
{
"length": 10,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "a",
"startIndex": 299,
"text": "68 degrees",
"type": "builtin.temperature"
}
],
"arrive": [
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "arrive",
"startIndex": 370,
"text": "5pm",
"type": "builtin.datetimeV2.time"
}
],
"b": [
{
"length": 10,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "b",
"startIndex": 314,
"text": "72 degrees",
"type": "builtin.temperature"
}
],
"begin": [
{
"length": 11,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "begin",
"startIndex": 65,
"text": "6 years old",
"type": "builtin.age"
}
],
"buy": [
{
"length": 5,
"modelType": "Regex Entity Extractor",
"modelTypeId": 8,
"recognitionSources": [
"model"
],
"role": "buy",
"startIndex": 119,
"text": "kb922",
"type": "Part"
}
],
"Buyer": [
{
"length": 5,
"modelType": "List Entity Extractor",
"modelTypeId": 5,
"recognitionSources": [
"model"
],
"role": "Buyer",
"startIndex": 173,
"text": "delta",
"type": "Airline"
}
],
"Composite1": [
{
"length": 172,
"modelType": "Composite Entity Extractor",
"modelTypeId": 4,
"recognitionSources": [
"model"
],
"score": 0.01107535,
"startIndex": 0,
"text": "3 inches long by 2 inches wide and 5% to 10% and are you between 6 years old and 8 years old and can i trade kb457 for kb922 and change 425-777-1212 to 206-666-4123 and did",
"type": "Composite1"
}
],
"Composite2": [
{
"length": 45,
"modelType": "Composite Entity Extractor",
"modelTypeId": 4,
"recognitionSources": [
"model"
],
"score": 0.15191336,
"startIndex": 238,
"text": "http://foo.com changed to http://blah.com and",
"type": "Composite2"
}
],
"destination": [
{
"length": 7,
"modelType": "Entity Extractor",
"modelTypeId": 1,
"recognitionSources": [
"model"
],
"role": "destination",
"score": 0.985884964,
"startIndex": 226,
"text": "redmond",
"type": "Weather.Location"
}
],
"dimension": [
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 355,
"text": "3pm",
"type": "builtin.dimension"
},
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 370,
"text": "5pm",
"type": "builtin.dimension"
}
],
"end": [
{
"length": 11,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "end",
"startIndex": 81,
"text": "8 years old",
"type": "builtin.age"
}
],
"geographyV2": [
{
"length": 6,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 212,
"text": "hawaii",
"type": "builtin.geographyV2.state"
},
{
"length": 7,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 226,
"text": "redmond",
"type": "builtin.geographyV2.city"
}
],
"leave": [
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "leave",
"startIndex": 355,
"text": "3pm",
"type": "builtin.datetimeV2.time"
}
],
"length": [
{
"length": 8,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "length",
"startIndex": 0,
"text": "3 inches",
"type": "builtin.dimension"
}
],
"likee": [
{
"length": 4,
"modelType": "Entity Extractor",
"modelTypeId": 1,
"recognitionSources": [
"model"
],
"role": "likee",
"score": 0.9900547,
"startIndex": 340,
"text": "mary",
"type": "Name"
}
],
"liker": [
{
"length": 4,
"modelType": "Entity Extractor",
"modelTypeId": 1,
"recognitionSources": [
"model"
],
"role": "liker",
"score": 0.992201567,
"startIndex": 329,
"text": "john",
"type": "Name"
}
],
"max": [
{
"length": 4,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "max",
"startIndex": 399,
"text": "$500",
"type": "builtin.currency"
}
],
"maximum": [
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "maximum",
"startIndex": 41,
"text": "10%",
"type": "builtin.percentage"
}
],
"min": [
{
"length": 4,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "min",
"startIndex": 390,
"text": "$400",
"type": "builtin.currency"
}
],
"minimum": [
{
"length": 2,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "minimum",
"startIndex": 35,
"text": "5%",
"type": "builtin.percentage"
}
],
"newPhone": [
{
"length": 12,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "newPhone",
"score": 0.9,
"startIndex": 152,
"text": "206-666-4123",
"type": "builtin.phonenumber"
}
],
"number": [
{
"length": 2,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 299,
"text": "68",
"type": "builtin.number"
},
{
"length": 2,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 314,
"text": "72",
"type": "builtin.number"
},
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 391,
"text": "400",
"type": "builtin.number"
},
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 400,
"text": "500",
"type": "builtin.number"
}
],
"old": [
{
"length": 12,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "old",
"score": 0.9,
"startIndex": 136,
"text": "425-777-1212",
"type": "builtin.phonenumber"
}
],
"oldURL": [
{
"length": 14,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "oldURL",
"startIndex": 238,
"text": "http://foo.com",
"type": "builtin.url"
}
],
"personName": [
{
"length": 4,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 329,
"text": "john",
"type": "builtin.personName"
},
{
"length": 4,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 340,
"text": "mary",
"type": "builtin.personName"
}
],
"receiver": [
{
"length": 18,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "receiver",
"startIndex": 413,
"text": "[email protected]",
"type": "builtin.email"
}
],
"sell": [
{
"length": 5,
"modelType": "Regex Entity Extractor",
"modelTypeId": 8,
"recognitionSources": [
"model"
],
"role": "sell",
"startIndex": 109,
"text": "kb457",
"type": "Part"
}
],
"Seller": [
{
"length": 6,
"modelType": "List Entity Extractor",
"modelTypeId": 5,
"recognitionSources": [
"model"
],
"role": "Seller",
"startIndex": 183,
"text": "virgin",
"type": "Airline"
}
],
"sender": [
{
"length": 14,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "sender",
"startIndex": 437,
"text": "[email protected]",
"type": "builtin.email"
}
],
"source": [
{
"length": 6,
"modelType": "Entity Extractor",
"modelTypeId": 1,
"recognitionSources": [
"model"
],
"role": "source",
"score": 0.9713092,
"startIndex": 212,
"text": "hawaii",
"type": "Weather.Location"
}
],
"width": [
{
"length": 8,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"role": "width",
"startIndex": 17,
"text": "2 inches",
"type": "builtin.dimension"
}
]
},
"a": [
{
"number": 68,
"unit": "Degree"
}
],
"arrive": [
{
"type": "time",
"values": [
{
"timex": "T17",
"value": "17:00:00"
}
]
}
],
"b": [
{
"number": 72,
"unit": "Degree"
}
],
"begin": [
{
"number": 6,
"unit": "Year"
}
],
"buy": [
"kb922"
],
"Buyer": [
[
"Delta"
]
],
"Composite1": [
{
"$instance": {
"datetimeV2": [
{
"length": 7,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 65,
"text": "6 years",
"type": "builtin.datetimeV2.duration"
},
{
"length": 7,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 81,
"text": "8 years",
"type": "builtin.datetimeV2.duration"
}
],
"number": [
{
"length": 1,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 0,
"text": "3",
"type": "builtin.number"
},
{
"length": 1,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 17,
"text": "2",
"type": "builtin.number"
},
{
"length": 1,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 35,
"text": "5",
"type": "builtin.number"
},
{
"length": 2,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 41,
"text": "10",
"type": "builtin.number"
},
{
"length": 1,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 65,
"text": "6",
"type": "builtin.number"
},
{
"length": 1,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 81,
"text": "8",
"type": "builtin.number"
},
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 136,
"text": "425",
"type": "builtin.number"
},
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 140,
"text": "777",
"type": "builtin.number"
},
{
"length": 4,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 144,
"text": "1212",
"type": "builtin.number"
},
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 152,
"text": "206",
"type": "builtin.number"
},
{
"length": 3,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 156,
"text": "666",
"type": "builtin.number"
},
{
"length": 4,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 160,
"text": "4123",
"type": "builtin.number"
}
]
},
"datetimeV2": [
{
"type": "duration",
"values": [
{
"timex": "P6Y",
"value": "189216000"
}
]
},
{
"type": "duration",
"values": [
{
"timex": "P8Y",
"value": "252288000"
}
]
}
],
"number": [
3,
2,
5,
10,
6,
8,
425,
777,
1212,
206,
666,
4123
]
}
],
"Composite2": [
{
"$instance": {
"url": [
{
"length": 15,
"modelType": "Prebuilt Entity Extractor",
"modelTypeId": 2,
"recognitionSources": [
"model"
],
"startIndex": 264,
"text": "http://blah.com",
"type": "builtin.url"
}
]
},
"url": [
"http://blah.com"
]
}
],
"destination": [
"redmond"
],
"dimension": [
{
"number": 3,
"unit": "Picometer"
},
{
"number": 5,
"unit": "Picometer"
}
],
"end": [
{
"number": 8,
"unit": "Year"
}
],
"geographyV2": [
{
"type": "state",
"value": "hawaii"
},
{
"type": "city",
"value": "redmond"
}
],
"leave": [
{
"type": "time",
"values": [
{
"timex": "T15",
"value": "15:00:00"
}
]
}
],
"length": [
{
"number": 3,
"unit": "Inch"
}
],
"likee": [
"mary"
],
"liker": [
"john"
],
"max": [
{
"number": 500,
"unit": "Dollar"
}
],
"maximum": [
10
],
"min": [
{
"number": 400,
"unit": "Dollar"
}
],
"minimum": [
5
],
"newPhone": [
"206-666-4123"
],
"number": [
68,
72,
400,
500
],
"old": [
"425-777-1212"
],
"oldURL": [
"http://foo.com"
],
"personName": [
"john",
"mary"
],
"receiver": [
"[email protected]"
],
"sell": [
"kb457"
],
"Seller": [
[
"Virgin"
]
],
"sender": [
"[email protected]"
],
"source": [
"hawaii"
],
"width": [
{
"number": 2,
"unit": "Inch"
}
]
},
"intents": {
"Cancel": {
"score": 4.50860341e-7
},
"Delivery": {
"score": 0.00007978094
},
"EntityTests": {
"score": 0.0046325135
},
"Greeting": {
"score": 4.73494453e-7
},
"Help": {
"score": 7.622754e-7
},
"None": {
"score": 0.00093744183
},
"Roles": {
"score": 1
},
"search": {
"score": 0.07635335
},
"SpecifyName": {
"score": 0.00009136085
},
"Travel": {
"score": 0.00771805458
},
"Weather.GetForecast": {
"score": 0.0100867962
}
},
"normalizedQuery": "3 inches long by 2 inches wide and 5% to 10% and are you between 6 years old and 8 years old and can i trade kb457 for kb922 and change 425-777-1212 to 206-666-4123 and did delta buy virgin and did the rain from hawaii get to redmond and http://foo.com changed to http://blah.com and i like between 68 degrees and 72 degrees and john likes mary and leave 3pm and arrive 5pm and pay between $400 and $500 and send [email protected] from [email protected]",
"sentiment": {
"label": "neutral",
"score": 0.5
},
"topIntent": "Roles"
},
"query": "3 inches long by 2 inches wide and 5% to 10% and are you between 6 years old and 8 years old and can i trade kb457 for kb922 and change 425-777-1212 to 206-666-4123 and did delta buy virgin and did the rain from hawaii get to redmond and http://foo.com changed to http://blah.com and i like between 68 degrees and 72 degrees and john likes mary and leave 3pm and arrive 5pm and pay between $400 and $500 and send [email protected] from [email protected]"
},
"options": {
"includeAllIntents": true,
"includeAPIResults": true,
"includeInstanceData": true,
"log": true,
"preferExternalEntities": true,
"slot": "production"
}
}
}
|
botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/roles_v3.json/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-ai/tests/luis/test_data/roles_v3.json",
"repo_id": "botbuilder-python",
"token_count": 30318
}
| 383 |
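The JSON record above is a captured LUIS v3 prediction used as test fixture data: the arrays directly under "entities" hold resolved values (grouped by role where one is assigned), while the parallel "$instance" block carries the span metadata (startIndex, length, score, model type) for each of those values. A minimal sketch of pairing the two views with only the standard library, over a hypothetical trimmed subset of the payload:

import json

# Hypothetical subset of the prediction above; variable names are illustrative.
prediction = json.loads("""
{
  "entities": {
    "$instance": {
      "liker": [{"startIndex": 329, "length": 4, "text": "john", "type": "Name"}],
      "likee": [{"startIndex": 340, "length": 4, "text": "mary", "type": "Name"}]
    },
    "liker": ["john"],
    "likee": ["mary"]
  }
}
""")

entities = prediction["entities"]
for role, values in entities.items():
    if role == "$instance":
        continue
    # Pair each resolved value with its span metadata from $instance.
    for value, meta in zip(values, entities["$instance"].get(role, [])):
        print(role, value, meta["startIndex"], meta["length"])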
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Flask Telemetry Bot Middleware."""
from io import BytesIO
from threading import current_thread
# Map of thread id => POST body text
_REQUEST_BODIES = {}
def retrieve_flask_body():
"""retrieve_flask_body
Retrieve the POST body text from temporary cache.
    The POST body corresponds with the thread id and should reside in
    the cache just for the lifetime of the request.
"""
result = _REQUEST_BODIES.pop(current_thread().ident, None)
return result
class BotTelemetryMiddleware:
"""Bot Telemetry Middleware
Save off the POST body to later populate bot-specific properties to
add to Application Insights.
Example adding telemetry middleware to Flask:
app = Flask(__name__)
app.wsgi_app = BotTelemetryMiddleware(app.wsgi_app)
"""
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
self.process_request(environ)
return self.app(environ, start_response)
def process_request(self, environ) -> bool:
"""Process the incoming Flask request."""
# Bot Service doesn't handle anything over 256k
length = int(environ.get("CONTENT_LENGTH", "0"))
if length > 256 * 1024:
print(f"request too long - rejected")
else:
body_bytes = environ["wsgi.input"].read(length)
environ["wsgi.input"] = BytesIO(body_bytes)
body_unicode = body_bytes.decode("utf-8")
# Sanity check JSON
if body_unicode is not None:
# Integration layer expecting just the json text.
_REQUEST_BODIES[current_thread().ident] = body_unicode
return True
|
botbuilder-python/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/flask/flask_telemetry_middleware.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-applicationinsights/botbuilder/applicationinsights/flask/flask_telemetry_middleware.py",
"repo_id": "botbuilder-python",
"token_count": 656
}
| 384 |
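A minimal usage sketch for the middleware above, assuming Flask is installed and that the class is importable from the module path shown in the record metadata; the route and response payload are illustrative only.

from flask import Flask, jsonify
from botbuilder.applicationinsights.flask.flask_telemetry_middleware import (
    BotTelemetryMiddleware,
    retrieve_flask_body,
)

app = Flask(__name__)
# Wrap the WSGI app so every POST body is cached per request thread.
app.wsgi_app = BotTelemetryMiddleware(app.wsgi_app)

@app.route("/api/messages", methods=["POST"])
def messages():
    # Pop the cached body for the current thread; it is stored only for this request.
    body_text = retrieve_flask_body()
    return jsonify({"received_bytes": len(body_text or "")})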
"""Implements a CosmosDB based storage provider.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from hashlib import sha256
from typing import Dict, List
from threading import Semaphore
import json
import warnings
from jsonpickle.pickler import Pickler
from jsonpickle.unpickler import Unpickler
import azure.cosmos.cosmos_client as cosmos_client # pylint: disable=no-name-in-module,import-error
import azure.cosmos.errors as cosmos_errors # pylint: disable=no-name-in-module,import-error
from botbuilder.core.storage import Storage
class CosmosDbConfig:
"""The class for CosmosDB configuration for the Azure Bot Framework."""
def __init__(
self,
endpoint: str = None,
masterkey: str = None,
database: str = None,
container: str = None,
partition_key: str = None,
database_creation_options: dict = None,
container_creation_options: dict = None,
**kwargs,
):
"""Create the Config object.
:param endpoint:
:param masterkey:
:param database:
:param container:
:param filename:
:return CosmosDbConfig:
"""
self.__config_file = kwargs.get("filename")
        if self.__config_file:
            with open(self.__config_file) as config_file:
                kwargs = json.load(config_file)
self.endpoint = endpoint or kwargs.get("endpoint")
self.masterkey = masterkey or kwargs.get("masterkey")
self.database = database or kwargs.get("database", "bot_db")
self.container = container or kwargs.get("container", "bot_container")
self.partition_key = partition_key or kwargs.get("partition_key")
self.database_creation_options = database_creation_options or kwargs.get(
"database_creation_options"
)
self.container_creation_options = container_creation_options or kwargs.get(
"container_creation_options"
)
class CosmosDbKeyEscape:
@staticmethod
def sanitize_key(
key: str, key_suffix: str = "", compatibility_mode: bool = True
) -> str:
"""Return the sanitized key.
Replace characters that are not allowed in keys in Cosmos.
:param key: The provided key to be escaped.
        :param key_suffix: The string to add at the end of all RowKeys.
:param compatibility_mode: True if keys should be truncated in order to support previous CosmosDb
max key length of 255. This behavior can be overridden by setting
cosmosdb_partitioned_config.compatibility_mode to False.
:return str:
"""
# forbidden characters
bad_chars = ["\\", "?", "/", "#", "\t", "\n", "\r", "*"]
        # replace those with '*' and the
# Unicode code point of the character and return the new string
key = "".join(map(lambda x: "*" + str(ord(x)) if x in bad_chars else x, key))
if key_suffix is None:
key_suffix = ""
return CosmosDbKeyEscape.truncate_key(f"{key}{key_suffix}", compatibility_mode)
@staticmethod
def truncate_key(key: str, compatibility_mode: bool = True) -> str:
max_key_len = 255
if not compatibility_mode:
return key
if len(key) > max_key_len:
aux_hash = sha256(key.encode("utf-8"))
aux_hex = aux_hash.hexdigest()
key = key[0 : max_key_len - len(aux_hex)] + aux_hex
return key
class CosmosDbStorage(Storage):
"""A CosmosDB based storage provider for a bot."""
def __init__(
self, config: CosmosDbConfig, client: cosmos_client.CosmosClient = None
):
"""Create the storage object.
:param config:
"""
super(CosmosDbStorage, self).__init__()
warnings.warn(
"CosmosDbStorage is obsolete. Use CosmosDbPartitionedStorage instead."
)
self.config = config
self.client = client or cosmos_client.CosmosClient(
self.config.endpoint, {"masterKey": self.config.masterkey}
)
# these are set by the functions that check
# the presence of the database and container or creates them
self.database = None
self.container = None
self._database_creation_options = config.database_creation_options
self._container_creation_options = config.container_creation_options
self.__semaphore = Semaphore()
async def read(self, keys: List[str]) -> Dict[str, object]:
"""Read storeitems from storage.
:param keys:
:return dict:
"""
try:
# check if the database and container exists and if not create
if not self.__container_exists:
self.__create_db_and_container()
if keys:
# create the parameters object
parameters = [
{
"name": f"@id{i}",
"value": f"{CosmosDbKeyEscape.sanitize_key(key)}",
}
for i, key in enumerate(keys)
]
# get the names of the params
parameter_sequence = ",".join(param.get("name") for param in parameters)
# create the query
query = {
"query": f"SELECT c.id, c.realId, c.document, c._etag FROM c WHERE c.id in ({parameter_sequence})",
"parameters": parameters,
}
if self.config.partition_key:
options = {"partitionKey": self.config.partition_key}
else:
options = {"enableCrossPartitionQuery": True}
# run the query and store the results as a list
results = list(
self.client.QueryItems(self.__container_link, query, options)
)
# return a dict with a key and an object
return {r.get("realId"): self.__create_si(r) for r in results}
# No keys passed in, no result to return.
return {}
except TypeError as error:
raise error
async def write(self, changes: Dict[str, object]):
"""Save storeitems to storage.
:param changes:
:return:
"""
if changes is None:
raise Exception("Changes are required when writing")
if not changes:
return
try:
# check if the database and container exists and if not create
if not self.__container_exists:
self.__create_db_and_container()
# iterate over the changes
for key, change in changes.items():
# store the e_tag
e_tag = None
if isinstance(change, dict):
e_tag = change.get("e_tag", None)
elif hasattr(change, "e_tag"):
e_tag = change.e_tag
# create the new document
doc = {
"id": CosmosDbKeyEscape.sanitize_key(key),
"realId": key,
"document": self.__create_dict(change),
}
if e_tag == "":
raise Exception("cosmosdb_storage.write(): etag missing")
# the e_tag will be * for new docs so do an insert
if e_tag == "*" or not e_tag:
self.client.UpsertItem(
database_or_Container_link=self.__container_link,
document=doc,
options={"disableAutomaticIdGeneration": True},
)
# if we have an etag, do opt. concurrency replace
elif e_tag:
access_condition = {"type": "IfMatch", "condition": e_tag}
self.client.ReplaceItem(
document_link=self.__item_link(
CosmosDbKeyEscape.sanitize_key(key)
),
new_document=doc,
options={"accessCondition": access_condition},
)
except Exception as error:
raise error
async def delete(self, keys: List[str]):
"""Remove storeitems from storage.
:param keys:
:return:
"""
try:
# check if the database and container exists and if not create
if not self.__container_exists:
self.__create_db_and_container()
options = {}
if self.config.partition_key:
options["partitionKey"] = self.config.partition_key
# call the function for each key
for key in keys:
self.client.DeleteItem(
document_link=self.__item_link(CosmosDbKeyEscape.sanitize_key(key)),
options=options,
)
# print(res)
except cosmos_errors.HTTPFailure as http_failure:
# print(h.status_code)
if http_failure.status_code != 404:
raise http_failure
except TypeError as error:
raise error
def __create_si(self, result) -> object:
"""Create an object from a result out of CosmosDB.
:param result:
:return object:
"""
# get the document item from the result and turn into a dict
doc = result.get("document")
# read the e_tag from Cosmos
if result.get("_etag"):
doc["e_tag"] = result["_etag"]
result_obj = Unpickler().restore(doc)
# create and return the object
return result_obj
def __create_dict(self, store_item: object) -> Dict:
"""Return the dict of an object.
This eliminates non_magic attributes and the e_tag.
:param store_item:
:return dict:
"""
# read the content
json_dict = Pickler().flatten(store_item)
if "e_tag" in json_dict:
del json_dict["e_tag"]
# loop through attributes and write and return a dict
return json_dict
def __item_link(self, identifier) -> str:
"""Return the item link of a item in the container.
:param identifier:
:return str:
"""
return self.__container_link + "/docs/" + identifier
@property
def __container_link(self) -> str:
"""Return the container link in the database.
:param:
:return str:
"""
return self.__database_link + "/colls/" + self.container
@property
def __database_link(self) -> str:
"""Return the database link.
:return str:
"""
return "dbs/" + self.database
@property
def __container_exists(self) -> bool:
"""Return whether the database and container have been created.
:return bool:
"""
return self.database and self.container
def __create_db_and_container(self):
"""Call the get or create methods."""
with self.__semaphore:
db_id = self.config.database
container_name = self.config.container
self.database = self._get_or_create_database(self.client, db_id)
self.container = self._get_or_create_container(self.client, container_name)
def _get_or_create_database( # pylint: disable=invalid-name
self, doc_client, id
) -> str:
"""Return the database link.
Check if the database exists or create the database.
:param doc_client:
:param id:
:return str:
"""
# query CosmosDB for a database with that name/id
dbs = list(
doc_client.QueryDatabases(
{
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [{"name": "@id", "value": id}],
}
)
)
# if there are results, return the first (database names are unique)
if dbs:
return dbs[0]["id"]
# create the database if it didn't exist
res = doc_client.CreateDatabase({"id": id}, self._database_creation_options)
return res["id"]
def _get_or_create_container(self, doc_client, container) -> str:
"""Return the container link.
Check if the container exists or create the container.
:param doc_client:
:param container:
:return str:
"""
# query CosmosDB for a container in the database with that name
containers = list(
doc_client.QueryContainers(
self.__database_link,
{
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [{"name": "@id", "value": container}],
},
)
)
# if there are results, return the first (container names are unique)
if containers:
return containers[0]["id"]
# Create a container if it didn't exist
res = doc_client.CreateContainer(
self.__database_link, {"id": container}, self._container_creation_options
)
return res["id"]
|
botbuilder-python/libraries/botbuilder-azure/botbuilder/azure/cosmosdb_storage.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-azure/botbuilder/azure/cosmosdb_storage.py",
"repo_id": "botbuilder-python",
"token_count": 6122
}
| 385 |
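The only part of this module that can be exercised without a live Cosmos DB account is the key-escaping helper; a small sketch, assuming the import path matches the file path shown above:

from botbuilder.azure.cosmosdb_storage import CosmosDbKeyEscape

# Forbidden characters are replaced with '*' plus their Unicode code point.
print(CosmosDbKeyEscape.sanitize_key("user/1234?state"))  # user*471234*63state

# In compatibility mode, over-long keys are truncated and suffixed with a
# sha256 hex digest so they stay within the legacy 255-character limit.
print(len(CosmosDbKeyEscape.sanitize_key("x" * 300)))  # 255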
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
from .turn_context import TurnContext
class Bot(ABC):
"""
Represents a bot that can operate on incoming activities.
"""
@abstractmethod
async def on_turn(self, context: TurnContext):
"""
When implemented in a bot, handles an incoming activity.
:param context: The context object for this turn.
:return:
"""
raise NotImplementedError()
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/bot.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/bot.py",
"repo_id": "botbuilder-python",
"token_count": 180
}
| 386 |
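Bot is purely abstract, so a concrete implementation only has to supply on_turn. A minimal sketch of such a subclass (the EchoBot name and the echo behavior are illustrative, not part of the library):

from botbuilder.core import Bot, TurnContext

class EchoBot(Bot):
    # Illustrative subclass: echo message activities back to the user.
    async def on_turn(self, context: TurnContext):
        if context.activity.type == "message":
            await context.send_activity(f"You said: {context.activity.text}")

An adapter (for example the aiohttp integration) would then drive this bot by calling on_turn once per incoming activity.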
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Any
from botbuilder.core import TurnContext
from botbuilder.schema import Activity, ConversationReference
from botframework.connector.aio import ConnectorClient
from botframework.connector.auth import MicrosoftAppCredentials
class InspectionSession:
def __init__(
self,
conversation_reference: ConversationReference,
credentials: MicrosoftAppCredentials,
):
self._conversation_reference = conversation_reference
self._connector_client = ConnectorClient(
credentials, base_url=conversation_reference.service_url
)
async def send(self, activity: Activity) -> Any:
TurnContext.apply_conversation_reference(activity, self._conversation_reference)
try:
await self._connector_client.conversations.send_to_conversation(
activity.conversation.id, activity
)
except Exception:
return False
return True
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/inspection/inspection_session.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/inspection/inspection_session.py",
"repo_id": "botbuilder-python",
"token_count": 374
}
| 387 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC, abstractmethod
from botframework.connector import ConnectorClient
from botframework.connector.auth import ClaimsIdentity
class ConnectorClientBuilder(ABC):
"""
Abstraction to build connector clients.
"""
@abstractmethod
async def create_connector_client(
self, service_url: str, identity: ClaimsIdentity = None, audience: str = None
) -> ConnectorClient:
"""
Creates the connector client asynchronous.
:param service_url: The service URL.
        :param identity: The claims identity.
:param audience: The target audience for the connector.
:return: ConnectorClient instance
"""
raise NotImplementedError()
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/oauth/connector_client_builder.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/oauth/connector_client_builder.py",
"repo_id": "botbuilder-python",
"token_count": 265
}
| 388 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from logging import Logger
from botbuilder.core import BotAdapter, Bot, CloudChannelServiceHandler
from botbuilder.schema import Activity, ResourceResponse
from botframework.connector.auth import BotFrameworkAuthentication, ClaimsIdentity
from .conversation_id_factory import ConversationIdFactoryBase
from .skill_handler import SkillHandler
from ._skill_handler_impl import _SkillHandlerImpl
class CloudSkillHandler(CloudChannelServiceHandler):
SKILL_CONVERSATION_REFERENCE_KEY = SkillHandler.SKILL_CONVERSATION_REFERENCE_KEY
def __init__(
self,
adapter: BotAdapter,
bot: Bot,
conversation_id_factory: ConversationIdFactoryBase,
auth: BotFrameworkAuthentication,
logger: Logger = None,
):
super().__init__(auth)
if not adapter:
raise TypeError("adapter can't be None")
if not bot:
raise TypeError("bot can't be None")
if not conversation_id_factory:
raise TypeError("conversation_id_factory can't be None")
self._inner = _SkillHandlerImpl(
self.SKILL_CONVERSATION_REFERENCE_KEY,
adapter,
bot,
conversation_id_factory,
auth.get_originating_audience,
logger,
)
async def on_send_to_conversation(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity: Activity,
) -> ResourceResponse:
"""
send_to_conversation() API for Skill
This method allows you to send an activity to the end of a conversation.
This is slightly different from ReplyToActivity().
* SendToConversation(conversation_id) - will append the activity to the end
of the conversation according to the timestamp or semantics of the channel.
* ReplyToActivity(conversation_id,ActivityId) - adds the activity as a reply
to another activity, if the channel supports it. If the channel does not
support nested replies, ReplyToActivity falls back to SendToConversation.
Use ReplyToActivity when replying to a specific activity in the
conversation.
Use SendToConversation in all other cases.
:param claims_identity: Claims identity for the bot.
:type claims_identity: :class:`botframework.connector.auth.ClaimsIdentity`
        :param conversation_id: The conversation ID.
:type conversation_id: str
:param activity: Activity to send.
:type activity: Activity
:return:
"""
return await self._inner.on_send_to_conversation(
claims_identity,
conversation_id,
activity,
)
async def on_reply_to_activity(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity_id: str,
activity: Activity,
) -> ResourceResponse:
"""
reply_to_activity() API for Skill.
This method allows you to reply to an activity.
This is slightly different from SendToConversation().
* SendToConversation(conversation_id) - will append the activity to the end
of the conversation according to the timestamp or semantics of the channel.
* ReplyToActivity(conversation_id,ActivityId) - adds the activity as a reply
to another activity, if the channel supports it. If the channel does not
support nested replies, ReplyToActivity falls back to SendToConversation.
Use ReplyToActivity when replying to a specific activity in the
conversation.
Use SendToConversation in all other cases.
:param claims_identity: Claims identity for the bot.
:type claims_identity: :class:`botframework.connector.auth.ClaimsIdentity`
        :param conversation_id: The conversation ID.
:type conversation_id: str
:param activity_id: Activity ID to send.
:type activity_id: str
:param activity: Activity to send.
:type activity: Activity
:return:
"""
return await self._inner.on_reply_to_activity(
claims_identity,
conversation_id,
activity_id,
activity,
)
async def on_delete_activity(
self, claims_identity: ClaimsIdentity, conversation_id: str, activity_id: str
):
await self._inner.on_delete_activity(
claims_identity, conversation_id, activity_id
)
async def on_update_activity(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity_id: str,
activity: Activity,
) -> ResourceResponse:
return await self._inner.on_update_activity(
claims_identity, conversation_id, activity_id, activity
)
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/skills/cloud_skill_handler.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/skills/cloud_skill_handler.py",
"repo_id": "botbuilder-python",
"token_count": 1897
}
| 389 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.schema import Activity
from botbuilder.schema.teams import (
NotificationInfo,
TeamsChannelData,
TeamInfo,
TeamsMeetingInfo,
)
def teams_get_channel_data(activity: Activity) -> TeamsChannelData:
if not activity:
return None
if activity.channel_data:
return TeamsChannelData().deserialize(activity.channel_data)
return None
def teams_get_channel_id(activity: Activity) -> str:
if not activity:
return None
if activity.channel_data:
channel_data = TeamsChannelData().deserialize(activity.channel_data)
return channel_data.channel.id if channel_data.channel else None
return None
def teams_get_team_info(activity: Activity) -> TeamInfo:
if not activity:
return None
if activity.channel_data:
channel_data = TeamsChannelData().deserialize(activity.channel_data)
return channel_data.team
return None
def teams_notify_user(
activity: Activity, alert_in_meeting: bool = None, external_resource_url: str = None
):
if not activity:
return
if not activity.channel_data:
activity.channel_data = {}
channel_data = TeamsChannelData().deserialize(activity.channel_data)
channel_data.notification = NotificationInfo(alert=True)
channel_data.notification.alert_in_meeting = alert_in_meeting
channel_data.notification.external_resource_url = external_resource_url
activity.channel_data = channel_data
def teams_get_meeting_info(activity: Activity) -> TeamsMeetingInfo:
if not activity:
return None
if activity.channel_data:
channel_data = TeamsChannelData().deserialize(activity.channel_data)
return channel_data.meeting
return None
|
botbuilder-python/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_extensions.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/botbuilder/core/teams/teams_activity_extensions.py",
"repo_id": "botbuilder-python",
"token_count": 632
}
| 390 |
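A small sketch of the read-only helpers above, built against a hand-rolled channel_data payload shaped the way Teams sends it; the ids are made up and the package-level re-export of these functions is assumed:

from botbuilder.schema import Activity
from botbuilder.core.teams import teams_get_channel_id, teams_get_team_info

# channel_data as Teams would deliver it; ids below are placeholders.
activity = Activity(
    type="message",
    channel_data={
        "channel": {"id": "19:channel-id@thread.skype"},
        "team": {"id": "19:team-id@thread.skype"},
    },
)
print(teams_get_channel_id(activity))    # 19:channel-id@thread.skype
print(teams_get_team_info(activity).id)  # 19:team-id@thread.skype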
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
from typing import List, Tuple, Awaitable, Callable
from botbuilder.core import BotAdapter, TurnContext
from botbuilder.schema import (
Activity,
ConversationReference,
ResourceResponse,
ConversationParameters,
)
from botbuilder.schema.teams import TeamsChannelAccount
class SimpleAdapter(BotAdapter):
# pylint: disable=unused-argument
def __init__(
self,
call_on_send=None,
call_on_update=None,
call_on_delete=None,
call_create_conversation=None,
):
super(SimpleAdapter, self).__init__()
self.test_aux = unittest.TestCase("__init__")
self._call_on_send = call_on_send
self._call_on_update = call_on_update
self._call_on_delete = call_on_delete
self._call_create_conversation = call_create_conversation
async def delete_activity(
self, context: TurnContext, reference: ConversationReference
):
self.test_aux.assertIsNotNone(
reference, "SimpleAdapter.delete_activity: missing reference"
)
if self._call_on_delete is not None:
self._call_on_delete(reference)
async def send_activities(
self, context: TurnContext, activities: List[Activity]
) -> List[ResourceResponse]:
self.test_aux.assertIsNotNone(
activities, "SimpleAdapter.delete_activity: missing reference"
)
self.test_aux.assertTrue(
len(activities) > 0,
"SimpleAdapter.send_activities: empty activities array.",
)
if self._call_on_send is not None:
self._call_on_send(activities)
responses = []
for activity in activities:
responses.append(ResourceResponse(id=activity.id))
return responses
async def create_conversation( # pylint: disable=arguments-differ
self,
reference: ConversationReference,
logic: Callable[[TurnContext], Awaitable] = None,
conversation_parameters: ConversationParameters = None,
) -> Tuple[ConversationReference, str]:
if self._call_create_conversation is not None:
self._call_create_conversation()
async def update_activity(self, context: TurnContext, activity: Activity):
self.test_aux.assertIsNotNone(
activity, "SimpleAdapter.update_activity: missing activity"
)
if self._call_on_update is not None:
self._call_on_update(activity)
return ResourceResponse(id=activity.id)
async def process_request(self, activity, handler):
context = TurnContext(self, activity)
return await self.run_pipeline(context, handler)
async def create_connector_client(self, service_url: str):
return TestConnectorClient()
class TestConnectorClient:
def __init__(self) -> None:
self.conversations = TestConversations()
class TestConversations:
async def get_conversation_member( # pylint: disable=unused-argument
self, conversation_id, member_id
):
return TeamsChannelAccount(id=member_id)
|
botbuilder-python/libraries/botbuilder-core/tests/simple_adapter.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/tests/simple_adapter.py",
"repo_id": "botbuilder-python",
"token_count": 1237
}
| 391 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import aiounittest
from botbuilder.core import ChannelServiceHandler
from botframework.connector.auth import (
AuthenticationConfiguration,
ClaimsIdentity,
SimpleCredentialProvider,
JwtTokenValidation,
AuthenticationConstants,
)
import botbuilder.schema
class TestChannelServiceHandler(ChannelServiceHandler):
def __init__(self):
self.claims_identity = None
ChannelServiceHandler.__init__(
self, SimpleCredentialProvider("", ""), AuthenticationConfiguration()
)
async def on_reply_to_activity(
self,
claims_identity: ClaimsIdentity,
conversation_id: str,
activity_id: str,
activity: botbuilder.schema.Activity,
) -> botbuilder.schema.ResourceResponse:
self.claims_identity = claims_identity
return botbuilder.schema.ResourceResponse()
class ChannelServiceHandlerTests(aiounittest.AsyncTestCase):
async def test_should_authenticate_anonymous_skill_claim(self):
sut = TestChannelServiceHandler()
await sut.handle_reply_to_activity(None, "123", "456", {})
assert (
sut.claims_identity.authentication_type
== AuthenticationConstants.ANONYMOUS_AUTH_TYPE
)
assert (
JwtTokenValidation.get_app_id_from_claims(sut.claims_identity.claims)
== AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
)
|
botbuilder-python/libraries/botbuilder-core/tests/test_channel_service_handler.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-core/tests/test_channel_service_handler.py",
"repo_id": "botbuilder-python",
"token_count": 577
}
| 392 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class ModelResult:
"""Contains recognition result information."""
def __init__(
self, text: str, start: int, end: int, type_name: str, resolution: object
):
"""
Parameters:
----------
text: Substring of the utterance that was recognized.
start: Start character position of the recognized substring.
end: The end character position of the recognized substring.
type_name: The type of the entity that was recognized.
resolution: The recognized entity object.
"""
self.text = text
self.start = start
self.end = end
self.type_name = type_name
self.resolution = resolution
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/model_result.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/choices/model_result.py",
"repo_id": "botbuilder-python",
"token_count": 286
}
| 393 |
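A tiny construction example for ModelResult, showing that start and end are inclusive character positions into the recognized utterance (the type name and resolution payload are illustrative):

from botbuilder.dialogs.choices import ModelResult

utterance = "i would like the red one"
result = ModelResult(
    text="red",
    start=17,
    end=19,
    type_name="color",            # illustrative type name
    resolution={"value": "red"},  # illustrative resolution payload
)
assert utterance[result.start : result.end + 1] == result.text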
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List
from .dialog_instance import DialogInstance
class DialogState:
"""
Contains state information for the dialog stack.
"""
def __init__(self, stack: List[DialogInstance] = None):
"""
Initializes a new instance of the :class:`DialogState` class.
:param stack: The state information to initialize the stack with.
:type stack: :class:`typing.List`
"""
if stack is None:
self._dialog_stack = []
else:
self._dialog_stack = stack
@property
def dialog_stack(self):
"""
        Gets the state information for the dialog stack.
        :return: The dialog stack.
:rtype: :class:`typing.List`
"""
return self._dialog_stack
def __str__(self):
if not self._dialog_stack:
return "dialog stack empty!"
return " ".join(map(str, self._dialog_stack))
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_state.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/dialog_state.py",
"repo_id": "botbuilder-python",
"token_count": 421
}
| 394 |
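A quick sketch of the default behavior: with no stack supplied the state starts empty and __str__ reports it.

from botbuilder.dialogs import DialogState

state = DialogState()
print(state)               # dialog stack empty!
print(state.dialog_stack)  # []

Passing a list of DialogInstance objects to the constructor seeds dialog_stack instead.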
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .alias_path_resolver import AliasPathResolver
class HashPathResolver(AliasPathResolver):
def __init__(self):
super().__init__(alias="#", prefix="turn.recognized.intents.")
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/path_resolvers/hash_path_resolver.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/memory/path_resolvers/hash_path_resolver.py",
"repo_id": "botbuilder-python",
"token_count": 88
}
| 395 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import Dict
from .persisted_state_keys import PersistedStateKeys
class PersistedState:
def __init__(self, keys: PersistedStateKeys = None, data: Dict[str, object] = None):
if keys and data:
self.user_state: Dict[str, object] = (
data[keys.user_state] if keys.user_state in data else {}
)
self.conversation_state: Dict[str, object] = (
data[keys.conversation_state] if keys.conversation_state in data else {}
)
else:
self.user_state: Dict[str, object] = {}
self.conversation_state: Dict[str, object] = {}
|
botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/persisted_state.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/botbuilder/dialogs/persisted_state.py",
"repo_id": "botbuilder-python",
"token_count": 315
}
| 396 |
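PersistedState only reads two attribute names off the keys object, so a stand-in namespace is enough to sketch the mapping; the key names and data below are illustrative:

from types import SimpleNamespace
from botbuilder.dialogs.persisted_state import PersistedState

# Stand-in for PersistedStateKeys: only user_state/conversation_state matter here.
keys = SimpleNamespace(user_state="user", conversation_state="conversation")
data = {"user": {"name": "kathleen"}, "conversation": {"topic": "weather"}}

state = PersistedState(keys=keys, data=data)
print(state.user_state)          # {'name': 'kathleen'}
print(state.conversation_state)  # {'topic': 'weather'}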
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
from botbuilder.dialogs.choices import ChoiceFactoryOptions
class ChoiceFactoryOptionsTest(unittest.TestCase):
def test_inline_separator_round_trips(self) -> None:
choice_factor_options = ChoiceFactoryOptions()
expected = ", "
choice_factor_options.inline_separator = expected
self.assertEqual(expected, choice_factor_options.inline_separator)
def test_inline_or_round_trips(self) -> None:
choice_factor_options = ChoiceFactoryOptions()
expected = " or "
choice_factor_options.inline_or = expected
self.assertEqual(expected, choice_factor_options.inline_or)
def test_inline_or_more_round_trips(self) -> None:
choice_factor_options = ChoiceFactoryOptions()
expected = ", or "
choice_factor_options.inline_or_more = expected
self.assertEqual(expected, choice_factor_options.inline_or_more)
def test_include_numbers_round_trips(self) -> None:
choice_factor_options = ChoiceFactoryOptions()
expected = True
choice_factor_options.include_numbers = expected
self.assertEqual(expected, choice_factor_options.include_numbers)
|
botbuilder-python/libraries/botbuilder-dialogs/tests/choices/test_choice_factory_options.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/tests/choices/test_choice_factory_options.py",
"repo_id": "botbuilder-python",
"token_count": 454
}
| 397 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import aiounittest
from botbuilder.dialogs import DialogSet
from botbuilder.core import MemoryStorage, ConversationState
class PromptValidatorContextTests(aiounittest.AsyncTestCase):
async def test_prompt_validator_context_end(self):
storage = MemoryStorage()
conv = ConversationState(storage)
accessor = conv.create_property("dialogstate")
dialog_set = DialogSet(accessor)
self.assertNotEqual(dialog_set, None)
# TODO: Add TestFlow
def test_prompt_validator_context_retry_end(self):
storage = MemoryStorage()
conv = ConversationState(storage)
accessor = conv.create_property("dialogstate")
dialog_set = DialogSet(accessor)
self.assertNotEqual(dialog_set, None)
# TODO: Add TestFlow
# All require Testflow!
|
botbuilder-python/libraries/botbuilder-dialogs/tests/test_prompt_validator_context.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-dialogs/tests/test_prompt_validator_context.py",
"repo_id": "botbuilder-python",
"token_count": 333
}
| 398 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from logging import Logger
from botbuilder.core import InvokeResponse
from botbuilder.integration.aiohttp import BotFrameworkHttpClient
from botbuilder.core.skills import (
ConversationIdFactoryBase,
SkillConversationIdFactoryOptions,
BotFrameworkSkill,
)
from botbuilder.schema import Activity
from botframework.connector.auth import (
AuthenticationConstants,
ChannelProvider,
GovernmentConstants,
SimpleCredentialProvider,
)
class SkillHttpClient(BotFrameworkHttpClient):
def __init__(
self,
credential_provider: SimpleCredentialProvider,
skill_conversation_id_factory: ConversationIdFactoryBase,
channel_provider: ChannelProvider = None,
logger: Logger = None,
):
if not skill_conversation_id_factory:
raise TypeError(
"SkillHttpClient(): skill_conversation_id_factory can't be None"
)
super().__init__(credential_provider)
self._skill_conversation_id_factory = skill_conversation_id_factory
self._channel_provider = channel_provider
async def post_activity_to_skill(
self,
from_bot_id: str,
to_skill: BotFrameworkSkill,
service_url: str,
activity: Activity,
originating_audience: str = None,
) -> InvokeResponse:
if originating_audience is None:
originating_audience = (
GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
if self._channel_provider is not None
and self._channel_provider.is_government()
else AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
)
options = SkillConversationIdFactoryOptions(
from_bot_oauth_scope=originating_audience,
from_bot_id=from_bot_id,
activity=activity,
bot_framework_skill=to_skill,
)
skill_conversation_id = (
await self._skill_conversation_id_factory.create_skill_conversation_id(
options
)
)
return await super().post_activity(
from_bot_id,
to_skill.app_id,
to_skill.skill_endpoint,
service_url,
skill_conversation_id,
activity,
)
|
botbuilder-python/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/skills/skill_http_client.py",
"repo_id": "botbuilder-python",
"token_count": 1052
}
| 399 |
include *.rst
include azure_bdist_wheel.py
|
botbuilder-python/libraries/botbuilder-schema/MANIFEST.in/0
|
{
"file_path": "botbuilder-python/libraries/botbuilder-schema/MANIFEST.in",
"repo_id": "botbuilder-python",
"token_count": 15
}
| 400 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import requests
from msrest.authentication import Authentication
from .authentication_constants import AuthenticationConstants
class AppCredentials(Authentication):
"""
Base class for token retrieval. Subclasses MUST override get_access_token in
order to supply a valid token for the specific credentials.
"""
schema = "Bearer"
cache = {}
__tenant = None
def __init__(
self,
app_id: str = None,
channel_auth_tenant: str = None,
oauth_scope: str = None,
):
"""
        Initializes a new instance of the AppCredentials class.
:param channel_auth_tenant: Optional. The oauth token tenant.
"""
self.microsoft_app_id = app_id
self.tenant = channel_auth_tenant
self.oauth_endpoint = (
self._get_to_channel_from_bot_loginurl_prefix() + self.tenant
)
self.oauth_scope = oauth_scope or self._get_to_channel_from_bot_oauthscope()
def _get_default_channelauth_tenant(self) -> str:
return AuthenticationConstants.DEFAULT_CHANNEL_AUTH_TENANT
def _get_to_channel_from_bot_loginurl_prefix(self) -> str:
return AuthenticationConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL_PREFIX
def _get_to_channel_from_bot_oauthscope(self) -> str:
return AuthenticationConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE
@property
def tenant(self) -> str:
return self.__tenant
@tenant.setter
def tenant(self, value: str):
self.__tenant = value or self._get_default_channelauth_tenant()
@staticmethod
def trust_service_url(service_url: str, expiration=None):
"""
Obsolete: trust_service_url is not a required part of the security model.
Checks if the service url is for a trusted host or not.
:param service_url: The service url.
:param expiration: The expiration time after which this service url is not trusted anymore.
"""
@staticmethod
def is_trusted_service(service_url: str) -> bool: # pylint: disable=unused-argument
"""
Obsolete: is_trusted_service is not a required part of the security model.
Checks if the service url is for a trusted host or not.
:param service_url: The service url.
:returns: True if the host of the service url is trusted; False otherwise.
"""
return True
@staticmethod
def _is_trusted_url(host: str) -> bool: # pylint: disable=unused-argument
"""
Obsolete: _is_trusted_url is not a required part of the security model.
"""
return True
# pylint: disable=arguments-differ
def signed_session(self, session: requests.Session = None) -> requests.Session:
"""
Gets the signed session. This is called by the msrest package
:returns: Signed requests.Session object
"""
if not session:
session = requests.Session()
if not self._should_set_token(session):
session.headers.pop("Authorization", None)
else:
auth_token = self.get_access_token()
header = "{} {}".format("Bearer", auth_token)
session.headers["Authorization"] = header
return session
def _should_set_token(
self, session: requests.Session # pylint: disable=unused-argument
) -> bool:
# We don't set the token if the AppId is not set, since it means that we are in an un-authenticated scenario.
return (
self.microsoft_app_id != AuthenticationConstants.ANONYMOUS_SKILL_APP_ID
and self.microsoft_app_id
)
def get_access_token(self, force_refresh: bool = False) -> str:
"""
Returns a token for the current AppCredentials.
:return: The token
"""
raise NotImplementedError()
|
botbuilder-python/libraries/botframework-connector/botframework/connector/auth/app_credentials.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/auth/app_credentials.py",
"repo_id": "botbuilder-python",
"token_count": 1548
}
| 401 |
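AppCredentials leaves get_access_token abstract, so the smallest useful subclass is a test double that returns a canned token; the class name, app id, and token below are made up:

import requests
from botframework.connector.auth import AppCredentials

class StaticTokenCredentials(AppCredentials):
    # Illustrative test double: returns a fixed token instead of calling AAD.
    def __init__(self, app_id: str, token: str):
        super().__init__(app_id=app_id)
        self._token = token

    def get_access_token(self, force_refresh: bool = False) -> str:
        return self._token

creds = StaticTokenCredentials("00000000-0000-0000-0000-000000000001", "fake-token")
session = creds.signed_session(requests.Session())
print(session.headers["Authorization"])  # Bearer fake-token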
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from abc import ABC
class GovernmentConstants(ABC):
"""
Government Channel Service property value
"""
CHANNEL_SERVICE = "https://botframework.azure.us"
"""
TO CHANNEL FROM BOT: Login URL
DEPRECATED: DO NOT USE
"""
TO_CHANNEL_FROM_BOT_LOGIN_URL = (
"https://login.microsoftonline.us/MicrosoftServices.onmicrosoft.us"
)
"""
TO CHANNEL FROM BOT: Login URL prefix
"""
TO_CHANNEL_FROM_BOT_LOGIN_URL_PREFIX = "https://login.microsoftonline.us/"
DEFAULT_CHANNEL_AUTH_TENANT = "MicrosoftServices.onmicrosoft.us"
"""
TO CHANNEL FROM BOT: OAuth scope to request
"""
TO_CHANNEL_FROM_BOT_OAUTH_SCOPE = "https://api.botframework.us"
"""
TO BOT FROM CHANNEL: Token issuer
"""
TO_BOT_FROM_CHANNEL_TOKEN_ISSUER = "https://api.botframework.us"
"""
OAuth Url used to get a token from OAuthApiClient.
"""
OAUTH_URL_GOV = "https://api.botframework.azure.us"
"""
TO BOT FROM CHANNEL: OpenID metadata document for tokens coming from MSA
"""
TO_BOT_FROM_CHANNEL_OPENID_METADATA_URL = (
"https://login.botframework.azure.us/v1/.well-known/openidconfiguration"
)
"""
TO BOT FROM GOV EMULATOR: OpenID metadata document for tokens coming from MSA
"""
TO_BOT_FROM_EMULATOR_OPENID_METADATA_URL = (
"https://login.microsoftonline.us/"
"cab8a31a-1906-4287-a0d8-4eef66b95f6e/v2.0/"
".well-known/openid-configuration"
)
|
botbuilder-python/libraries/botframework-connector/botframework/connector/auth/government_constants.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/auth/government_constants.py",
"repo_id": "botbuilder-python",
"token_count": 668
}
| 402 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from .http_client_base import HttpClientBase
class HttpClientFactory:
def create_client(self) -> HttpClientBase:
pass
|
botbuilder-python/libraries/botframework-connector/botframework/connector/http_client_factory.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/http_client_factory.py",
"repo_id": "botbuilder-python",
"token_count": 69
}
| 403 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import TokenApiClientConfiguration
from .operations._bot_sign_in_operations import BotSignInOperations
from .operations._user_token_operations import UserTokenOperations
from . import models
class TokenApiClient(SDKClient):
"""TokenApiClient
:ivar config: Configuration for client.
:vartype config: TokenApiClientConfiguration
:ivar bot_sign_in: BotSignIn operations
:vartype bot_sign_in: botframework.tokenapi.operations.BotSignInOperations
:ivar user_token: UserToken operations
:vartype user_token: botframework.tokenapi.operations.UserTokenOperations
:param credentials: Subscription credentials which uniquely identify
client subscription.
:type credentials: None
:param str base_url: Service URL
"""
def __init__(self, credentials, base_url=None):
self.config = TokenApiClientConfiguration(credentials, base_url)
super(TokenApiClient, self).__init__(self.config.credentials, self.config)
client_models = {
k: v for k, v in models.__dict__.items() if isinstance(v, type)
}
self.api_version = "token"
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.bot_sign_in = BotSignInOperations(
self._client, self.config, self._serialize, self._deserialize
)
self.user_token = UserTokenOperations(
self._client, self.config, self._serialize, self._deserialize
)
|
botbuilder-python/libraries/botframework-connector/botframework/connector/token_api/_token_api_client.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/botframework/connector/token_api/_token_api_client.py",
"repo_id": "botbuilder-python",
"token_count": 623
}
| 404 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
from setuptools import setup
NAME = "botframework-connector"
VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.16.0"
REQUIRES = [
"msrest==0.7.*",
# "requests>=2.23.0,<2.26",
"PyJWT>=2.4.0",
"botbuilder-schema==4.16.0",
"msal==1.*",
]
root = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(root, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name=NAME,
version=VERSION,
description="Microsoft Bot Framework Bot Builder SDK for Python.",
author="Microsoft",
url="https://www.github.com/Microsoft/botbuilder-python",
keywords=["BotFrameworkConnector", "bots", "ai", "botframework", "botbuilder"],
install_requires=REQUIRES,
packages=[
"botframework.connector",
"botframework.connector.auth",
"botframework.connector.async_mixin",
"botframework.connector.operations",
"botframework.connector.models",
"botframework.connector.aio",
"botframework.connector.aio.operations_async",
"botframework.connector.skills",
"botframework.connector.teams",
"botframework.connector.teams.operations",
"botframework.connector.token_api",
"botframework.connector.token_api.aio",
"botframework.connector.token_api.aio.operations_async",
"botframework.connector.token_api.models",
"botframework.connector.token_api.operations",
],
include_package_data=True,
long_description=long_description,
long_description_content_type="text/x-rst",
license="MIT",
classifiers=[
"Programming Language :: Python :: 3.7",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Development Status :: 5 - Production/Stable",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
|
botbuilder-python/libraries/botframework-connector/setup.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/setup.py",
"repo_id": "botbuilder-python",
"token_count": 809
}
| 405 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import base64
import asyncio
import pytest
import msrest
from botbuilder.schema import AttachmentData, ErrorResponseException
from botframework.connector import ConnectorClient
from botframework.connector.auth import MicrosoftAppCredentials
from authentication_stub import MicrosoftTokenAuthenticationStub
SERVICE_URL = "https://slack.botframework.com"
CHANNEL_ID = "slack"
BOT_NAME = "botbuilder-pc-bot"
BOT_ID = "B21UTEF8S:T03CWQ0QB"
RECIPIENT_ID = "U19KH8EHJ:T03CWQ0QB"
CONVERSATION_ID = "B21UTEF8S:T03CWQ0QB:D2369CT7C"
async def get_auth_token():
try:
# pylint: disable=import-outside-toplevel
from .app_creds_real import MICROSOFT_APP_ID, MICROSOFT_APP_PASSWORD
# Define a "app_creds_real.py" file with your bot credentials as follows:
# MICROSOFT_APP_ID = '...'
# MICROSOFT_APP_PASSWORD = '...'
return MicrosoftAppCredentials(
MICROSOFT_APP_ID, MICROSOFT_APP_PASSWORD
).get_access_token()
except ImportError:
return "STUB_ACCESS_TOKEN"
def read_base64(path_to_file):
path_to_current_file = os.path.realpath(__file__)
current_directory = os.path.dirname(path_to_current_file)
path_to_file = os.path.join(current_directory, "resources", path_to_file)
with open(path_to_file, "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
return encoded_string
LOOP = asyncio.get_event_loop()
AUTH_TOKEN = LOOP.run_until_complete(get_auth_token())
class AttachmentsTest:
def __init__(self): # pylint: disable=useless-super-delegation
super(AttachmentsTest, self).__init__()
@property
def credentials(self):
return MicrosoftTokenAuthenticationStub(AUTH_TOKEN)
def test_attachments_upload_and_get_attachment(self):
attachment = AttachmentData(
type="image/png",
name="Bot.png",
original_base64=read_base64("bot.png"),
thumbnail_base64=read_base64("bot_icon.png"),
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.upload_attachment(
CONVERSATION_ID, attachment
)
attachment_id = response.id
attachment_info = connector.attachments.get_attachment_info(attachment_id)
assert attachment_info is not None
assert attachment_info.name == "Bot.png"
assert attachment_info.type == "image/png"
assert len(attachment_info.views) == 2
def test_attachments_get_info_invalid_attachment_id_fails(self):
with pytest.raises(ErrorResponseException) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.attachments.get_attachment_info("bt13796-GJS4yaxDLI")
assert "Not Found" in str(excinfo.value)
def test_attachments_get_attachment_view(self):
original = read_base64("bot.png")
attachment = AttachmentData(
type="image/png",
name="Bot.png",
original_base64=original,
thumbnail_base64=read_base64("bot_icon.png"),
)
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.upload_attachment(
CONVERSATION_ID, attachment
)
attachment_id = response.id
attachment_stream = connector.attachments.get_attachment(
attachment_id, "original"
)
assert len(original) == sum(len(_) for _ in attachment_stream)
def test_attachments_get_attachment_view_with_invalid_attachment_id_fails(self):
with pytest.raises(msrest.exceptions.HttpOperationError) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
connector.attachments.get_attachment("bt13796-GJS4yaxDLI", "original")
assert "Not Found" in str(excinfo.value)
def test_attachments_get_attachment_view_with_invalid_view_id_fails(self):
original = read_base64("bot.png")
attachment = AttachmentData(
type="image/png",
name="Bot.png",
original_base64=original,
thumbnail_base64=read_base64("bot_icon.png"),
)
with pytest.raises(msrest.exceptions.HttpOperationError) as excinfo:
connector = ConnectorClient(self.credentials, base_url=SERVICE_URL)
response = connector.conversations.upload_attachment(
CONVERSATION_ID, attachment
)
attachment_id = response.id
connector.attachments.get_attachment(attachment_id, "invalid")
assert "not found" in str(excinfo.value)
|
botbuilder-python/libraries/botframework-connector/tests/test_attachments.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-connector/tests/test_attachments.py",
"repo_id": "botbuilder-python",
"token_count": 2020
}
| 406 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import traceback
from asyncio import Queue, ensure_future
from typing import Awaitable, Callable
class SendQueue:
def __init__(self, action: Callable[[object], Awaitable], timeout: int = 30):
self._action = action
self._queue = Queue()
self._timeout_seconds = timeout
        # TODO: this has to be abstracted so the asyncio dependency can be removed
ensure_future(self._process())
def post(self, item: object):
self._post_internal(item)
def _post_internal(self, item: object):
self._queue.put_nowait(item)
async def _process(self):
while True:
try:
while True:
item = await self._queue.get()
try:
await self._action(item)
except Exception:
traceback.print_exc()
finally:
self._queue.task_done()
except Exception:
# AppInsights.TrackException(e)
traceback.print_exc()
return
|
botbuilder-python/libraries/botframework-streaming/botframework/streaming/payload_transport/send_queue.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/payload_transport/send_queue.py",
"repo_id": "botbuilder-python",
"token_count": 555
}
| 407 |
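A minimal sketch of posting items through SendQueue from inside a running event loop (the send coroutine is a stand-in for the real transport action):

import asyncio
from botframework.streaming.payload_transport.send_queue import SendQueue

async def main():
    async def send(item: object):
        # Stand-in for the real transport action.
        print(f"sending {item}")

    queue = SendQueue(action=send)
    for payload in ("header", "body"):
        queue.post(payload)
    # Give the background task a moment to drain the queue.
    await asyncio.sleep(0.1)

asyncio.run(main())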
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from uuid import UUID
from botframework.streaming.transport import TransportConstants
class Header:
# pylint: disable=invalid-name
def __init__(self, *, type: str = None, id: UUID = None, end: bool = None):
self._internal_payload_length = None
self.type: str = type
self.id: UUID = id
self.end: bool = end
@property
def payload_length(self) -> int:
return self._internal_payload_length
@payload_length.setter
def payload_length(self, value: int):
self._validate_length(
value, TransportConstants.MAX_LENGTH, TransportConstants.MIN_LENGTH
)
self._internal_payload_length = value
def _validate_length(self, value: int, max_val: int, min_val: int):
if value > max_val:
raise ValueError(f"Length must be less or equal than {max_val}")
if value < min_val:
raise ValueError(f"Length must be greater or equal than {min_val}")
|
botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/models/header.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/payloads/models/header.py",
"repo_id": "botbuilder-python",
"token_count": 418
}
| 408 |
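A short sketch of how the `Header.payload_length` validation behaves, again assuming the import path mirrors the file path above; the exact `TransportConstants` bounds are not reproduced here, so the negative value is only assumed to fall below `MIN_LENGTH`:

```python
from uuid import uuid4

from botframework.streaming.payloads.models.header import Header

header = Header(type="A", id=uuid4(), end=True)

# The setter checks the value against TransportConstants.MAX_LENGTH / MIN_LENGTH.
header.payload_length = 42
print(header.payload_length)  # 42

try:
    header.payload_length = -1  # assumed to be below MIN_LENGTH
except ValueError as err:
    print(err)
```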
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.

import json
from http import HTTPStatus
from uuid import UUID, uuid4
from typing import List, Union

from msrest.serialization import Model

from botframework.streaming.payloads import ResponseMessageStream
from botframework.streaming.payloads.models import Serializable


class StreamingResponse:
    def __init__(
        self, *, status_code: int = 0, streams: List[ResponseMessageStream] = None
    ):
        self.status_code = status_code
        self.streams = streams

    def add_stream(self, content: object, identifier: UUID = None):
        if not content:
            raise TypeError("content can't be None")

        if self.streams is None:
            self.streams: List[ResponseMessageStream] = []

        self.streams.append(
            ResponseMessageStream(id=identifier or uuid4(), content=content)
        )

    def set_body(self, body: Union[str, Serializable, Model]):
        # TODO: verify if msrest.serialization.Model is necessary
        if not body:
            return

        if isinstance(body, Serializable):
            body = body.to_json()
        elif isinstance(body, Model):
            body = json.dumps(body.as_dict())

        self.add_stream(list(body.encode()))

    @staticmethod
    def create_response(status_code: int, body: object) -> "StreamingResponse":
        response = StreamingResponse(status_code=status_code)

        if body:
            response.add_stream(body)

        return response

    @staticmethod
    def not_found(body: object = None) -> "StreamingResponse":
        return StreamingResponse.create_response(HTTPStatus.NOT_FOUND, body)

    @staticmethod
    def forbidden(body: object = None) -> "StreamingResponse":
        return StreamingResponse.create_response(HTTPStatus.FORBIDDEN, body)

    # pylint: disable=invalid-name
    @staticmethod
    def ok(body: object = None) -> "StreamingResponse":
        return StreamingResponse.create_response(HTTPStatus.OK, body)

    @staticmethod
    def internal_server_error(body: object = None) -> "StreamingResponse":
        return StreamingResponse.create_response(HTTPStatus.INTERNAL_SERVER_ERROR, body)
|
botbuilder-python/libraries/botframework-streaming/botframework/streaming/streaming_response.py/0
|
{
"file_path": "botbuilder-python/libraries/botframework-streaming/botframework/streaming/streaming_response.py",
"repo_id": "botbuilder-python",
"token_count": 815
}
| 409 |
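A brief sketch of the `StreamingResponse` factory helpers and `set_body`, assuming the import path matches the file path above; the string body is an arbitrary example:

```python
from botframework.streaming.streaming_response import StreamingResponse

# The factory helpers wrap create_response with a fixed HTTP status.
missing = StreamingResponse.not_found()
print(int(missing.status_code))  # 404

ok = StreamingResponse.ok()
# A plain str body is encoded to a list of bytes and attached as a stream.
ok.set_body("hello")
print(int(ok.status_code), len(ok.streams))  # 200 1
```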
trigger: none # no ci trigger
pr: none # no pr trigger
pool:
vmImage: 'ubuntu-latest'
steps:
- task: AzurePowerShell@5
displayName: 'Create container'
inputs:
azureSubscription: 'FUSE Temporary (174c5021-8109-4087-a3e2-a1de20420569)'
ScriptType: 'InlineScript'
Inline: |
Set-PSDebug -Trace 1;
Write-Host 'blah';
Write-Host 'az group create --name NightlyPythonFunctionalTestContainerRegistryRG --location eastus'
az group create --name NightlyPythonFunctionalTestContainerRegistryRG --location eastus
Write-Host 'az acr create --resource-group NightlyPythonFunctionalTestContainerRegistryRG --name NightlyPythonFunctionalTestContainerRegistry --sku Basic'
az acr create --resource-group NightlyPythonFunctionalTestContainerRegistryRG --name NightlyPythonFunctionalTestContainerRegistry --sku Basic
az acr login --name NightlyPythonFunctionalTestContainerRegistry
docker pull hello-world
docker tag hello-world nightlypythonfunctionaltestcontainerregistry.azurecr.io/hello-world:v1
docker push nightlypythonfunctionaltestcontainerregistry.azurecr.io/hello-world:v1
docker rmi nightlypythonfunctionaltestcontainerregistry.azurecr.io/hello-world:v1
az acr repository list --name NightlyPythonFunctionalTestContainerRegistry --output table
az acr repository show-tags --name NightlyPythonFunctionalTestContainerRegistry --repository hello-world --output table
azurePowerShellVersion: 'LatestVersion'
- script: echo Hello, world!
displayName: 'Run a one-line script'
- script: |
echo Add other tasks to build, test, and deploy your project.
echo See https://aka.ms/yaml
displayName: 'Run a multi-line script'
|
botbuilder-python/pipelines/experimental-create-azure-container-registry.yml/0
|
{
"file_path": "botbuilder-python/pipelines/experimental-create-azure-container-registry.yml",
"repo_id": "botbuilder-python",
"token_count": 551
}
| 410 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
FROM mcr.microsoft.com/oryx/python:3.10
RUN mkdir /functionaltestbot
EXPOSE 443
# EXPOSE 2222
COPY ./functionaltestbot /functionaltestbot
COPY setup.py /
COPY test.sh /
# RUN ls -ltr
# RUN cat prestart.sh
# RUN cat main.py
ENV FLASK_APP=/functionaltestbot/app.py
ENV LANG=C.UTF-8
ENV LC_ALL=C.UTF-8
ENV PATH=${PATH}:/home/site/wwwroot
WORKDIR /
# Initialize the bot
RUN pip3 install -e .
# ssh
ENV SSH_PASSWD "root:Docker!"
RUN apt-get update \
&& apt-get install -y --no-install-recommends dialog \
&& apt-get update \
&& apt-get install -y --no-install-recommends openssh-server \
&& echo "$SSH_PASSWD" | chpasswd \
&& apt install -y --no-install-recommends vim
COPY sshd_config /etc/ssh/
COPY init.sh /usr/local/bin/
RUN chmod u+x /usr/local/bin/init.sh
# For Debugging, uncomment the following:
# ENTRYPOINT ["python3.6", "-c", "import time ; time.sleep(500000)"]
ENTRYPOINT ["init.sh"]
# For Devops, they don't like entry points. This is now in the devops
# pipeline.
# ENTRYPOINT [ "flask" ]
# CMD [ "run", "--port", "3978", "--host", "0.0.0.0" ]
|
botbuilder-python/tests/functional-tests/functionaltestbot/Dockerfile/0
|
{
"file_path": "botbuilder-python/tests/functional-tests/functionaltestbot/Dockerfile",
"repo_id": "botbuilder-python",
"token_count": 477
}
| 411 |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""
To run the Flask bot app, in a py virtual environment,
```bash
pip install -r requirements.txt
python runserver.py
```
"""
from flask_bot_app import APP
if __name__ == "__main__":
APP.run(host="0.0.0.0")
|
botbuilder-python/tests/functional-tests/functionaltestbot/runserver.py/0
|
{
"file_path": "botbuilder-python/tests/functional-tests/functionaltestbot/runserver.py",
"repo_id": "botbuilder-python",
"token_count": 102
}
| 412 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="acutetonecomb" format="2">
<unicode hex="0341"/>
<outline>
<component base="acutecomb"/>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.ComponentInfo</key>
<array>
<dict>
<key>alignment</key>
<integer>-1</integer>
<key>index</key>
<integer>0</integer>
<key>name</key>
<string>acutecomb</string>
</dict>
</array>
<key>com.schriftgestaltung.Glyphs.originalWidth</key>
<integer>1200</integer>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/acutetonecomb.glif/0
|
{
"file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/acutetonecomb.glif",
"repo_id": "cascadia-code",
"token_count": 310
}
| 413 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="ainThreedots-ar.init" format="2">
<advance width="1200"/>
<guideline x="210" y="624" angle="0"/>
<outline>
<component base="ain-ar.init"/>
<component base="threedotsupabove-ar" xOffset="61" yOffset="373"/>
</outline>
<lib>
<dict>
<key>public.markColor</key>
<string>0.98,0.36,0.67,1</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ainT_hreedots-ar.init.glif/0
|
{
"file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/ainT_hreedots-ar.init.glif",
"repo_id": "cascadia-code",
"token_count": 185
}
| 414 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="anoteleia" format="2">
<advance width="1200"/>
<unicode hex="0387"/>
<outline>
<component base="period" yOffset="735"/>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.ComponentInfo</key>
<array>
<dict>
<key>alignment</key>
<integer>-1</integer>
<key>index</key>
<integer>0</integer>
<key>name</key>
<string>period</string>
</dict>
</array>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/anoteleia.glif/0
|
{
"file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/anoteleia.glif",
"repo_id": "cascadia-code",
"token_count": 278
}
| 415 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="baht.BRACKET.600" format="2">
<advance width="1200"/>
<outline>
<contour>
<point x="457" y="1281" type="line"/>
<point x="710" y="1281" type="line"/>
<point x="710" y="1740" type="line"/>
<point x="457" y="1740" type="line"/>
</contour>
<contour>
<point x="457" y="-320" type="line"/>
<point x="710" y="-320" type="line"/>
<point x="710" y="131" type="line"/>
<point x="457" y="131" type="line"/>
</contour>
<component base="B"/>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.ComponentInfo</key>
<array>
<dict>
<key>alignment</key>
<integer>-1</integer>
<key>index</key>
<integer>0</integer>
<key>name</key>
<string>B</string>
</dict>
</array>
<key>com.schriftgestaltung.Glyphs._originalLayerName</key>
<string>[600]</string>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/baht.B_R_A_C_K_E_T_.600.glif/0
|
{
"file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/baht.B_R_A_C_K_E_T_.600.glif",
"repo_id": "cascadia-code",
"token_count": 506
}
| 416 |
<?xml version='1.0' encoding='UTF-8'?>
<glyph name="cent" format="2">
<advance width="1200"/>
<unicode hex="00A2"/>
<outline>
<contour>
<point x="534" y="-334" type="line"/>
<point x="792" y="-334" type="line"/>
<point x="792" y="1394" type="line"/>
<point x="534" y="1394" type="line"/>
</contour>
<component base="c"/>
</outline>
<lib>
<dict>
<key>com.schriftgestaltung.Glyphs.ComponentInfo</key>
<array>
<dict>
<key>alignment</key>
<integer>-1</integer>
<key>index</key>
<integer>0</integer>
<key>name</key>
<string>c</string>
</dict>
</array>
</dict>
</lib>
</glyph>
|
cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/cent.glif/0
|
{
"file_path": "cascadia-code/sources/CascadiaCode-Bold.ufo/glyphs/cent.glif",
"repo_id": "cascadia-code",
"token_count": 372
}
| 417 |