body_hash (stringlengths 64-64) | body (stringlengths 23-109k) | docstring (stringlengths 1-57k) | path (stringlengths 4-198) | name (stringlengths 1-115) | repository_name (stringlengths 7-111) | repository_stars (float64 0-191k) | lang (stringclasses 1 value) | body_without_docstring (stringlengths 14-108k) | unified (stringlengths 45-133k) |
---|---|---|---|---|---|---|---|---|---|
97cec53cebf8a1bdbdbd67b261838d294809cb53f352d17c037f072b924efb26 | def _strip_auth(self, proc_params):
'Remove options from parameters that cause auth to be enabled.'
params = proc_params.copy()
params.pop('auth', None)
params.pop('clusterAuthMode', None)
return params | Remove options from parameters that cause auth to be enabled. | mongo_orchestration/common.py | _strip_auth | DmitryLukyanov/mongo-orchestration | 49 | python | def _strip_auth(self, proc_params):
params = proc_params.copy()
params.pop('auth', None)
params.pop('clusterAuthMode', None)
return params | def _strip_auth(self, proc_params):
params = proc_params.copy()
params.pop('auth', None)
params.pop('clusterAuthMode', None)
return params<|docstring|>Remove options from parameters that cause auth to be enabled.<|endoftext|> |
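As a minimal standalone sketch of what `_strip_auth` does to a parameter dict (the dict contents here are hypothetical):

```python
# Hypothetical process parameters; only 'auth' and 'clusterAuthMode' are removed.
proc_params = {"port": 27017, "auth": True, "clusterAuthMode": "x509"}
params = proc_params.copy()
params.pop("auth", None)
params.pop("clusterAuthMode", None)
print(params)        # {'port': 27017}
print(proc_params)   # the original dict is left untouched, thanks to copy()
```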
e10e2a855df7fc730209b6110cf90b89b7b9053f8677121b41e7943860924cfa | def mongodb_auth_uri(self, hosts):
'Get a connection string with all info necessary to authenticate.'
parts = ['mongodb://']
if self.login:
parts.append(self.login)
if self.password:
parts.append((':' + self.password))
parts.append('@')
parts.append((hosts + '/'))
if self.login:
parts.append(('?authSource=' + self.auth_source))
if self.x509_extra_user:
parts.append('&authMechanism=MONGODB-X509')
return ''.join(parts) | Get a connection string with all info necessary to authenticate. | mongo_orchestration/common.py | mongodb_auth_uri | DmitryLukyanov/mongo-orchestration | 49 | python | def mongodb_auth_uri(self, hosts):
parts = ['mongodb://']
if self.login:
parts.append(self.login)
if self.password:
parts.append((':' + self.password))
parts.append('@')
parts.append((hosts + '/'))
if self.login:
parts.append(('?authSource=' + self.auth_source))
if self.x509_extra_user:
parts.append('&authMechanism=MONGODB-X509')
return ''.join(parts) | def mongodb_auth_uri(self, hosts):
parts = ['mongodb://']
if self.login:
parts.append(self.login)
if self.password:
parts.append((':' + self.password))
parts.append('@')
parts.append((hosts + '/'))
if self.login:
parts.append(('?authSource=' + self.auth_source))
if self.x509_extra_user:
parts.append('&authMechanism=MONGODB-X509')
return ''.join(parts)<|docstring|>Get a connection string with all info necessary to authenticate.<|endoftext|> |
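A free-function sketch of the same URI assembly, with hypothetical credentials. The flattened body above loses indentation, so the nesting (the `@` appended only when a login is present, and an assumed `auth_source` default) is a reconstruction, not the verbatim original:

```python
def build_auth_uri(hosts, login=None, password=None,
                   auth_source="admin", x509_extra_user=False):
    # Mirrors the method above; auth_source default is an assumption.
    parts = ["mongodb://"]
    if login:
        parts.append(login)
        if password:
            parts.append(":" + password)
        parts.append("@")
    parts.append(hosts + "/")
    if login:
        parts.append("?authSource=" + auth_source)
    if x509_extra_user:
        parts.append("&authMechanism=MONGODB-X509")
    return "".join(parts)

print(build_auth_uri("localhost:27017", login="admin", password="pass"))
# mongodb://admin:pass@localhost:27017/?authSource=admin
```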
dea6f2683d465b3a96493186fe14d78efa0dfd5a8ca9b307136133fa16db9448 | def _add_users(self, db, mongo_version):
'Add given user, and extra x509 user if necessary.'
roles = self._user_roles(db.client)
if self.x509_extra_user:
db.add_user(DEFAULT_SUBJECT, roles=roles)
self.kwargs['ssl_certfile'] = DEFAULT_CLIENT_CERT
create_user(db, mongo_version, self.login, self.password, roles) | Add given user, and extra x509 user if necessary. | mongo_orchestration/common.py | _add_users | DmitryLukyanov/mongo-orchestration | 49 | python | def _add_users(self, db, mongo_version):
roles = self._user_roles(db.client)
if self.x509_extra_user:
db.add_user(DEFAULT_SUBJECT, roles=roles)
self.kwargs['ssl_certfile'] = DEFAULT_CLIENT_CERT
create_user(db, mongo_version, self.login, self.password, roles) | def _add_users(self, db, mongo_version):
roles = self._user_roles(db.client)
if self.x509_extra_user:
db.add_user(DEFAULT_SUBJECT, roles=roles)
self.kwargs['ssl_certfile'] = DEFAULT_CLIENT_CERT
create_user(db, mongo_version, self.login, self.password, roles)<|docstring|>Add given user, and extra x509 user if necessary.<|endoftext|> |
f21af03215c8a23894e8c58f9043c82fe66de81eb48df04a7cda88a0eb8d7d66 | def setup_train_args():
'\n Set up training arguments\n '
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='Which GPUs to use')
parser.add_argument('--no_cuda', default=False, action='store_true', help='Train without using a GPU')
parser.add_argument('--model_config', default='config/config.json', type=str, required=False, help='Model config to use')
parser.add_argument('--vocab_path', default='vocab/', type=str, required=False, help='Vocabulary to use')
parser.add_argument('--train_raw_path', default='data/convai2/train/data.txt', type=str, required=False, help='Raw training corpus')
parser.add_argument('--train_tokenized_path', default='data/convai2/train/tokenized.txt', type=str, required=False, help='Where to store the tokenized training corpus')
parser.add_argument('--log_path', default='dialogue_model/convai2/training.log', type=str, required=False, help='Where to store the training log')
parser.add_argument('--raw', default=False, action='store_true', help='Whether to tokenize the raw training corpus; set this flag if it has not been tokenized yet')
parser.add_argument('--epochs', default=12, type=int, required=False, help='Number of training epochs')
parser.add_argument('--batch_size', default=8, type=int, required=False, help='Training batch size')
parser.add_argument('--lr', default=0.00015, type=float, required=False, help='Learning rate')
parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='Number of warm-up steps')
parser.add_argument('--log_step', default=100, type=int, required=False, help='Report the loss every this many steps')
parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='Gradient accumulation steps')
parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
parser.add_argument('--dialogue_model_output_path', default='dialogue_model/convai2/', type=str, required=False, help='Output path for the dialogue model')
parser.add_argument('--pretrained_model', default='model_param/', type=str, required=False, help='Path to the pretrained GPT2 model')
parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='Tensorboard path')
parser.add_argument('--seed', type=int, default=None, help='Random seed, to make training results deterministic')
parser.add_argument('--num_workers', type=int, default=1, help='Number of worker threads used by the dataloader')
parser.add_argument('--train_mmi', action='store_true', help='If set, train the DialoGPT MMI model')
parser.add_argument('--train_mmi_tokenized_path', default='data/train_mmi_tokenized.txt', type=str, required=False, help='Where to store the data obtained by reversing each dialogue in the raw corpus and tokenizing it, used to train the MMI model')
parser.add_argument('--mmi_model_output_path', default='mmi_model', type=str, required=False, help='Save path for the MMI model')
return parser.parse_args() | Set up training arguments | train_convai2.py | setup_train_args | Lambert-hpx/English-DialoGPT | 1 | python | def setup_train_args():
'\n \n '
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='Which GPUs to use')
parser.add_argument('--no_cuda', default=False, action='store_true', help='Train without using a GPU')
parser.add_argument('--model_config', default='config/config.json', type=str, required=False, help='Model config to use')
parser.add_argument('--vocab_path', default='vocab/', type=str, required=False, help='Vocabulary to use')
parser.add_argument('--train_raw_path', default='data/convai2/train/data.txt', type=str, required=False, help='Raw training corpus')
parser.add_argument('--train_tokenized_path', default='data/convai2/train/tokenized.txt', type=str, required=False, help='Where to store the tokenized training corpus')
parser.add_argument('--log_path', default='dialogue_model/convai2/training.log', type=str, required=False, help='Where to store the training log')
parser.add_argument('--raw', default=False, action='store_true', help='Whether to tokenize the raw training corpus; set this flag if it has not been tokenized yet')
parser.add_argument('--epochs', default=12, type=int, required=False, help='Number of training epochs')
parser.add_argument('--batch_size', default=8, type=int, required=False, help='Training batch size')
parser.add_argument('--lr', default=0.00015, type=float, required=False, help='Learning rate')
parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='Number of warm-up steps')
parser.add_argument('--log_step', default=100, type=int, required=False, help='Report the loss every this many steps')
parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='Gradient accumulation steps')
parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
parser.add_argument('--dialogue_model_output_path', default='dialogue_model/convai2/', type=str, required=False, help='Output path for the dialogue model')
parser.add_argument('--pretrained_model', default='model_param/', type=str, required=False, help='Path to the pretrained GPT2 model')
parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='Tensorboard path')
parser.add_argument('--seed', type=int, default=None, help='Random seed, to make training results deterministic')
parser.add_argument('--num_workers', type=int, default=1, help='Number of worker threads used by the dataloader')
parser.add_argument('--train_mmi', action='store_true', help='If set, train the DialoGPT MMI model')
parser.add_argument('--train_mmi_tokenized_path', default='data/train_mmi_tokenized.txt', type=str, required=False, help='Where to store the data obtained by reversing each dialogue in the raw corpus and tokenizing it, used to train the MMI model')
parser.add_argument('--mmi_model_output_path', default='mmi_model', type=str, required=False, help='Save path for the MMI model')
return parser.parse_args() | def setup_train_args():
'\n \n '
parser = argparse.ArgumentParser()
parser.add_argument('--device', default='0,1,2,3', type=str, required=False, help='Which GPUs to use')
parser.add_argument('--no_cuda', default=False, action='store_true', help='Train without using a GPU')
parser.add_argument('--model_config', default='config/config.json', type=str, required=False, help='Model config to use')
parser.add_argument('--vocab_path', default='vocab/', type=str, required=False, help='Vocabulary to use')
parser.add_argument('--train_raw_path', default='data/convai2/train/data.txt', type=str, required=False, help='Raw training corpus')
parser.add_argument('--train_tokenized_path', default='data/convai2/train/tokenized.txt', type=str, required=False, help='Where to store the tokenized training corpus')
parser.add_argument('--log_path', default='dialogue_model/convai2/training.log', type=str, required=False, help='Where to store the training log')
parser.add_argument('--raw', default=False, action='store_true', help='Whether to tokenize the raw training corpus; set this flag if it has not been tokenized yet')
parser.add_argument('--epochs', default=12, type=int, required=False, help='Number of training epochs')
parser.add_argument('--batch_size', default=8, type=int, required=False, help='Training batch size')
parser.add_argument('--lr', default=0.00015, type=float, required=False, help='Learning rate')
parser.add_argument('--warmup_steps', default=2000, type=int, required=False, help='Number of warm-up steps')
parser.add_argument('--log_step', default=100, type=int, required=False, help='Report the loss every this many steps')
parser.add_argument('--gradient_accumulation', default=1, type=int, required=False, help='Gradient accumulation steps')
parser.add_argument('--max_grad_norm', default=1.0, type=float, required=False)
parser.add_argument('--dialogue_model_output_path', default='dialogue_model/convai2/', type=str, required=False, help='Output path for the dialogue model')
parser.add_argument('--pretrained_model', default='model_param/', type=str, required=False, help='Path to the pretrained GPT2 model')
parser.add_argument('--writer_dir', default='tensorboard_summary/', type=str, required=False, help='Tensorboard path')
parser.add_argument('--seed', type=int, default=None, help='Random seed, to make training results deterministic')
parser.add_argument('--num_workers', type=int, default=1, help='Number of worker threads used by the dataloader')
parser.add_argument('--train_mmi', action='store_true', help='If set, train the DialoGPT MMI model')
parser.add_argument('--train_mmi_tokenized_path', default='data/train_mmi_tokenized.txt', type=str, required=False, help='Where to store the data obtained by reversing each dialogue in the raw corpus and tokenizing it, used to train the MMI model')
parser.add_argument('--mmi_model_output_path', default='mmi_model', type=str, required=False, help='Save path for the MMI model')
return parser.parse_args()<|docstring|>Set up training arguments<|endoftext|> |
9db08a28a3af3e1531631cdf2181faa65f44ec5809bfeac2d3d174c37cd04478 | def set_random_seed(args):
'\n Set the random seed for training\n '
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
if args.cuda:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False | Set the random seed for training | train_convai2.py | set_random_seed | Lambert-hpx/English-DialoGPT | 1 | python | def set_random_seed(args):
'\n \n '
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
if args.cuda:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False | def set_random_seed(args):
'\n \n '
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
if args.cuda:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False<|docstring|>Set the random seed for training<|endoftext|> |
f5a178c18f6e09cd867711413d3fbc6fc02c99a2a74aea0119d70e1ba9f16258 | def create_logger(args):
'\n Output logs to both a log file and the console\n '
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(filename=args.log_path)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
logger.addHandler(console)
return logger | Output logs to both a log file and the console | train_convai2.py | create_logger | Lambert-hpx/English-DialoGPT | 1 | python | def create_logger(args):
'\n \n '
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(filename=args.log_path)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
logger.addHandler(console)
return logger | def create_logger(args):
'\n \n '
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler(filename=args.log_path)
file_handler.setFormatter(formatter)
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(formatter)
logger.addHandler(console)
return logger<|docstring|>Output logs to both a log file and the console<|endoftext|> |
8dde618d8223eccf0c2e9c16a553714f39578e2294b5023148c21a789fc86954 | def create_model(args, vocab_size):
'\n\n :param args:\n :param vocab_size: vocabulary size\n :return:\n '
if args.pretrained_model:
model = GPT2LMHeadModel.from_pretrained(args.pretrained_model)
logger.info('load pretrain model!')
else:
model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(args.model_config)
model = GPT2LMHeadModel(config=model_config)
model.resize_token_embeddings(vocab_size)
logger.info('model config:\n{}'.format(model.config.to_json_string()))
return (model, model.config.to_dict().get('n_ctx')) | :param args:
:param vocab_size: vocabulary size
:return: | train_convai2.py | create_model | Lambert-hpx/English-DialoGPT | 1 | python | def create_model(args, vocab_size):
'\n\n :param args:\n :param vocab_size: vocabulary size\n :return:\n '
if args.pretrained_model:
model = GPT2LMHeadModel.from_pretrained(args.pretrained_model)
logger.info('load pretrain model!')
else:
model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(args.model_config)
model = GPT2LMHeadModel(config=model_config)
model.resize_token_embeddings(vocab_size)
logger.info('model config:\n{}'.format(model.config.to_json_string()))
return (model, model.config.to_dict().get('n_ctx')) | def create_model(args, vocab_size):
'\n\n :param args:\n :param vocab_size: vocabulary size\n :return:\n '
if args.pretrained_model:
model = GPT2LMHeadModel.from_pretrained(args.pretrained_model)
logger.info('load pretrain model!')
else:
model_config = transformers.modeling_gpt2.GPT2Config.from_json_file(args.model_config)
model = GPT2LMHeadModel(config=model_config)
model.resize_token_embeddings(vocab_size)
logger.info('model config:\n{}'.format(model.config.to_json_string()))
return (model, model.config.to_dict().get('n_ctx'))<|docstring|>:param args:
:param vocab_size: vocabulary size
:return:<|endoftext|> |
1c325d2f39273a66dd5521696a466e0e33d36426c2438105fa2a987be570eaaf | def preprocess_raw_data(args, tokenizer, n_ctx):
'\n Process the raw corpus: convert it into token ids for training. Each dialogue is formatted as "[CLS]utterance1[SEP]utterance2[SEP]utterance3[SEP]"\n :param args:\n :param tokenizer:\n :param n_ctx: context window size of the GPT2 model; dialogues longer than n_ctx (which includes special tokens) are truncated\n :return:\n '
logger.info('tokenizing raw data,raw data path:{}, token output path:{}'.format(args.train_raw_path, args.train_tokenized_path))
with open(args.train_raw_path, 'rb') as f:
data = f.read().decode('utf-8')
train_data = data.split('\n\n')
logger.info('there are {} dialogue in raw dataset'.format(len(train_data)))
with open(args.train_tokenized_path, 'w', encoding='utf-8') as f:
for (dialogue_index, dialogue) in enumerate(tqdm(train_data)):
utterances = dialogue.split('\n')
dialogue_ids = [tokenizer.cls_token_id]
for utterance in utterances:
dialogue_ids.extend([tokenizer.convert_tokens_to_ids(word) for word in utterance.split(' ')])
dialogue_ids.append(tokenizer.sep_token_id)
dialogue_ids = dialogue_ids[:n_ctx]
for dialogue_id in dialogue_ids:
f.write((str(dialogue_id) + ' '))
if (dialogue_index < (len(train_data) - 1)):
f.write('\n')
logger.info('finish preprocessing raw data,the result is stored in {}'.format(args.train_tokenized_path)) | Process the raw corpus: convert it into token ids for training. Each dialogue is formatted as "[CLS]utterance1[SEP]utterance2[SEP]utterance3[SEP]"
:param args:
:param tokenizer:
:param n_ctx: context window size of the GPT2 model; dialogues longer than n_ctx (which includes special tokens) are truncated
:return: | train_convai2.py | preprocess_raw_data | Lambert-hpx/English-DialoGPT | 1 | python | def preprocess_raw_data(args, tokenizer, n_ctx):
'\n Process the raw corpus: convert it into token ids for training. Each dialogue is formatted as "[CLS]utterance1[SEP]utterance2[SEP]utterance3[SEP]"\n :param args:\n :param tokenizer:\n :param n_ctx: context window size of the GPT2 model; dialogues longer than n_ctx (which includes special tokens) are truncated\n :return:\n '
logger.info('tokenizing raw data,raw data path:{}, token output path:{}'.format(args.train_raw_path, args.train_tokenized_path))
with open(args.train_raw_path, 'rb') as f:
data = f.read().decode('utf-8')
train_data = data.split('\n\n')
logger.info('there are {} dialogue in raw dataset'.format(len(train_data)))
with open(args.train_tokenized_path, 'w', encoding='utf-8') as f:
for (dialogue_index, dialogue) in enumerate(tqdm(train_data)):
utterances = dialogue.split('\n')
dialogue_ids = [tokenizer.cls_token_id]
for utterance in utterances:
dialogue_ids.extend([tokenizer.convert_tokens_to_ids(word) for word in utterance.split(' ')])
dialogue_ids.append(tokenizer.sep_token_id)
dialogue_ids = dialogue_ids[:n_ctx]
for dialogue_id in dialogue_ids:
f.write((str(dialogue_id) + ' '))
if (dialogue_index < (len(train_data) - 1)):
f.write('\n')
logger.info('finish preprocessing raw data,the result is stored in {}'.format(args.train_tokenized_path)) | def preprocess_raw_data(args, tokenizer, n_ctx):
'\n Process the raw corpus: convert it into token ids for training. Each dialogue is formatted as "[CLS]utterance1[SEP]utterance2[SEP]utterance3[SEP]"\n :param args:\n :param tokenizer:\n :param n_ctx: context window size of the GPT2 model; dialogues longer than n_ctx (which includes special tokens) are truncated\n :return:\n '
logger.info('tokenizing raw data,raw data path:{}, token output path:{}'.format(args.train_raw_path, args.train_tokenized_path))
with open(args.train_raw_path, 'rb') as f:
data = f.read().decode('utf-8')
train_data = data.split('\n\n')
logger.info('there are {} dialogue in raw dataset'.format(len(train_data)))
with open(args.train_tokenized_path, 'w', encoding='utf-8') as f:
for (dialogue_index, dialogue) in enumerate(tqdm(train_data)):
utterances = dialogue.split('\n')
dialogue_ids = [tokenizer.cls_token_id]
for utterance in utterances:
dialogue_ids.extend([tokenizer.convert_tokens_to_ids(word) for word in utterance.split(' ')])
dialogue_ids.append(tokenizer.sep_token_id)
dialogue_ids = dialogue_ids[:n_ctx]
for dialogue_id in dialogue_ids:
f.write((str(dialogue_id) + ' '))
if (dialogue_index < (len(train_data) - 1)):
f.write('\n')
logger.info('finish preprocessing raw data,the result is stored in {}'.format(args.train_tokenized_path))<|docstring|>Process the raw corpus: convert it into token ids for training. Each dialogue is formatted as "[CLS]utterance1[SEP]utterance2[SEP]utterance3[SEP]"
:param args:
:param tokenizer:
:param n_ctx: context window size of the GPT2 model; dialogues longer than n_ctx (which includes special tokens) are truncated
:return:<|endoftext|> |
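A toy illustration of the `[CLS]utterance[SEP]...` layout the preprocessor produces, using a made-up vocabulary in place of a real tokenizer:

```python
# Fake vocabulary standing in for tokenizer.convert_tokens_to_ids.
vocab = {"[CLS]": 0, "[SEP]": 1, "hi": 2, "there": 3, "bye": 4}
dialogue = "hi there\nbye"            # one dialogue with two utterances
dialogue_ids = [vocab["[CLS]"]]
for utterance in dialogue.split("\n"):
    dialogue_ids.extend(vocab[w] for w in utterance.split(" "))
    dialogue_ids.append(vocab["[SEP]"])
n_ctx = 300
dialogue_ids = dialogue_ids[:n_ctx]   # truncate to the context window
print(dialogue_ids)                   # [0, 2, 3, 1, 4, 1]
```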
3091f894bdc2cfd93fdba3c6c8ece26c01825061c43f48756f21e2172661b20d | def calculate_loss_and_accuracy(outputs, labels, device):
'\n Compute the average loss and accuracy over non-pad_id tokens\n :param outputs:\n :param labels:\n :param device:\n :return:\n '
logits = outputs[0]
shift_logits = logits[(..., :(- 1), :)].contiguous()
shift_labels = labels[(..., 1:)].contiguous().to(device)
loss_fct = CrossEntropyLoss(ignore_index=pad_id, reduction='sum')
loss = loss_fct(shift_logits.view((- 1), shift_logits.size((- 1))), shift_labels.view((- 1)))
(_, preds) = shift_logits.max(dim=(- 1))
not_ignore = shift_labels.ne(pad_id)
num_targets = not_ignore.long().sum().item()
correct = ((shift_labels == preds) & not_ignore)
correct = correct.float().sum()
accuracy = (correct / num_targets)
loss = (loss / num_targets)
return (loss, accuracy) | Compute the average loss and accuracy over non-pad_id tokens
:param outputs:
:param labels:
:param device:
:return: | train_convai2.py | calculate_loss_and_accuracy | Lambert-hpx/English-DialoGPT | 1 | python | def calculate_loss_and_accuracy(outputs, labels, device):
'\n Compute the average loss and accuracy over non-pad_id tokens\n :param outputs:\n :param labels:\n :param device:\n :return:\n '
logits = outputs[0]
shift_logits = logits[(..., :(- 1), :)].contiguous()
shift_labels = labels[(..., 1:)].contiguous().to(device)
loss_fct = CrossEntropyLoss(ignore_index=pad_id, reduction='sum')
loss = loss_fct(shift_logits.view((- 1), shift_logits.size((- 1))), shift_labels.view((- 1)))
(_, preds) = shift_logits.max(dim=(- 1))
not_ignore = shift_labels.ne(pad_id)
num_targets = not_ignore.long().sum().item()
correct = ((shift_labels == preds) & not_ignore)
correct = correct.float().sum()
accuracy = (correct / num_targets)
loss = (loss / num_targets)
return (loss, accuracy) | def calculate_loss_and_accuracy(outputs, labels, device):
'\n Compute the average loss and accuracy over non-pad_id tokens\n :param outputs:\n :param labels:\n :param device:\n :return:\n '
logits = outputs[0]
shift_logits = logits[(..., :(- 1), :)].contiguous()
shift_labels = labels[(..., 1:)].contiguous().to(device)
loss_fct = CrossEntropyLoss(ignore_index=pad_id, reduction='sum')
loss = loss_fct(shift_logits.view((- 1), shift_logits.size((- 1))), shift_labels.view((- 1)))
(_, preds) = shift_logits.max(dim=(- 1))
not_ignore = shift_labels.ne(pad_id)
num_targets = not_ignore.long().sum().item()
correct = ((shift_labels == preds) & not_ignore)
correct = correct.float().sum()
accuracy = (correct / num_targets)
loss = (loss / num_targets)
return (loss, accuracy)<|docstring|>Compute the average loss and accuracy over non-pad_id tokens
:param outputs:
:param labels:
:param device:
:return:<|endoftext|> |
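A self-contained sketch of the shift-and-mask loss computation above, on random logits (shapes and pad_id chosen for the example):

```python
import torch
from torch.nn import CrossEntropyLoss

pad_id = 0
logits = torch.randn(2, 5, 10)                   # (batch, seq_len, vocab)
labels = torch.tensor([[3, 4, 2, 0, 0],
                       [5, 1, 0, 0, 0]])
shift_logits = logits[..., :-1, :].contiguous()  # predict token t+1 from t
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss(ignore_index=pad_id, reduction="sum")
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1))
num_targets = shift_labels.ne(pad_id).long().sum().item()
print(loss / num_targets)                        # mean loss over non-pad targets
```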
cffad8138cc390a22fd1bea3155ed782b5ec3a80f1eaa537868ca27a380dcce4 | def collate_fn(batch):
'\n Find the longest input among all samples in the batch and pad the other inputs to that length\n :param batch:\n :return:\n '
global pad_id
input_ids = []
btc_size = len(batch)
max_input_len = 0
for btc_idx in range(btc_size):
if (max_input_len < len(batch[btc_idx])):
max_input_len = len(batch[btc_idx])
for btc_idx in range(btc_size):
input_len = len(batch[btc_idx])
input_ids.append(batch[btc_idx])
input_ids[btc_idx].extend(([pad_id] * (max_input_len - input_len)))
return torch.tensor(input_ids, dtype=torch.long) | Find the longest input among all samples in the batch and pad the other inputs to that length
:param batch:
:return: | train_convai2.py | collate_fn | Lambert-hpx/English-DialoGPT | 1 | python | def collate_fn(batch):
'\n Find the longest input among all samples in the batch and pad the other inputs to that length\n :param batch:\n :return:\n '
global pad_id
input_ids = []
btc_size = len(batch)
max_input_len = 0
for btc_idx in range(btc_size):
if (max_input_len < len(batch[btc_idx])):
max_input_len = len(batch[btc_idx])
for btc_idx in range(btc_size):
input_len = len(batch[btc_idx])
input_ids.append(batch[btc_idx])
input_ids[btc_idx].extend(([pad_id] * (max_input_len - input_len)))
return torch.tensor(input_ids, dtype=torch.long) | def collate_fn(batch):
'\n Find the longest input among all samples in the batch and pad the other inputs to that length\n :param batch:\n :return:\n '
global pad_id
input_ids = []
btc_size = len(batch)
max_input_len = 0
for btc_idx in range(btc_size):
if (max_input_len < len(batch[btc_idx])):
max_input_len = len(batch[btc_idx])
for btc_idx in range(btc_size):
input_len = len(batch[btc_idx])
input_ids.append(batch[btc_idx])
input_ids[btc_idx].extend(([pad_id] * (max_input_len - input_len)))
return torch.tensor(input_ids, dtype=torch.long)<|docstring|>Find the longest input among all samples in the batch and pad the other inputs to that length
:param batch:
:return:<|endoftext|> |
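Equivalent padding logic as a side-effect-free function (the version above extends the lists in `batch` in place), with a tiny batch for demonstration:

```python
import torch

pad_id = 0

def collate_fn(batch):
    # Pad every sample to the longest input in the batch.
    max_len = max(len(ids) for ids in batch)
    padded = [ids + [pad_id] * (max_len - len(ids)) for ids in batch]
    return torch.tensor(padded, dtype=torch.long)

print(collate_fn([[5, 6, 7], [8, 9]]))
# tensor([[5, 6, 7],
#         [8, 9, 0]])
```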
c19f2cf20e0993c90be4106262874a4e8361d7652c86e0a8bb493a291b445f3f | def __init__(self, memory_size, alpha):
'Prioritized experience replay buffer initialization.\n\n Parameters\n ----------\n memory_size : int\n sample size to be stored\n alpha: float\n exponent determining how much prioritization is applied:\n Prob_i ~ priority_i**alpha/sum(priority**alpha)\n '
self.tree = SumTree(memory_size)
self.memory_size = memory_size
self.alpha = alpha
self.count_sample_errors = 0 | Prioritized experience replay buffer initialization.
Parameters
----------
memory_size : int
sample size to be stored
alpha: float
exponent determining how much prioritization is applied:
Prob_i ~ priority_i**alpha/sum(priority**alpha) | src/distributed/prioritized_replay_memory.py | __init__ | mbecker12/surface-rl-decoder | 2 | python | def __init__(self, memory_size, alpha):
'Prioritized experience replay buffer initialization.\n\n Parameters\n ----------\n memory_size : int\n sample size to be stored\n alpha: float\n exponent determining how much prioritization is applied:\n Prob_i ~ priority_i**alpha/sum(priority**alpha)\n '
self.tree = SumTree(memory_size)
self.memory_size = memory_size
self.alpha = alpha
self.count_sample_errors = 0 | def __init__(self, memory_size, alpha):
'Prioritized experience replay buffer initialization.\n\n Parameters\n ----------\n memory_size : int\n sample size to be stored\n alpha: float\n exponent determining how much prioritization is applied:\n Prob_i ~ priority_i**alpha/sum(priority**alpha)\n '
self.tree = SumTree(memory_size)
self.memory_size = memory_size
self.alpha = alpha
self.count_sample_errors = 0<|docstring|>Prioritized experience replay buffer initialization.
Parameters
----------
memory_size : int
sample size to be stored
alpha: float
exponent determining how much prioritization is applied:
Prob_i ~ priority_i**alpha/sum(priority**alpha)<|endoftext|>
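A numeric illustration of the prioritization formula from the docstring, for alpha = 0.6:

```python
priorities = [1.0, 2.0, 4.0]
alpha = 0.6
weights = [p ** alpha for p in priorities]
probs = [w / sum(weights) for w in weights]
print([round(p, 2) for p in probs])   # [0.21, 0.31, 0.48] -- higher priority, higher chance
```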
85cdde4699e923c69c2a9d6a9270e2e0591a23fad8cd87ab38086371642ed042 | def save(self, data, priority):
"Add new sample.\n\n Parameters\n ----------\n data: object\n new sample\n priority: float\n sample's priority\n "
self.tree.add(data, (priority ** self.alpha)) | Add new sample.
Parameters
----------
data: object
new sample
priority: float
sample's priority | src/distributed/prioritized_replay_memory.py | save | mbecker12/surface-rl-decoder | 2 | python | def save(self, data, priority):
"Add new sample.\n\n Parameters\n ----------\n data: object\n new sample\n priority: float\n sample's priority\n "
self.tree.add(data, (priority ** self.alpha)) | def save(self, data, priority):
"Add new sample.\n\n Parameters\n ----------\n data: object\n new sample\n priority: float\n sample's priority\n "
self.tree.add(data, (priority ** self.alpha))<|docstring|>Add new sample.
Parameters
----------
data: object
new sample
priority: float
sample's priority<|endoftext|> |
13d26981e17c61c296dfd82451edba3ac96b79cf73af3e5a2c2a2cd622992423 | def sample(self, batch_size, beta, tensorboard=None, verbosity=None):
'The method return samples randomly.\n\n Parameters\n ----------\n batch_size: batch_size to be sampled\n beta: float, PER parameter\n tensorboard: (optional)(torch.utils.SummaryWriter)\n tensorboard instance for logging/monitoring\n verbosity: (optional)(int) verbosity level\n\n Returns\n -------\n out:\n list of samples\n weights:\n list of weight\n indices:\n list of sample indices\n The indices indicate sample positions in a sum tree.\n priorities:\n list of priorities\n '
if (self.tree.filled_size() < batch_size):
return (None, None, None, None)
out = []
indices = np.zeros(batch_size, dtype=np.int32)
weights = np.zeros(batch_size, dtype=np.float64)
priorities = np.zeros(batch_size, dtype=np.float64)
i = 0
max_time = 60
start_time = time()
while (i < batch_size):
if ((time() - start_time) > max_time):
raise TimeoutError('Sampling from Prioritized Experience replay exceeded maximum time! Aborting!')
rand = random.random()
try:
(data, priority, index) = self.tree.find(rand)
assert (data is not None)
priorities[i] = priority
_weight = ((((1.0 / self.memory_size) / priority) ** beta) if (priority > 1e-16) else 0.0)
weights[i] = _weight
indices[i] = index
out.append(data)
self.priority_update([index], [0])
except AssertionError as _:
self.count_sample_errors += 1
continue
else:
i += 1
if (tensorboard is not None):
if (verbosity >= 4):
current_time = time()
tensorboard.add_histogram('per/sampled_priorities', np.array(priorities, dtype=np.float32), walltime=int((current_time * 1000)))
tensorboard.add_histogram('per/sampled_weights', np.array(weights, dtype=np.float32), walltime=int((current_time * 1000)))
tensorboard.add_histogram('per/sampled_indices', np.array(indices, dtype=np.float32), walltime=int((current_time * 1000)))
self.priority_update(indices, priorities)
weights_max = np.max(weights)
if (weights_max == 0):
weights = np.zeros(batch_size, dtype=np.float64)
else:
weights_max_inv = np.float64((1.0 / weights_max))
weights = (weights * weights_max_inv)
return (out, weights, indices, priorities) | The method return samples randomly.
Parameters
----------
batch_size: batch_size to be sampled
beta: float, PER parameter
tensorboard: (optional)(torch.utils.SummaryWriter)
tensorboard instance for logging/monitoring
verbosity: (optional)(int) verbosity level
Returns
-------
out:
list of samples
weights:
list of weight
indices:
list of sample indices
The indices indicate sample positions in a sum tree.
priorities:
list of priorities | src/distributed/prioritized_replay_memory.py | sample | mbecker12/surface-rl-decoder | 2 | python | def sample(self, batch_size, beta, tensorboard=None, verbosity=None):
'The method return samples randomly.\n\n Parameters\n ----------\n batch_size: batch_size to be sampled\n beta: float, PER parameter\n tensorboard: (optional)(torch.utils.SummaryWriter)\n tensorboard instance for logging/monitoring\n verbosity: (optional)(int) verbosity level\n\n Returns\n -------\n out:\n list of samples\n weights:\n list of weight\n indices:\n list of sample indices\n The indices indicate sample positions in a sum tree.\n priorities:\n list of priorities\n '
if (self.tree.filled_size() < batch_size):
return (None, None, None, None)
out = []
indices = np.zeros(batch_size, dtype=np.int32)
weights = np.zeros(batch_size, dtype=np.float64)
priorities = np.zeros(batch_size, dtype=np.float64)
i = 0
max_time = 60
start_time = time()
while (i < batch_size):
if ((time() - start_time) > max_time):
raise TimeoutError('Sampling from Prioritized Experience replay exceeded maximum time! Aborting!')
rand = random.random()
try:
(data, priority, index) = self.tree.find(rand)
assert (data is not None)
priorities[i] = priority
_weight = ((((1.0 / self.memory_size) / priority) ** beta) if (priority > 1e-16) else 0.0)
weights[i] = _weight
indices[i] = index
out.append(data)
self.priority_update([index], [0])
except AssertionError as _:
self.count_sample_errors += 1
continue
else:
i += 1
if (tensorboard is not None):
if (verbosity >= 4):
current_time = time()
tensorboard.add_histogram('per/sampled_priorities', np.array(priorities, dtype=np.float32), walltime=int((current_time * 1000)))
tensorboard.add_histogram('per/sampled_weights', np.array(weights, dtype=np.float32), walltime=int((current_time * 1000)))
tensorboard.add_histogram('per/sampled_indices', np.array(indices, dtype=np.float32), walltime=int((current_time * 1000)))
self.priority_update(indices, priorities)
weights_max = np.max(weights)
if (weights_max == 0):
weights = np.zeros(batch_size, dtype=np.float64)
else:
weights_max_inv = np.float64((1.0 / weights_max))
weights = (weights * weights_max_inv)
return (out, weights, indices, priorities) | def sample(self, batch_size, beta, tensorboard=None, verbosity=None):
'The method return samples randomly.\n\n Parameters\n ----------\n batch_size: batch_size to be sampled\n beta: float, PER parameter\n tensorboard: (optional)(torch.utils.SummaryWriter)\n tensorboard instance for logging/monitoring\n verbosity: (optional)(int) verbosity level\n\n Returns\n -------\n out:\n list of samples\n weights:\n list of weight\n indices:\n list of sample indices\n The indices indicate sample positions in a sum tree.\n priorities:\n list of priorities\n '
if (self.tree.filled_size() < batch_size):
return (None, None, None, None)
out = []
indices = np.zeros(batch_size, dtype=np.int32)
weights = np.zeros(batch_size, dtype=np.float64)
priorities = np.zeros(batch_size, dtype=np.float64)
i = 0
max_time = 60
start_time = time()
while (i < batch_size):
if ((time() - start_time) > max_time):
raise TimeoutError('Sampling from Prioritized Experience replay exceeded maximum time! Aborting!')
rand = random.random()
try:
(data, priority, index) = self.tree.find(rand)
assert (data is not None)
priorities[i] = priority
_weight = ((((1.0 / self.memory_size) / priority) ** beta) if (priority > 1e-16) else 0.0)
weights[i] = _weight
indices[i] = index
out.append(data)
self.priority_update([index], [0])
except AssertionError as _:
self.count_sample_errors += 1
continue
else:
i += 1
if (tensorboard is not None):
if (verbosity >= 4):
current_time = time()
tensorboard.add_histogram('per/sampled_priorities', np.array(priorities, dtype=np.float32), walltime=int((current_time * 1000)))
tensorboard.add_histogram('per/sampled_weights', np.array(weights, dtype=np.float32), walltime=int((current_time * 1000)))
tensorboard.add_histogram('per/sampled_indices', np.array(indices, dtype=np.float32), walltime=int((current_time * 1000)))
self.priority_update(indices, priorities)
weights_max = np.max(weights)
if (weights_max == 0):
weights = np.zeros(batch_size, dtype=np.float64)
else:
weights_max_inv = np.float64((1.0 / weights_max))
weights = (weights * weights_max_inv)
return (out, weights, indices, priorities)<|docstring|>The method return samples randomly.
Parameters
----------
batch_size: batch_size to be sampled
beta: float, PER parameter
tensorboard: (optional)(torch.utils.SummaryWriter)
tensorboard instance for logging/monitoring
verbosity: (optional)(int) verbosity level
Returns
-------
out:
list of samples
weights:
list of weight
indices:
list of sample indices
The indices indicate sample positions in a sum tree.
priorities:
list of priorities<|endoftext|> |
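A hypothetical save/sample/update cycle against this API (the enclosing class name is assumed here; only the methods shown in these rows are used):

```python
memory = PrioritizedReplayMemory(memory_size=1024, alpha=0.6)   # class name assumed
for step in range(64):
    memory.save(("state", "action", "reward"), priority=1.0)
out, weights, indices, priorities = memory.sample(batch_size=32, beta=0.4)
if out is not None:
    new_priorities = [0.5] * len(indices)   # e.g. |TD error| after a learn step
    memory.priority_update(indices, new_priorities)
```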
4d3744eb336f0a682f1eed5d80ffb29efffe6fe34af69ecf2f553e6d66156da6 | def priority_update(self, indices, priorities):
"Update the samples' priority.\n\n Parameters\n ----------\n indices:\n list of sample indices\n "
for (i, prio) in zip(indices, priorities):
self.tree.val_update(i, (prio ** self.alpha)) | Update the samples' priority.
Parameters
----------
indices:
list of sample indices | src/distributed/prioritized_replay_memory.py | priority_update | mbecker12/surface-rl-decoder | 2 | python | def priority_update(self, indices, priorities):
"Update the samples' priority.\n\n Parameters\n ----------\n indices:\n list of sample indices\n "
for (i, prio) in zip(indices, priorities):
self.tree.val_update(i, (prio ** self.alpha)) | def priority_update(self, indices, priorities):
"Update the samples' priority.\n\n Parameters\n ----------\n indices:\n list of sample indices\n "
for (i, prio) in zip(indices, priorities):
self.tree.val_update(i, (prio ** self.alpha))<|docstring|>Update the samples' priority.
Parameters
----------
indices:
list of sample indices<|endoftext|> |
9a18e370efd6b2b3ac87ed52a02ff000cee0f21ebc74d180eb9aa06d493a2976 | def reset_alpha(self, alpha):
'Reset an exponent alpha.\n\n Parameters\n ----------\n alpha: float\n '
(self.alpha, old_alpha) = (alpha, self.alpha)
priorities = [(self.tree.get_val(i) ** (- old_alpha)) for i in range(self.tree.filled_size())]
self.priority_update(range(self.tree.filled_size()), priorities) | Reset an exponent alpha.
Parameters
----------
alpha: float | src/distributed/prioritized_replay_memory.py | reset_alpha | mbecker12/surface-rl-decoder | 2 | python | def reset_alpha(self, alpha):
'Reset an exponent alpha.\n\n Parameters\n ----------\n alpha: float\n '
(self.alpha, old_alpha) = (alpha, self.alpha)
priorities = [(self.tree.get_val(i) ** (- old_alpha)) for i in range(self.tree.filled_size())]
self.priority_update(range(self.tree.filled_size()), priorities) | def reset_alpha(self, alpha):
'Reset an exponent alpha.\n\n Parameters\n ----------\n alpha: float\n '
(self.alpha, old_alpha) = (alpha, self.alpha)
priorities = [(self.tree.get_val(i) ** (- old_alpha)) for i in range(self.tree.filled_size())]
self.priority_update(range(self.tree.filled_size()), priorities)<|docstring|>Reset an exponent alpha.
Parameters
----------
alpha: float<|endoftext|> |
53e9946af186a2560c091e8631b3b6a4b84c437e14780c88c7418fab271ce97f | def print_tree(self):
'\n Print a simple representation of the sum tree.\n '
self.tree.print_tree() | Print a simple representation of the sum tree. | src/distributed/prioritized_replay_memory.py | print_tree | mbecker12/surface-rl-decoder | 2 | python | def print_tree(self):
'\n \n '
self.tree.print_tree() | def print_tree(self):
'\n \n '
self.tree.print_tree()<|docstring|>Print a simple representation of the sum tree.<|endoftext|> |
06e3e87734fca157c3e6e0a655f7f95845d46d960372644892629bb226e965b0 | def filled_size(self):
'\n Return the number of elements stored in the sum tree.\n '
return self.tree.filled_size() | Return the number of elements stored in the sum tree. | src/distributed/prioritized_replay_memory.py | filled_size | mbecker12/surface-rl-decoder | 2 | python | def filled_size(self):
'\n \n '
return self.tree.filled_size() | def filled_size(self):
'\n \n '
return self.tree.filled_size()<|docstring|>Return the number of elements stored in the sum tree.<|endoftext|> |
6f3d5f5cef699acc56dd67eda28e4740564427c2fb509b100ccd323d92592ef1 | def build_ping(dest, count=None, source=None, timeout=None, ttl=None, size=None, vrf=None):
"\n Function to build the command to send to the terminal for the switch\n to execute. All args come from the module's unique params.\n "
if (vrf is not None):
cmd = 'ping vrf {0} {1}'.format(vrf, dest)
else:
cmd = 'ping {0}'.format(dest)
if (count is not None):
cmd += ' count {0}'.format(str(count))
if (timeout is not None):
cmd += ' timeout {0}'.format(str(timeout))
if (ttl is not None):
cmd += ' ttl {0}'.format(str(ttl))
if (size is not None):
cmd += ' size {0}'.format(str(size))
if (source is not None):
cmd += ' source {0}'.format(source)
return cmd | Function to build the command to send to the terminal for the switch
to execute. All args come from the module's unique params. | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/icx/icx_ping.py | build_ping | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | python | def build_ping(dest, count=None, source=None, timeout=None, ttl=None, size=None, vrf=None):
"\n Function to build the command to send to the terminal for the switch\n to execute. All args come from the module's unique params.\n "
if (vrf is not None):
cmd = 'ping vrf {0} {1}'.format(vrf, dest)
else:
cmd = 'ping {0}'.format(dest)
if (count is not None):
cmd += ' count {0}'.format(str(count))
if (timeout is not None):
cmd += ' timeout {0}'.format(str(timeout))
if (ttl is not None):
cmd += ' ttl {0}'.format(str(ttl))
if (size is not None):
cmd += ' size {0}'.format(str(size))
if (source is not None):
cmd += ' source {0}'.format(source)
return cmd | def build_ping(dest, count=None, source=None, timeout=None, ttl=None, size=None, vrf=None):
"\n Function to build the command to send to the terminal for the switch\n to execute. All args come from the module's unique params.\n "
if (vrf is not None):
cmd = 'ping vrf {0} {1}'.format(vrf, dest)
else:
cmd = 'ping {0}'.format(dest)
if (count is not None):
cmd += ' count {0}'.format(str(count))
if (timeout is not None):
cmd += ' timeout {0}'.format(str(timeout))
if (ttl is not None):
cmd += ' ttl {0}'.format(str(ttl))
if (size is not None):
cmd += ' size {0}'.format(str(size))
if (source is not None):
cmd += ' source {0}'.format(source)
return cmd<|docstring|>Function to build the command to send to the terminal for the switch
to execute. All args come from the module's unique params.<|endoftext|> |
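With `build_ping` from above in scope, a couple of hypothetical invocations show the command strings it produces:

```python
print(build_ping("10.0.0.1"))
# ping 10.0.0.1
print(build_ping("10.0.0.1", count=5, timeout=2, vrf="mgmt"))
# ping vrf mgmt 10.0.0.1 count 5 timeout 2
```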
e983b95284e52379488f9b61f8f0cc86b96abfaf8038b9919cbd7893dbbdc6b3 | def parse_ping(ping_stats):
'\n Function used to parse the statistical information from the ping response.\n Example: "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms."\n Returns the percent of packet loss, received packets, transmitted packets, and RTT dict.\n '
if ping_stats.startswith('Success'):
rate_re = re.compile('^\\w+\\s+\\w+\\s+\\w+\\s+(?P<pct>\\d+)\\s+\\w+\\s+\\((?P<rx>\\d+)/(?P<tx>\\d+)\\)')
rtt_re = re.compile('.*,\\s+\\S+\\s+\\S+=(?P<min>\\d+)/(?P<avg>\\d+)/(?P<max>\\d+)\\s+\\w+\\.+\\s*$|.*\\s*$')
rate = rate_re.match(ping_stats)
rtt = rtt_re.match(ping_stats)
return (rate.group('pct'), rate.group('rx'), rate.group('tx'), rtt.groupdict())
else:
rate_re = re.compile('^Sending+\\s+(?P<tx>\\d+),')
rate = rate_re.match(ping_stats)
rtt = {'avg': 0, 'max': 0, 'min': 0}
return (0, 0, rate.group('tx'), rtt) | Function used to parse the statistical information from the ping response.
Example: "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms."
Returns the percent of packet loss, received packets, transmitted packets, and RTT dict. | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/icx/icx_ping.py | parse_ping | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | python | def parse_ping(ping_stats):
'\n Function used to parse the statistical information from the ping response.\n Example: "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms."\n Returns the percent of packet loss, received packets, transmitted packets, and RTT dict.\n '
if ping_stats.startswith('Success'):
rate_re = re.compile('^\\w+\\s+\\w+\\s+\\w+\\s+(?P<pct>\\d+)\\s+\\w+\\s+\\((?P<rx>\\d+)/(?P<tx>\\d+)\\)')
rtt_re = re.compile('.*,\\s+\\S+\\s+\\S+=(?P<min>\\d+)/(?P<avg>\\d+)/(?P<max>\\d+)\\s+\\w+\\.+\\s*$|.*\\s*$')
rate = rate_re.match(ping_stats)
rtt = rtt_re.match(ping_stats)
return (rate.group('pct'), rate.group('rx'), rate.group('tx'), rtt.groupdict())
else:
rate_re = re.compile('^Sending+\\s+(?P<tx>\\d+),')
rate = rate_re.match(ping_stats)
rtt = {'avg': 0, 'max': 0, 'min': 0}
return (0, 0, rate.group('tx'), rtt) | def parse_ping(ping_stats):
'\n Function used to parse the statistical information from the ping response.\n Example: "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms."\n Returns the percent of packet loss, received packets, transmitted packets, and RTT dict.\n '
if ping_stats.startswith('Success'):
rate_re = re.compile('^\\w+\\s+\\w+\\s+\\w+\\s+(?P<pct>\\d+)\\s+\\w+\\s+\\((?P<rx>\\d+)/(?P<tx>\\d+)\\)')
rtt_re = re.compile('.*,\\s+\\S+\\s+\\S+=(?P<min>\\d+)/(?P<avg>\\d+)/(?P<max>\\d+)\\s+\\w+\\.+\\s*$|.*\\s*$')
rate = rate_re.match(ping_stats)
rtt = rtt_re.match(ping_stats)
return (rate.group('pct'), rate.group('rx'), rate.group('tx'), rtt.groupdict())
else:
rate_re = re.compile('^Sending+\\s+(?P<tx>\\d+),')
rate = rate_re.match(ping_stats)
rtt = {'avg': 0, 'max': 0, 'min': 0}
return (0, 0, rate.group('tx'), rtt)<|docstring|>Function used to parse the statistical information from the ping response.
Example: "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms."
Returns the percent of packet loss, received packets, transmitted packets, and RTT dict.<|endoftext|> |
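With `parse_ping` in scope, feeding it the sample line from its own docstring yields string-typed fields (callers convert them to int later):

```python
stats = "Success rate is 100 percent (5/5), round-trip min/avg/max=40/51/55 ms."
pct, rx, tx, rtt = parse_ping(stats)
print(pct, rx, tx, rtt)
# 100 5 5 {'min': '40', 'avg': '51', 'max': '55'}
```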
9caae97aff10231e19e5592aec8176d697257bf12a60743635de2c36d395364d | def validate_results(module, loss, results):
'\n This function is used to validate whether the ping results were unexpected per "state" param.\n '
state = module.params['state']
if ((state == 'present') and (loss == 100)):
module.fail_json(msg='Ping failed unexpectedly', **results)
elif ((state == 'absent') and (loss < 100)):
module.fail_json(msg='Ping succeeded unexpectedly', **results) | This function is used to validate whether the ping results were unexpected per "state" param. | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/icx/icx_ping.py | validate_results | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | python | def validate_results(module, loss, results):
'\n \n '
state = module.params['state']
if ((state == 'present') and (loss == 100)):
module.fail_json(msg='Ping failed unexpectedly', **results)
elif ((state == 'absent') and (loss < 100)):
module.fail_json(msg='Ping succeeded unexpectedly', **results) | def validate_results(module, loss, results):
'\n \n '
state = module.params['state']
if ((state == 'present') and (loss == 100)):
module.fail_json(msg='Ping failed unexpectedly', **results)
elif ((state == 'absent') and (loss < 100)):
module.fail_json(msg='Ping succeeded unexpectedly', **results)<|docstring|>This function is used to validate whether the ping results were unexpected per "state" param.<|endoftext|> |
45a8bc3dbb699ac9e01665d48649dd3a072a5534e7a3beb4e9703bdc4bc62cf4 | def main():
' main entry point for module execution\n '
argument_spec = dict(count=dict(type='int'), dest=dict(type='str', required=True), timeout=dict(type='int'), ttl=dict(type='int'), size=dict(type='int'), source=dict(type='str'), state=dict(type='str', choices=['absent', 'present'], default='present'), vrf=dict(type='str'))
module = AnsibleModule(argument_spec=argument_spec)
count = module.params['count']
dest = module.params['dest']
source = module.params['source']
timeout = module.params['timeout']
ttl = module.params['ttl']
size = module.params['size']
vrf = module.params['vrf']
results = {}
warnings = list()
if warnings:
results['warnings'] = warnings
response = ''
try:
validate_parameters(module, timeout, count)
results['commands'] = [build_ping(dest, count, source, timeout, ttl, size, vrf)]
ping_results = run_commands(module, commands=results['commands'])
ping_results_list = ping_results[0].split('\n')
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
validate_fail(module, ping_results[0])
stats = ''
statserror = ''
for line in ping_results_list:
if line.startswith('Sending'):
statserror = line
if line.startswith('Success'):
stats = line
elif line.startswith('No reply'):
stats = statserror
(success, rx, tx, rtt) = parse_ping(stats)
loss = abs((100 - int(success)))
results['packet_loss'] = (str(loss) + '%')
results['packets_rx'] = int(rx)
results['packets_tx'] = int(tx)
for (k, v) in rtt.items():
if (rtt[k] is not None):
rtt[k] = int(v)
results['rtt'] = rtt
validate_results(module, loss, results)
module.exit_json(**results) | main entry point for module execution | ansible/venv/lib/python2.7/site-packages/ansible/modules/network/icx/icx_ping.py | main | gvashchenkolineate/gvashchenkolineate_infra_trytravis | 17 | python | def main():
' \n '
argument_spec = dict(count=dict(type='int'), dest=dict(type='str', required=True), timeout=dict(type='int'), ttl=dict(type='int'), size=dict(type='int'), source=dict(type='str'), state=dict(type='str', choices=['absent', 'present'], default='present'), vrf=dict(type='str'))
module = AnsibleModule(argument_spec=argument_spec)
count = module.params['count']
dest = module.params['dest']
source = module.params['source']
timeout = module.params['timeout']
ttl = module.params['ttl']
size = module.params['size']
vrf = module.params['vrf']
results = {}
warnings = list()
if warnings:
results['warnings'] = warnings
response = ''
try:
validate_parameters(module, timeout, count)
results['commands'] = [build_ping(dest, count, source, timeout, ttl, size, vrf)]
ping_results = run_commands(module, commands=results['commands'])
ping_results_list = ping_results[0].split('\n')
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
validate_fail(module, ping_results[0])
stats = ''
statserror = ''
for line in ping_results_list:
if line.startswith('Sending'):
statserror = line
if line.startswith('Success'):
stats = line
elif line.startswith('No reply'):
stats = statserror
(success, rx, tx, rtt) = parse_ping(stats)
loss = abs((100 - int(success)))
results['packet_loss'] = (str(loss) + '%')
results['packets_rx'] = int(rx)
results['packets_tx'] = int(tx)
for (k, v) in rtt.items():
if (rtt[k] is not None):
rtt[k] = int(v)
results['rtt'] = rtt
validate_results(module, loss, results)
module.exit_json(**results) | def main():
' \n '
argument_spec = dict(count=dict(type='int'), dest=dict(type='str', required=True), timeout=dict(type='int'), ttl=dict(type='int'), size=dict(type='int'), source=dict(type='str'), state=dict(type='str', choices=['absent', 'present'], default='present'), vrf=dict(type='str'))
module = AnsibleModule(argument_spec=argument_spec)
count = module.params['count']
dest = module.params['dest']
source = module.params['source']
timeout = module.params['timeout']
ttl = module.params['ttl']
size = module.params['size']
vrf = module.params['vrf']
results = {}
warnings = list()
if warnings:
results['warnings'] = warnings
response = ''
try:
validate_parameters(module, timeout, count)
results['commands'] = [build_ping(dest, count, source, timeout, ttl, size, vrf)]
ping_results = run_commands(module, commands=results['commands'])
ping_results_list = ping_results[0].split('\n')
except ConnectionError as exc:
module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
validate_fail(module, ping_results[0])
stats = ''
statserror = ''
for line in ping_results_list:
if line.startswith('Sending'):
statserror = line
if line.startswith('Success'):
stats = line
elif line.startswith('No reply'):
stats = statserror
(success, rx, tx, rtt) = parse_ping(stats)
loss = abs((100 - int(success)))
results['packet_loss'] = (str(loss) + '%')
results['packets_rx'] = int(rx)
results['packets_tx'] = int(tx)
for (k, v) in rtt.items():
if (rtt[k] is not None):
rtt[k] = int(v)
results['rtt'] = rtt
validate_results(module, loss, results)
module.exit_json(**results)<|docstring|>main entry point for module execution<|endoftext|> |
76821f96631695c7e743a49c5dd1aede5d1d0f5a151806e9e3be5887cdf3ea11 | def findLUSlength(self, strs):
'\n :type strs: List[str]\n :rtype: int\n '
def isSubsequence(a, b):
i = 0
for j in xrange(len(b)):
if (i >= len(a)):
break
if (a[i] == b[j]):
i += 1
return (i == len(a))
strs.sort(key=len, reverse=True)
for i in xrange(len(strs)):
all_of = True
for j in xrange(len(strs)):
if (len(strs[j]) < len(strs[i])):
break
if ((i != j) and isSubsequence(strs[i], strs[j])):
all_of = False
break
if all_of:
return len(strs[i])
return (- 1) | :type strs: List[str]
:rtype: int | Python/longest-uncommon-subsequence-ii.py | findLUSlength | donaldcao/LeetCode-Solutions | 3,269 | python | def findLUSlength(self, strs):
'\n :type strs: List[str]\n :rtype: int\n '
def isSubsequence(a, b):
i = 0
for j in xrange(len(b)):
if (i >= len(a)):
break
if (a[i] == b[j]):
i += 1
return (i == len(a))
strs.sort(key=len, reverse=True)
for i in xrange(len(strs)):
all_of = True
for j in xrange(len(strs)):
if (len(strs[j]) < len(strs[i])):
break
if ((i != j) and isSubsequence(strs[i], strs[j])):
all_of = False
break
if all_of:
return len(strs[i])
return (- 1) | def findLUSlength(self, strs):
'\n :type strs: List[str]\n :rtype: int\n '
def isSubsequence(a, b):
i = 0
for j in xrange(len(b)):
if (i >= len(a)):
break
if (a[i] == b[j]):
i += 1
return (i == len(a))
strs.sort(key=len, reverse=True)
for i in xrange(len(strs)):
all_of = True
for j in xrange(len(strs)):
if (len(strs[j]) < len(strs[i])):
break
if ((i != j) and isSubsequence(strs[i], strs[j])):
all_of = False
break
if all_of:
return len(strs[i])
return (- 1)<|docstring|>:type strs: List[str]
:rtype: int<|endoftext|> |
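Two quick cases, matching the Python 2 style of the solution above (the enclosing `Solution` class is implied by the LeetCode format):

```python
# "aba" is not a subsequence of "cdc" or "eae", so length 3 wins; in the
# second list every string is a subsequence of an equal or longer one.
print Solution().findLUSlength(['aba', 'cdc', 'eae'])   # 3
print Solution().findLUSlength(['aaa', 'aaa', 'aa'])    # -1
```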
127fbe0ab35ab894f4da68c85974c93e2ef306d377b0696fc0ccd1ced30e41a2 | def is_valid(self, identifier, lint_context):
' Implicit scope builtin variables are prohibited.\n Because it will make unexpected variable name conflict between builtin\n and implicit global/function local. For example:\n\n " This variable is not global variable but builtin variable.\n let count = 100\n '
scope_plugin = lint_context['plugins']['scope']
scope_visibility = scope_plugin.get_objective_scope_visibility(identifier)
if (scope_visibility is not ScopeVisibility.BUILTIN):
return True
explicity = scope_plugin.get_explicity_of_scope_visibility(identifier)
is_valid = (explicity is not ExplicityOfScopeVisibility.IMPLICIT)
if (not is_valid):
self._make_description(identifier, scope_plugin)
return is_valid | Implicit scope builtin variables are prohibited.
Because it will make unexpected variable name conflict between builtin
and implicit global/function local. For example:
" This variable is not global variable but builtin variable.
let count = 100 | vint/linting/policy/prohibit_implicit_scope_builtin_variable.py | is_valid | mosheavni/vint | 538 | python | def is_valid(self, identifier, lint_context):
' Implicit scope builtin variables are prohibited.\n Because it will make unexpected variable name conflict between builtin\n and implicit global/function local. For example:\n\n " This variable is not global variable but builtin variable.\n let count = 100\n '
scope_plugin = lint_context['plugins']['scope']
scope_visibility = scope_plugin.get_objective_scope_visibility(identifier)
if (scope_visibility is not ScopeVisibility.BUILTIN):
return True
explicity = scope_plugin.get_explicity_of_scope_visibility(identifier)
is_valid = (explicity is not ExplicityOfScopeVisibility.IMPLICIT)
if (not is_valid):
self._make_description(identifier, scope_plugin)
return is_valid | def is_valid(self, identifier, lint_context):
' Implicit scope builtin variables are prohibited.\n Because it will make unexpected variable name conflict between builtin\n and implicit global/function local. For example:\n\n " This variable is not global variable but builtin variable.\n let count = 100\n '
scope_plugin = lint_context['plugins']['scope']
scope_visibility = scope_plugin.get_objective_scope_visibility(identifier)
if (scope_visibility is not ScopeVisibility.BUILTIN):
return True
explicity = scope_plugin.get_explicity_of_scope_visibility(identifier)
is_valid = (explicity is not ExplicityOfScopeVisibility.IMPLICIT)
if (not is_valid):
self._make_description(identifier, scope_plugin)
return is_valid<|docstring|>Implicit scope builtin variables are prohibited.
Because it will make unexpected variable name conflict between builtin
and implicit global/function local. For example:
" This variable is not global variable but builtin variable.
let count = 100<|endoftext|> |
bb45dc8be1a33dbc0db76be9b7d8a89961d92129535a0ba644e4c96a6c653eda | def cmd_output_startswith(cmd, string):
'call the run() method of cmd and check if output startswith string'
with captured_output() as (out, err):
cmd.run()
output = out.getvalue().strip()
return output.startswith(string) | call the run() method of cmd and check if output startswith string | tests/helpers.py | cmd_output_startswith | babab/DisPass | 3 | python | def cmd_output_startswith(cmd, string):
with captured_output() as (out, err):
cmd.run()
output = out.getvalue().strip()
return output.startswith(string) | def cmd_output_startswith(cmd, string):
with captured_output() as (out, err):
cmd.run()
output = out.getvalue().strip()
return output.startswith(string)<|docstring|>call the run() method of cmd and check if output startswith string<|endoftext|> |
36a2850fe6fa500df7b093e1d65c75ecea052d5c33992f49cf8918ef59adc098 | def _game_process_entry_point(propty: GameMLModeExecutorProperty):
'\n The real entry point of the game process\n '
from .loops import GameMLModeExecutor
executor = GameMLModeExecutor(propty)
executor.start() | The real entry point of the game process | MLGame/mlgame/process.py | _game_process_entry_point | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | 0 | python | def _game_process_entry_point(propty: GameMLModeExecutorProperty):
'\n \n '
from .loops import GameMLModeExecutor
executor = GameMLModeExecutor(propty)
executor.start() | def _game_process_entry_point(propty: GameMLModeExecutorProperty):
'\n \n '
from .loops import GameMLModeExecutor
executor = GameMLModeExecutor(propty)
executor.start()<|docstring|>The real entry point of the game process<|endoftext|> |
be231e5be71e2440746b4fe78171cdd82b13a1036f177d7fddf96d2552368b29 | def _ml_process_entry_point(propty: MLExecutorProperty):
'\n The real entry point of the ml process\n '
from .loops import MLExecutor
executor = MLExecutor(propty)
executor.start() | The real entry point of the ml process | MLGame/mlgame/process.py | _ml_process_entry_point | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | 0 | python | def _ml_process_entry_point(propty: MLExecutorProperty):
'\n \n '
from .loops import MLExecutor
executor = MLExecutor(propty)
executor.start() | def _ml_process_entry_point(propty: MLExecutorProperty):
'\n \n '
from .loops import MLExecutor
executor = MLExecutor(propty)
executor.start()<|docstring|>The real entry point of the ml process<|endoftext|> |
567e73f0a216c6c374087f7b43ca680f5d8c2b7805f9407db2ec306e261b61db | def __init__(self, game_executor_propty: GameMLModeExecutorProperty, ml_executor_propties: list):
'\n Constructor\n\n @param game_executor_propty The property for the game executor\n @param ml_executor_propties A list of `MLExecutorProperty` for the ml executors\n '
self._game_executor_propty = game_executor_propty
self._ml_executor_propties = ml_executor_propties
self._ml_procs = [] | Constructor
@param game_executor_propty The property for the game executor
@param ml_executor_propties A list of `MLExecutorProperty` for the ml executors | MLGame/mlgame/process.py | __init__ | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | 0 | python | def __init__(self, game_executor_propty: GameMLModeExecutorProperty, ml_executor_propties: list):
'\n Constructor\n\n @param game_executor_propty The property for the game executor\n @param ml_executor_propties A list of `MLExecutorProperty` for the ml executors\n '
self._game_executor_propty = game_executor_propty
self._ml_executor_propties = ml_executor_propties
self._ml_procs = [] | def __init__(self, game_executor_propty: GameMLModeExecutorProperty, ml_executor_propties: list):
'\n Constructor\n\n @param game_executor_propty The property for the game executor\n @param ml_executor_propties A list of `MLExecutorProperty` for the ml executors\n '
self._game_executor_propty = game_executor_propty
self._ml_executor_propties = ml_executor_propties
self._ml_procs = []<|docstring|>Constructor
@param game_executor_propty The property for the game executor
@param ml_executor_propties A list of `MLExecutorProperty` for the ml executors<|endoftext|>
3b4e87b3bb388c9e4fcbadc512b58ec5909c249fa6eda3cb377abc2990d8dd0d | def start(self):
'\n Start the processes\n\n The ml processes are spawned and started first, and then the main process executes\n the game process. After returning from the game process, the ml processes will be\n terminated.\n\n Note that there must be 1 game process and at least 1 ml process set\n before calling this function. Otherwise, the RuntimeError will be raised.\n '
if (self._game_executor_propty is None):
raise RuntimeError('The game process is not set. Cannot start the ProcessManager')
if (len(self._ml_executor_propties) == 0):
raise RuntimeError('No ml process added. Cannot start the ProcessManager')
self._create_pipes()
self._start_ml_processes()
returncode = 0
try:
self._start_game_process()
except ProcessError as e:
print("Error: Exception occurred in '{}' process:".format(e.process_name))
print(e.message)
returncode = (- 1)
self._terminate()
return returncode | Start the processes
The ml processes are spawned and started first, and then the main process executes
the game process. After returning from the game process, the ml processes will be
terminated.
Note that there must be 1 game process and at least 1 ml process set
before calling this function. Otherwise, the RuntimeError will be raised. | MLGame/mlgame/process.py | start | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | 0 | python | def start(self):
'\n Start the processes\n\n The ml processes are spawned and started first, and then the main process executes\n the game process. After returning from the game process, the ml processes will be\n terminated.\n\n Note that there must be 1 game process and at least 1 ml process set\n before calling this function. Otherwise, the RuntimeError will be raised.\n '
if (self._game_executor_propty is None):
raise RuntimeError('The game process is not set. Cannot start the ProcessManager')
if (len(self._ml_executor_propties) == 0):
raise RuntimeError('No ml process added. Cannot start the ProcessManager')
self._create_pipes()
self._start_ml_processes()
returncode = 0
try:
self._start_game_process()
except ProcessError as e:
print("Error: Exception occurred in '{}' process:".format(e.process_name))
print(e.message)
returncode = (- 1)
self._terminate()
return returncode | def start(self):
'\n Start the processes\n\n The ml processes are spawned and started first, and then the main process executes\n the game process. After returning from the game process, the ml processes will be\n terminated.\n\n Note that there must be 1 game process and at least 1 ml process set\n before calling this function. Otherwise, the RuntimeError will be raised.\n '
if (self._game_executor_propty is None):
raise RuntimeError('The game process is not set. Cannot start the ProcessManager')
if (len(self._ml_executor_propties) == 0):
raise RuntimeError('No ml process added. Cannot start the ProcessManager')
self._create_pipes()
self._start_ml_processes()
returncode = 0
try:
self._start_game_process()
except ProcessError as e:
print("Error: Exception occurred in '{}' process:".format(e.process_name))
print(e.message)
returncode = (- 1)
self._terminate()
return returncode<|docstring|>Start the processes
The ml processes are spawned and started first, and then the main process executes
the game process. After returning from the game process, the ml processes will be
terminated.
Note that there must be 1 game process and at least 1 ml process set
before calling this function. Otherwise, the RuntimeError will be raised.<|endoftext|> |
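A minimal sketch of the failure path described in the docstring above; the import location is an assumption based on the record's path (MLGame/mlgame/process.py), and object() stands in for a real GameMLModeExecutorProperty:

from mlgame.process import ProcessManager

manager = ProcessManager(game_executor_propty=object(), ml_executor_propties=[])
try:
    manager.start()
except RuntimeError as exc:
    print(exc)  # -> No ml process added. Cannot start the ProcessManager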
920e42bf1f27959b1680063abb18f1b8be8a2b8ae14d2fd2c5167595fb23055f | def _create_pipes(self):
'\n Create communication pipes for processes\n '
for ml_executor_propty in self._ml_executor_propties:
(recv_pipe_for_game, send_pipe_for_ml) = Pipe(False)
(recv_pipe_for_ml, send_pipe_for_game) = Pipe(False)
self._game_executor_propty.comm_manager.add_comm_to_ml(ml_executor_propty.name, recv_pipe_for_game, send_pipe_for_game)
ml_executor_propty.comm_manager.set_comm_to_game(recv_pipe_for_ml, send_pipe_for_ml) | Create communication pipes for processes | MLGame/mlgame/process.py | _create_pipes | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | 0 | python | def _create_pipes(self):
'\n \n '
for ml_executor_propty in self._ml_executor_propties:
(recv_pipe_for_game, send_pipe_for_ml) = Pipe(False)
(recv_pipe_for_ml, send_pipe_for_game) = Pipe(False)
self._game_executor_propty.comm_manager.add_comm_to_ml(ml_executor_propty.name, recv_pipe_for_game, send_pipe_for_game)
ml_executor_propty.comm_manager.set_comm_to_game(recv_pipe_for_ml, send_pipe_for_ml) | def _create_pipes(self):
'\n \n '
for ml_executor_propty in self._ml_executor_propties:
(recv_pipe_for_game, send_pipe_for_ml) = Pipe(False)
(recv_pipe_for_ml, send_pipe_for_game) = Pipe(False)
self._game_executor_propty.comm_manager.add_comm_to_ml(ml_executor_propty.name, recv_pipe_for_game, send_pipe_for_game)
ml_executor_propty.comm_manager.set_comm_to_game(recv_pipe_for_ml, send_pipe_for_ml)<|docstring|>Create communication pipes for processes<|endoftext|> |
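A standalone illustration of the pipe wiring above: Pipe(False) returns a (receive_end, send_end) pair, so each game/ml link is built from two unidirectional channels rather than one duplex pipe. A small sketch of the same pattern:

from multiprocessing import Pipe

recv_for_game, send_for_ml = Pipe(False)   # ml -> game direction
recv_for_ml, send_for_game = Pipe(False)   # game -> ml direction

send_for_game.send({"frame": 1})           # game side writes a scene update
print(recv_for_ml.recv())                  # ml side reads it -> {'frame': 1}
send_for_ml.send("MOVE_LEFT")              # ml side answers with a command
print(recv_for_game.recv())                # game side reads it -> MOVE_LEFT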
67f999d06bc599082a70b326a09546a6e340101b5d5a7395aca01e7023d7024b | def _start_ml_processes(self):
'\n Spawn and start all ml processes\n '
for propty in self._ml_executor_propties:
process = Process(target=_ml_process_entry_point, name=propty.name, args=(propty,))
process.start()
self._ml_procs.append(process) | Spawn and start all ml processes | MLGame/mlgame/process.py | _start_ml_processes | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | 0 | python | def _start_ml_processes(self):
'\n \n '
for propty in self._ml_executor_propties:
process = Process(target=_ml_process_entry_point, name=propty.name, args=(propty,))
process.start()
self._ml_procs.append(process) | def _start_ml_processes(self):
'\n \n '
for propty in self._ml_executor_propties:
process = Process(target=_ml_process_entry_point, name=propty.name, args=(propty,))
process.start()
self._ml_procs.append(process)<|docstring|>Spawn and start all ml processes<|endoftext|> |
d3d0e9b0f6cdb53b709a0006dd6abbc46d102a620b0994c96b4c54df918bf1af | def _start_game_process(self):
'\n Start the game process\n '
_game_process_entry_point(self._game_executor_propty) | Start the game process | MLGame/mlgame/process.py | _start_game_process | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | 0 | python | def _start_game_process(self):
'\n \n '
_game_process_entry_point(self._game_executor_propty) | def _start_game_process(self):
'\n \n '
_game_process_entry_point(self._game_executor_propty)<|docstring|>Start the game process<|endoftext|> |
dbcabe3b582d87cbfb34dea1ea0588eb618f918fbd2df6c31edd0d79fcc012db | def _terminate(self):
'\n Stop all spawned ml processes if they exist\n '
for ml_process in self._ml_procs:
if ml_process.is_alive():
self._game_executor_propty.comm_manager.send_to_ml(None, ml_process.name) | Stop all spawned ml processes if they exist | MLGame/mlgame/process.py | _terminate | Liuian/1092_INTRODUCTION-TO-MACHINE-LEARNING-AND-ITS-APPLICATION-TO-GAMING | 0 | python | def _terminate(self):
'\n \n '
for ml_process in self._ml_procs:
if ml_process.is_alive():
self._game_executor_propty.comm_manager.send_to_ml(None, ml_process.name) | def _terminate(self):
'\n \n '
for ml_process in self._ml_procs:
if ml_process.is_alive():
self._game_executor_propty.comm_manager.send_to_ml(None, ml_process.name)<|docstring|>Stop all spawned ml processes if they exist<|endoftext|>
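_terminate() asks each still-alive ml process to stop by sending a None sentinel over its pipe instead of force-killing it. A minimal standalone version of that shutdown pattern:

from multiprocessing import Pipe, Process

def worker(conn):
    # Consume items until the None sentinel arrives, then exit cleanly.
    while True:
        item = conn.recv()
        if item is None:
            break
        print("got", item)

if __name__ == "__main__":
    parent_conn, child_conn = Pipe()
    proc = Process(target=worker, args=(child_conn,))
    proc.start()
    parent_conn.send("frame-1")
    parent_conn.send(None)      # request a clean stop, as _terminate() does
    proc.join()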
7cd45e299002466967b96d2b7978e4d2862843a1f160f0b0967a6180ef387223 | def create(self, vorbis_audio_configuration, **kwargs):
'Create Vorbis Codec Configuration\n\n :param vorbis_audio_configuration: The Vorbis Codec Configuration to be created\n :type vorbis_audio_configuration: VorbisAudioConfiguration, required\n :return: Vorbis Audio Configuration\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.post('/encoding/configurations/audio/vorbis', vorbis_audio_configuration, type=VorbisAudioConfiguration, **kwargs) | Create Vorbis Codec Configuration
:param vorbis_audio_configuration: The Vorbis Codec Configuration to be created
:type vorbis_audio_configuration: VorbisAudioConfiguration, required
:return: Vorbis Audio Configuration
:rtype: VorbisAudioConfiguration | bitmovin_api_sdk/encoding/configurations/audio/vorbis/vorbis_api.py | create | jaythecaesarean/bitmovin-api-sdk-python | 11 | python | def create(self, vorbis_audio_configuration, **kwargs):
'Create Vorbis Codec Configuration\n\n :param vorbis_audio_configuration: The Vorbis Codec Configuration to be created\n :type vorbis_audio_configuration: VorbisAudioConfiguration, required\n :return: Vorbis Audio Configuration\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.post('/encoding/configurations/audio/vorbis', vorbis_audio_configuration, type=VorbisAudioConfiguration, **kwargs) | def create(self, vorbis_audio_configuration, **kwargs):
'Create Vorbis Codec Configuration\n\n :param vorbis_audio_configuration: The Vorbis Codec Configuration to be created\n :type vorbis_audio_configuration: VorbisAudioConfiguration, required\n :return: Vorbis Audio Configuration\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.post('/encoding/configurations/audio/vorbis', vorbis_audio_configuration, type=VorbisAudioConfiguration, **kwargs)<|docstring|>Create Vorbis Codec Configuration
:param vorbis_audio_configuration: The Vorbis Codec Configuration to be created
:type vorbis_audio_configuration: VorbisAudioConfiguration, required
:return: Vorbis Audio Configuration
:rtype: VorbisAudioConfiguration<|endoftext|> |
4a9f09fafae714f98d0cdcd2451daab4fdc8928259f51328200162f974542381 | def delete(self, configuration_id, **kwargs):
'Delete Vorbis Codec Configuration\n\n :param configuration_id: Id of the codec configuration\n :type configuration_id: string_types, required\n :return: Id of the codec configuration\n :rtype: BitmovinResponse\n '
return self.api_client.delete('/encoding/configurations/audio/vorbis/{configuration_id}', path_params={'configuration_id': configuration_id}, type=BitmovinResponse, **kwargs) | Delete Vorbis Codec Configuration
:param configuration_id: Id of the codec configuration
:type configuration_id: string_types, required
:return: Id of the codec configuration
:rtype: BitmovinResponse | bitmovin_api_sdk/encoding/configurations/audio/vorbis/vorbis_api.py | delete | jaythecaesarean/bitmovin-api-sdk-python | 11 | python | def delete(self, configuration_id, **kwargs):
'Delete Vorbis Codec Configuration\n\n :param configuration_id: Id of the codec configuration\n :type configuration_id: string_types, required\n :return: Id of the codec configuration\n :rtype: BitmovinResponse\n '
return self.api_client.delete('/encoding/configurations/audio/vorbis/{configuration_id}', path_params={'configuration_id': configuration_id}, type=BitmovinResponse, **kwargs) | def delete(self, configuration_id, **kwargs):
'Delete Vorbis Codec Configuration\n\n :param configuration_id: Id of the codec configuration\n :type configuration_id: string_types, required\n :return: Id of the codec configuration\n :rtype: BitmovinResponse\n '
return self.api_client.delete('/encoding/configurations/audio/vorbis/{configuration_id}', path_params={'configuration_id': configuration_id}, type=BitmovinResponse, **kwargs)<|docstring|>Delete Vorbis Codec Configuration
:param configuration_id: Id of the codec configuration
:type configuration_id: string_types, required
:return: Id of the codec configuration
:rtype: BitmovinResponse<|endoftext|> |
9ae9d6f57cea52c7496a7e96fd9286508288bc8a4574c660a21baa76c876a1c4 | def get(self, configuration_id, **kwargs):
'Vorbis Codec Configuration Details\n\n :param configuration_id: Id of the codec configuration\n :type configuration_id: string_types, required\n :return: Vorbis Audio Configuration\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.get('/encoding/configurations/audio/vorbis/{configuration_id}', path_params={'configuration_id': configuration_id}, type=VorbisAudioConfiguration, **kwargs) | Vorbis Codec Configuration Details
:param configuration_id: Id of the codec configuration
:type configuration_id: string_types, required
:return: Vorbis Audio Configuration
:rtype: VorbisAudioConfiguration | bitmovin_api_sdk/encoding/configurations/audio/vorbis/vorbis_api.py | get | jaythecaesarean/bitmovin-api-sdk-python | 11 | python | def get(self, configuration_id, **kwargs):
'Vorbis Codec Configuration Details\n\n :param configuration_id: Id of the codec configuration\n :type configuration_id: string_types, required\n :return: Vorbis Audio Configuration\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.get('/encoding/configurations/audio/vorbis/{configuration_id}', path_params={'configuration_id': configuration_id}, type=VorbisAudioConfiguration, **kwargs) | def get(self, configuration_id, **kwargs):
'Vorbis Codec Configuration Details\n\n :param configuration_id: Id of the codec configuration\n :type configuration_id: string_types, required\n :return: Vorbis Audio Configuration\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.get('/encoding/configurations/audio/vorbis/{configuration_id}', path_params={'configuration_id': configuration_id}, type=VorbisAudioConfiguration, **kwargs)<|docstring|>Vorbis Codec Configuration Details
:param configuration_id: Id of the codec configuration
:type configuration_id: string_types, required
:return: Vorbis Audio Configuration
:rtype: VorbisAudioConfiguration<|endoftext|> |
71a583380d936c30ea261e878d50f31fb7d74c26ec2b00d4dbe54212ad0ba227 | def list(self, query_params=None, **kwargs):
'List Vorbis Configurations\n\n :param query_params: Query parameters\n :type query_params: VorbisAudioConfigurationListQueryParams\n :return: List of Vorbis codec configurations\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.get('/encoding/configurations/audio/vorbis', query_params=query_params, pagination_response=True, type=VorbisAudioConfiguration, **kwargs) | List Vorbis Configurations
:param query_params: Query parameters
:type query_params: VorbisAudioConfigurationListQueryParams
:return: List of Vorbis codec configurations
:rtype: VorbisAudioConfiguration | bitmovin_api_sdk/encoding/configurations/audio/vorbis/vorbis_api.py | list | jaythecaesarean/bitmovin-api-sdk-python | 11 | python | def list(self, query_params=None, **kwargs):
'List Vorbis Configurations\n\n :param query_params: Query parameters\n :type query_params: VorbisAudioConfigurationListQueryParams\n :return: List of Vorbis codec configurations\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.get('/encoding/configurations/audio/vorbis', query_params=query_params, pagination_response=True, type=VorbisAudioConfiguration, **kwargs) | def list(self, query_params=None, **kwargs):
'List Vorbis Configurations\n\n :param query_params: Query parameters\n :type query_params: VorbisAudioConfigurationListQueryParams\n :return: List of Vorbis codec configurations\n :rtype: VorbisAudioConfiguration\n '
return self.api_client.get('/encoding/configurations/audio/vorbis', query_params=query_params, pagination_response=True, type=VorbisAudioConfiguration, **kwargs)<|docstring|>List Vorbis Configurations
:param query_params: Query parameters
:type query_params: VorbisAudioConfigurationListQueryParams
:return: List of Vorbis codec configurations
:rtype: VorbisAudioConfiguration<|endoftext|> |
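A hypothetical round trip through the four endpoints above. The BitmovinApi entry point, the api_key argument and the bitrate field are assumptions not shown in these records; only the create/get/list/delete signatures come from the code itself:

from bitmovin_api_sdk import BitmovinApi, VorbisAudioConfiguration

bitmovin = BitmovinApi(api_key="<YOUR_API_KEY>")           # assumed entry point
vorbis = bitmovin.encoding.configurations.audio.vorbis

created = vorbis.create(VorbisAudioConfiguration(name="vorbis-128k",
                                                 bitrate=128000))
fetched = vorbis.get(configuration_id=created.id)          # single configuration
page = vorbis.list()                                       # paginated listing
vorbis.delete(configuration_id=created.id)                 # returns the id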
c686028b74551fa8dc7a724695557a611c55b845ac0202643044e88b709d646f | @property
def has_db(self) -> bool:
'Only consider a DB connection if we have config info.'
rel = self.model.get_relation(DATABASE)
return ((len(rel.units) > 0) if (rel is not None) else False) | Only consider a DB connection if we have config info. | src/charm.py | has_db | paulomach/mysql-router-k8s-operator | 0 | python | @property
def has_db(self) -> bool:
rel = self.model.get_relation(DATABASE)
return ((len(rel.units) > 0) if (rel is not None) else False) | @property
def has_db(self) -> bool:
rel = self.model.get_relation(DATABASE)
return ((len(rel.units) > 0) if (rel is not None) else False)<|docstring|>Only consider a DB connection if we have config info.<|endoftext|> |
38b20b95907cb83de92438034274447cbfee2824090e58f5dba5df575581e50c | @property
def peers(self) -> list:
'Fetch the peer relation.'
return self.model.get_relation(PEER) | Fetch the peer relation. | src/charm.py | peers | paulomach/mysql-router-k8s-operator | 0 | python | @property
def peers(self) -> list:
return self.model.get_relation(PEER) | @property
def peers(self) -> list:
return self.model.get_relation(PEER)<|docstring|>Fetch the peer relation.<|endoftext|> |
26d106b02e03f10f52449a52797e655355938357a90160e22ac54beaa818d6a4 | def set_peer_data(self, key: str, data: Any) -> None:
'Put information into the peer data bucket instead of `StoredState`.'
if self.unit.is_leader():
self.peers.data[self.app][key] = json.dumps(data) | Put information into the peer data bucket instead of `StoredState`. | src/charm.py | set_peer_data | paulomach/mysql-router-k8s-operator | 0 | python | def set_peer_data(self, key: str, data: Any) -> None:
if self.unit.is_leader():
self.peers.data[self.app][key] = json.dumps(data) | def set_peer_data(self, key: str, data: Any) -> None:
if self.unit.is_leader():
self.peers.data[self.app][key] = json.dumps(data)<|docstring|>Put information into the peer data bucket instead of `StoredState`.<|endoftext|> |
81996d7ca2450f587a4c6a56b9f18cf9059ae61056e5985bd7a07b4b1caff91f | def get_peer_data(self, key: str) -> Any:
'Retrieve information from the peer data bucket instead of `StoredState`.'
data = self.peers.data[self.app].get(key, '')
return (json.loads(data) if data else {}) | Retrieve information from the peer data bucket instead of `StoredState`. | src/charm.py | get_peer_data | paulomach/mysql-router-k8s-operator | 0 | python | def get_peer_data(self, key: str) -> Any:
data = self.peers.data[self.app].get(key, )
return (json.loads(data) if data else {}) | def get_peer_data(self, key: str) -> Any:
data = self.peers.data[self.app].get(key, )
return (json.loads(data) if data else {})<|docstring|>Retrieve information from the peer data bucket instead of `StoredState`.<|endoftext|> |
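The pair above serialises arbitrary values into the peer relation databag, which can only hold strings. A standalone sketch of the same JSON round trip, with a plain dict standing in for peers.data[app]:

import json

databag = {}                                  # stand-in for self.peers.data[self.app]

def set_peer_data(key, value):
    databag[key] = json.dumps(value)          # dump on write

def get_peer_data(key):
    data = databag.get(key, "")
    return json.loads(data) if data else {}   # load on read, default to {}

set_peer_data("database", {"host": "10.0.0.5", "port": 3306})
print(get_peer_data("database"))              # {'host': '10.0.0.5', 'port': 3306}
print(get_peer_data("missing"))               # {}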
6c453ff3372073300dc68e63dfdfed9147d23c93ba4466c0eac1c75bb1dac512 | def _configure(self) -> None:
'Configure the charm.'
data = self.get_peer_data(DATABASE)
if (not self._validate_config(data['mysql'])):
logger.error('Invalid config')
self.unit.status = WaitingStatus('Invalid relation config')
return
pebble_layer = self._mysqlrouter_layer(port=data['mysql']['port'], host=data['mysql']['host'], user=data['mysql']['user'], password=data['mysql']['password'])
container = self.unit.get_container(self.name)
plan = container.get_plan()
if (plan.services != pebble_layer['services']):
logger.info('Config changed')
container.add_layer(self.name, pebble_layer, combine=True)
if container.get_service(self.name).is_running():
container.stop(self.name)
container.start(self.name)
logging.info('mysqlrouter restarted')
self.unit.status = ActiveStatus() | Configure the charm. | src/charm.py | _configure | paulomach/mysql-router-k8s-operator | 0 | python | def _configure(self) -> None:
data = self.get_peer_data(DATABASE)
if (not self._validate_config(data['mysql'])):
logger.error('Invalid config')
self.unit.status = WaitingStatus('Invalid relation config')
return
pebble_layer = self._mysqlrouter_layer(port=data['mysql']['port'], host=data['mysql']['host'], user=data['mysql']['user'], password=data['mysql']['password'])
container = self.unit.get_container(self.name)
plan = container.get_plan()
if (plan.services != pebble_layer['services']):
logger.info('Config changed')
container.add_layer(self.name, pebble_layer, combine=True)
if container.get_service(self.name).is_running():
container.stop(self.name)
container.start(self.name)
logging.info('mysqlrouter restarted')
self.unit.status = ActiveStatus() | def _configure(self) -> None:
data = self.get_peer_data(DATABASE)
if (not self._validate_config(data['mysql'])):
logger.error('Invalid config')
self.unit.status = WaitingStatus('Invalid relation config')
return
pebble_layer = self._mysqlrouter_layer(port=data['mysql']['port'], host=data['mysql']['host'], user=data['mysql']['user'], password=data['mysql']['password'])
container = self.unit.get_container(self.name)
plan = container.get_plan()
if (plan.services != pebble_layer['services']):
logger.info('Config changed')
container.add_layer(self.name, pebble_layer, combine=True)
if container.get_service(self.name).is_running():
container.stop(self.name)
container.start(self.name)
logging.info('mysqlrouter restarted')
self.unit.status = ActiveStatus()<|docstring|>Configure the charm.<|endoftext|> |
f193715ac458eeb99638c47a992b89187f6af54450617505f64b0e19d7b0c120 | def _on_mysqlrouter_pebble_ready(self, event) -> None:
'Define and start a workload using the Pebble API.'
if (not self.has_db):
logger.debug('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
self._configure() | Define and start a workload using the Pebble API. | src/charm.py | _on_mysqlrouter_pebble_ready | paulomach/mysql-router-k8s-operator | 0 | python | def _on_mysqlrouter_pebble_ready(self, event) -> None:
if (not self.has_db):
logger.debug('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
self._configure() | def _on_mysqlrouter_pebble_ready(self, event) -> None:
if (not self.has_db):
logger.debug('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
self._configure()<|docstring|>Define and start a workload using the Pebble API.<|endoftext|> |
ce96b0ebedfb75beb6a56dcf783d849df71d3a9a513fcaa62aaa48a1384c2b73 | def _mysqlrouter_layer(self, host, port, user, password) -> dict:
'Return a layer configuration for the mysqlrouter service.\n\n Args:\n host (str): The hostname of the MySQL cluster.\n port (int): The port of the MySQL cluster.\n user (str): The username for the MySQL cluster.\n password (str): The password for the MySQL cluster.\n '
return {'summary': 'mysqlrouter layer', 'description': 'pebble config layer for mysqlrouter', 'services': {'mysqlrouter': {'override': 'replace', 'summary': 'mysqlrouter', 'command': './run.sh', 'startup': 'enabled', 'environment': {'MYSQL_PORT': port, 'MYSQL_HOST': host, 'MYSQL_USER': user, 'MYSQL_PASSWORD': password}}}} | Return a layer configuration for the mysqlrouter service.
Args:
host (str): The hostname of the MySQL cluster.
port (int): The port of the MySQL cluster.
user (str): The username for the MySQL cluster.
password (str): The password for the MySQL cluster. | src/charm.py | _mysqlrouter_layer | paulomach/mysql-router-k8s-operator | 0 | python | def _mysqlrouter_layer(self, host, port, user, password) -> dict:
'Return a layer configuration for the mysqlrouter service.\n\n Args:\n host (str): The hostname of the MySQL cluster.\n port (int): The port of the MySQL cluster.\n user (str): The username for the MySQL cluster.\n password (str): The password for the MySQL cluster.\n '
return {'summary': 'mysqlrouter layer', 'description': 'pebble config layer for mysqlrouter', 'services': {'mysqlrouter': {'override': 'replace', 'summary': 'mysqlrouter', 'command': './run.sh', 'startup': 'enabled', 'environment': {'MYSQL_PORT': port, 'MYSQL_HOST': host, 'MYSQL_USER': user, 'MYSQL_PASSWORD': password}}}} | def _mysqlrouter_layer(self, host, port, user, password) -> dict:
'Return a layer configuration for the mysqlrouter service.\n\n Args:\n host (str): The hostname of the MySQL cluster.\n port (int): The port of the MySQL cluster.\n user (str): The username for the MySQL cluster.\n password (str): The password for the MySQL cluster.\n '
return {'summary': 'mysqlrouter layer', 'description': 'pebble config layer for mysqlrouter', 'services': {'mysqlrouter': {'override': 'replace', 'summary': 'mysqlrouter', 'command': './run.sh', 'startup': 'enabled', 'environment': {'MYSQL_PORT': port, 'MYSQL_HOST': host, 'MYSQL_USER': user, 'MYSQL_PASSWORD': password}}}}<|docstring|>Return a layer configuration for the mysqlrouter service.
Args:
host (str): The hostname of the MySQL cluster.
port (int): The port of the MySQL cluster.
user (str): The username for the MySQL cluster.
password (str): The password for the MySQL cluster.<|endoftext|> |
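The Pebble layer returned above is plain data; rebuilding it standalone (with made-up connection values) shows the shape that _configure() compares against container.get_plan().services:

layer = {
    "summary": "mysqlrouter layer",
    "description": "pebble config layer for mysqlrouter",
    "services": {
        "mysqlrouter": {
            "override": "replace",
            "summary": "mysqlrouter",
            "command": "./run.sh",
            "startup": "enabled",
            "environment": {           # values below are placeholders
                "MYSQL_PORT": 3306,
                "MYSQL_HOST": "mysql-0.endpoints",
                "MYSQL_USER": "router",
                "MYSQL_PASSWORD": "secret",
            },
        }
    },
}
print(sorted(layer["services"]["mysqlrouter"]["environment"]))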
0604f5d5d3460fc446a13566a6c421fe0ad484dbe17fefa216ffb506f96fefbf | def _validate_config(self, configuration: Dict) -> bool:
'Validate the configuration.'
for (k, v) in configuration.items():
return (((k == 'host') and (type(v) == str)) or ((k == 'port') and (type(v) == int)) or ((k == 'user') and (type(v) == str)) or ((k == 'password') and (type(v) == str))) | Validate the configuration. | src/charm.py | _validate_config | paulomach/mysql-router-k8s-operator | 0 | python | def _validate_config(self, configuration: Dict) -> bool:
for (k, v) in configuration.items():
return (((k == 'host') and (type(v) == str)) or ((k == 'port') and (type(v) == int)) or ((k == 'user') and (type(v) == str)) or ((k == 'password') and (type(v) == str))) | def _validate_config(self, configuration: Dict) -> bool:
for (k, v) in configuration.items():
return (((k == 'host') and (type(v) == str)) or ((k == 'port') and (type(v) == int)) or ((k == 'user') and (type(v) == str)) or ((k == 'password') and (type(v) == str)))<|docstring|>Validate the configuration.<|endoftext|> |
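Note that the loop above returns during its first iteration, and the or-chain accepts any single well-typed key, so a dict like {'host': 'h'} passes. A stricter sketch (an interpretation of the likely intent, not the charm's actual code) that requires all four fields with the right types:

def validate_config(configuration):
    # Require every expected key to be present and correctly typed.
    expected = {"host": str, "port": int, "user": str, "password": str}
    return all(
        isinstance(configuration.get(key), typ)
        for key, typ in expected.items()
    )

assert validate_config({"host": "h", "port": 3306, "user": "u", "password": "p"})
assert not validate_config({"host": "h"})                      # missing keys
assert not validate_config({"host": "h", "port": "3306",
                            "user": "u", "password": "p"})     # wrong type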
38ab892259d5c7e2f7d4aba8e5ebd7a1f99be480843e852b3db4df4393b01054 | def _on_config_changed(self, event) -> None:
'Handle config-changed event.'
if (not self.has_db):
logger.debug('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
self._configure() | Handle config-changed event. | src/charm.py | _on_config_changed | paulomach/mysql-router-k8s-operator | 0 | python | def _on_config_changed(self, event) -> None:
if (not self.has_db):
logger.debug('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
self._configure() | def _on_config_changed(self, event) -> None:
if (not self.has_db):
logger.debug('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
self._configure()<|docstring|>Handle config-changed event.<|endoftext|> |
74db25e88db14a9e47e23b23db83dd005046bd36504c936323dae55af7f67b81 | def _on_database_relation_created(self, event) -> None:
'Handle database relation created event.'
logger.info('Database relation created')
if (not self.has_db):
logger.info('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
data = dict(event.relation.data[event.app])
self.set_peer_data(DATABASE, data)
self._configure() | Handle database relation created event. | src/charm.py | _on_database_relation_created | paulomach/mysql-router-k8s-operator | 0 | python | def _on_database_relation_created(self, event) -> None:
logger.info('Database relation created')
if (not self.has_db):
logger.info('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
data = dict(event.relation.data[event.app])
self.set_peer_data(DATABASE, data)
self._configure() | def _on_database_relation_created(self, event) -> None:
logger.info('Database relation created')
if (not self.has_db):
logger.info('No database relation found')
self.unit.status = WaitingStatus('Waiting for database relation')
event.defer()
return
data = dict(event.relation.data[event.app])
self.set_peer_data(DATABASE, data)
self._configure()<|docstring|>Handle database relation created event.<|endoftext|> |
d990476e88f371141452a4a148f00e0991bd2f30dcf6b225ad643fd0b13ee430 | def _on_database_relation_departed(self, event) -> None:
'Handle database relation departed event.'
container = event.workload
container.stop('mysqlrouter')
self.unit.status = WaitingStatus('Waiting for database relation') | Handle database relation departed event. | src/charm.py | _on_database_relation_departed | paulomach/mysql-router-k8s-operator | 0 | python | def _on_database_relation_departed(self, event) -> None:
container = event.workload
container.stop('mysqlrouter')
self.unit.status = WaitingStatus('Waiting for database relation') | def _on_database_relation_departed(self, event) -> None:
container = event.workload
container.stop('mysqlrouter')
self.unit.status = WaitingStatus('Waiting for database relation')<|docstring|>Handle database relation departed event.<|endoftext|> |
657d9ed5565a87aeb67ca7e4c1cdf9cce68cdeaf2cb0222b4549b8fd62cb9d03 | def parseIsoDate(isoString, formatstring=''):
'Turn an ISO 8601 formatted duration string like P1DT45M3S into something readable like "1 day, 45 minutes, 3 seconds"'
durations = {'year': 0, 'month': 0, 'week': 0, 'day': 0, 'hour': 0, 'minute': 0, 'second': 0}
regex = 'P(?:(?P<year>\\d+)Y)?(?:(?P<month>\\d+)M)?(?:(?P<week>\\d+)W)?(?:(?P<day>\\d+)D)?T?(?:(?P<hour>\\d+)H)?(?:(?P<minute>\\d+)M)?(?:(?P<second>\\d+)S)?'
result = re.search(regex, isoString)
if (result is None):
logger.warning('No date results found')
else:
for (group, value) in result.groupdict().iteritems():
if (value is not None):
durations[group] = int(float(value))
if (formatstring != ''):
return formatstring.format(**durations)
else:
return durations | Turn an ISO 8601 formatted duration string like P1DT45M3S into something readable like "1 day, 45 minutes, 3 seconds" | util/DateTimeUtil.py | parseIsoDate | Cybertinus/DideRobot | 4 | python | def parseIsoDate(isoString, formatstring=''):
durations = {'year': 0, 'month': 0, 'week': 0, 'day': 0, 'hour': 0, 'minute': 0, 'second': 0}
regex = 'P(?:(?P<year>\\d+)Y)?(?:(?P<month>\\d+)M)?(?:(?P<week>\\d+)W)?(?:(?P<day>\\d+)D)?T?(?:(?P<hour>\\d+)H)?(?:(?P<minute>\\d+)M)?(?:(?P<second>\\d+)S)?'
result = re.search(regex, isoString)
if (result is None):
logger.warning('No date results found')
else:
for (group, value) in result.groupdict().iteritems():
if (value is not None):
durations[group] = int(float(value))
if (formatstring != ''):
return formatstring.format(**durations)
else:
return durations | def parseIsoDate(isoString, formatstring=''):
durations = {'year': 0, 'month': 0, 'week': 0, 'day': 0, 'hour': 0, 'minute': 0, 'second': 0}
regex = 'P(?:(?P<year>\\d+)Y)?(?:(?P<month>\\d+)M)?(?:(?P<week>\\d+)W)?(?:(?P<day>\\d+)D)?T?(?:(?P<hour>\\d+)H)?(?:(?P<minute>\\d+)M)?(?:(?P<second>\\d+)S)?'
result = re.search(regex, isoString)
if (result is None):
logger.warning('No date results found')
else:
for (group, value) in result.groupdict().iteritems():
if (value is not None):
durations[group] = int(float(value))
if (formatstring != ''):
return formatstring.format(**durations)
else:
return durations<|docstring|>Turn an ISO 8601 formatted duration string like P1DT45M3S into something readable like "1 day, 45 minutes, 3 seconds"<|endoftext|>
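A worked example against the function above (note the record targets Python 2, see iteritems()). P1DT45M3S yields day=1, minute=45, second=3, so a matching format string renders the readable form from the docstring. The module-level logger is assumed to exist, so a stand-in is defined here:

import logging, re  # re is used by parseIsoDate itself
logger = logging.getLogger(__name__)

fmt = '{day} day, {minute} minutes, {second} seconds'
print(parseIsoDate('P1DT45M3S', fmt))
# -> 1 day, 45 minutes, 3 seconds
print(parseIsoDate('PT2H30M'))
# -> {'year': 0, 'month': 0, 'week': 0, 'day': 0, 'hour': 2, 'minute': 30, 'second': 0}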
e0ac9b22b67d9c19b01efc103ef0c1ba598849e65c67b422694996e06490ada2 | def version():
'\n entry point for --version\n '
print(('version pypodo : ' + pkg_resources.get_distribution('pypodo').version))
print(('location of todo file : ' + todofilefromconfig()))
print(('location of config file : ' + os.path.join(get_user_config_directory_pypodo(), TODO_RC_FILE)))
print(('location of backup folder : ' + todobackupfolderfromconfig())) | entry point for --version | pypodo/version.py | version | thib1984/pytodo | 4 | python | def version():
'\n \n '
print(('version pypodo : ' + pkg_resources.get_distribution('pypodo').version))
print(('location of todo file : ' + todofilefromconfig()))
print(('location of config file : ' + os.path.join(get_user_config_directory_pypodo(), TODO_RC_FILE)))
print(('location of backup folder : ' + todobackupfolderfromconfig())) | def version():
'\n \n '
print(('version pypodo : ' + pkg_resources.get_distribution('pypodo').version))
print(('location of todo file : ' + todofilefromconfig()))
print(('location of config file : ' + os.path.join(get_user_config_directory_pypodo(), TODO_RC_FILE)))
print(('location of backup folder : ' + todobackupfolderfromconfig()))<|docstring|>entry point for --version<|endoftext|> |
2b119d882b68c02fcd753a5446cd8c3c7ad3b4ae4ab6adaa41b2f0ffdd7da2a1 | def __getattribute__(self, item):
"Used to dynamically call the method of a controller in a command\n function. If the specified controller does not exist, just return\n the class attribute.\n\n For example, the line ``ovh.foo.bar()`` in the following command\n calls the ``modules.foo.controllers.Foo.bar`` method :\n\n @foo.command('bar')\n @pass_ovh\n def bar(ovh):\n data = ovh.foo.bar()\n "
if (item in object.__getattribute__(self, '_controllers')):
cls = object.__getattribute__(self, '_controllers')[item]
if (item != 'setup'):
client = self.get_ovh_client()
cls.client = client
return cls
return object.__getattribute__(self, item) | Used to dynamically call the method of a controller in a command
function. If the specified controller does not exist, just return
the class attribute.
For example, the line ``ovh.foo.bar()`` in the following command
calls the ``modules.foo.controllers.Foo.bar`` method :
@foo.command('bar')
@pass_ovh
def bar(ovh):
data = ovh.foo.bar() | ovhcli/context.py | __getattribute__ | akram/ovh-cli | 42 | python | def __getattribute__(self, item):
"Used to dynamically call the method of a controller in a command\n function. If the specified controller does not exist, just return\n the class attribute.\n\n For example, the line ``ovh.foo.bar()`` in the following command\n calls the ``modules.foo.controllers.Foo.bar`` method :\n\n @foo.command('bar')\n @pass_ovh\n def bar(ovh):\n data = ovh.foo.bar()\n "
if (item in object.__getattribute__(self, '_controllers')):
cls = object.__getattribute__(self, '_controllers')[item]
if (item != 'setup'):
client = self.get_ovh_client()
cls.client = client
return cls
return object.__getattribute__(self, item) | def __getattribute__(self, item):
"Used to dynamically call the method of a controller in a command\n function. If the specified controller does not exist, just return\n the class attribute.\n\n For example, the line ``ovh.foo.bar()`` in the following command\n calls the ``modules.foo.controllers.Foo.bar`` method :\n\n @foo.command('bar')\n @pass_ovh\n def bar(ovh):\n data = ovh.foo.bar()\n "
if (item in object.__getattribute__(self, '_controllers')):
cls = object.__getattribute__(self, '_controllers')[item]
if (item != 'setup'):
client = self.get_ovh_client()
cls.client = client
return cls
return object.__getattribute__(self, item)<|docstring|>Used to dynamically call the method of a controller in a command
function. If the specified controller does not exist, just return
the class attribute.
For example, the line ``ovh.foo.bar()`` in the following command
calls the ``modules.foo.controllers.Foo.bar`` method :
@foo.command('bar')
@pass_ovh
def bar(ovh):
data = ovh.foo.bar()<|endoftext|> |
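A minimal standalone sketch of the dispatch trick used above: names found in _controllers are served from that dict, everything else falls back to normal attribute lookup. This strips away the OVH client wiring and keeps only the __getattribute__ mechanics:

class Foo(object):
    @staticmethod
    def bar():
        return "data from Foo.bar"

class Context(object):
    def __init__(self):
        self._controllers = {"foo": Foo}

    def __getattribute__(self, item):
        # Route known controller names to their class, bypass otherwise.
        controllers = object.__getattribute__(self, "_controllers")
        if item in controllers:
            return controllers[item]
        return object.__getattribute__(self, item)

ovh = Context()
print(ovh.foo.bar())   # -> data from Foo.bar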
e5c0d3448787fb4e3fc593f8f9d01ffb21aade346768d8256fc506d5e8b125b2 | def get_ovh_client(self):
'Get the OVH client.'
try:
client = ovh.Client()
except InvalidRegion:
self.error('The configuration was not found.')
self.error('Please use `ovh setup init` to create it.')
self.exit()
return client | Get the OVH client. | ovhcli/context.py | get_ovh_client | akram/ovh-cli | 42 | python | def get_ovh_client(self):
try:
client = ovh.Client()
except InvalidRegion:
self.error('The configuration was not found.')
self.error('Please use `ovh setup init` to create it.')
self.exit()
return client | def get_ovh_client(self):
try:
client = ovh.Client()
except InvalidRegion:
self.error('The configuration was not found.')
self.error('Please use `ovh setup init` to create it.')
self.exit()
return client<|docstring|>Get the OVH client.<|endoftext|> |
4a870db888b4597d15c9200885bccf30228f4734078c338c64f95c3bfb16b470 | def load_controllers(self):
"Load the controllers for each module specified in the\n ``MODULE_FOLDER`` constant.\n\n If a module can't be imported for any reason, we do not display it."
modules = [module for module in sorted(os.listdir(MODULES_FOLDER)) if is_module(module)]
for module in modules:
try:
controller = importlib.import_module('ovhcli.modules.{}.controllers'.format(module))
self._controllers[module] = getattr(controller, module.capitalize())
except ImportError:
pass | Load the controllers for each module specified in the
``MODULES_FOLDER`` constant.
If a module can't be imported for any reason, we do not display it. | ovhcli/context.py | load_controllers | akram/ovh-cli | 42 | python | def load_controllers(self):
"Load the controllers for each module specified in the\n ``MODULE_FOLDER`` constant.\n\n If a module can't be imported for any reason, we do not display it."
modules = [module for module in sorted(os.listdir(MODULES_FOLDER)) if is_module(module)]
for module in modules:
try:
controller = importlib.import_module('ovhcli.modules.{}.controllers'.format(module))
self._controllers[module] = getattr(controller, module.capitalize())
except ImportError:
pass | def load_controllers(self):
"Load the controllers for each module specified in the\n ``MODULE_FOLDER`` constant.\n\n If a module can't be imported for any reason, we do not display it."
modules = [module for module in sorted(os.listdir(MODULES_FOLDER)) if is_module(module)]
for module in modules:
try:
controller = importlib.import_module('ovhcli.modules.{}.controllers'.format(module))
self._controllers[module] = getattr(controller, module.capitalize())
except ImportError:
pass<|docstring|>Load the controllers for each module specified in the
``MODULES_FOLDER`` constant.
If a module can't be imported for any reason, we do not display it.<|endoftext|> |
fccfc0fa8b135c8ca28e8bd11c0463150ecc31ad787110ea479bc2669899aa58 | def echo(self, message, prefix='', color='white'):
'Print a message with a colored prefix unless the ``--json``\n parameter is specified.'
try:
json = self.json
except AttributeError:
json = False
if (not json):
if prefix:
prefix = '[{}] '.format(click.style(prefix, fg=color))
click.echo(u'{}{}'.format(prefix, message)) | Print a message with a colored prefix unless the ``--json``
parameter is specified. | ovhcli/context.py | echo | akram/ovh-cli | 42 | python | def echo(self, message, prefix='', color='white'):
'Print a message with a colored prefix unless the ``--json``\n parameter is specified.'
try:
json = self.json
except AttributeError:
json = False
if (not json):
if prefix:
prefix = '[{}] '.format(click.style(prefix, fg=color))
click.echo(u'{}{}'.format(prefix, message)) | def echo(self, message, prefix='', color='white'):
'Print a message with a colored prefix unless the ``--json``\n parameter is specified.'
try:
json = self.json
except AttributeError:
json = False
if (not json):
if prefix:
prefix = '[{}] '.format(click.style(prefix, fg=color))
click.echo(u'{}{}'.format(prefix, message))<|docstring|>Print a message with a colored prefix unless the ``--json``
parameter is specified.<|endoftext|> |
7e4b661233f244c19462093730c92fe42e0c40d1f7158f3424739c1083a22b2a | def debug(self, message):
'Print a debug message if the debug mode is enabled.'
if self.debug_mode:
self.echo(message, 'debug', 'blue') | Print a debug message if the debug mode is enabled. | ovhcli/context.py | debug | akram/ovh-cli | 42 | python | def debug(self, message):
if self.debug_mode:
self.echo(message, 'debug', 'blue') | def debug(self, message):
if self.debug_mode:
self.echo(message, 'debug', 'blue')<|docstring|>Print a debug message if the debug mode is enabled.<|endoftext|> |
93aa2890731ebe866f3bc12f341ef91ab78df32bc46c4635b455f730af959229 | def info(self, message):
'Print an information message.'
self.echo(message, '-', 'cyan') | Print an information message. | ovhcli/context.py | info | akram/ovh-cli | 42 | python | def info(self, message):
self.echo(message, '-', 'cyan') | def info(self, message):
self.echo(message, '-', 'cyan')<|docstring|>Print an information message.<|endoftext|> |
a7059843b3fd009f529f17f8fd7b39e30e3f270aa05eaca73644729d777c79ef | def time_echo(self, message):
'Print an information message with a formatted date.'
self.echo(message, strftime('%H:%M:%S'), 'cyan') | Print an information message with a formatted date. | ovhcli/context.py | time_echo | akram/ovh-cli | 42 | python | def time_echo(self, message):
self.echo(message, strftime('%H:%M:%S'), 'cyan') | def time_echo(self, message):
self.echo(message, strftime('%H:%M:%S'), 'cyan')<|docstring|>Print an information message with a formatted date.<|endoftext|> |
37a91511e866ece26bc376df94d15cbaf379f4820e84299f2b35e1027d8a0f75 | def success(self, message):
'Print a success message.'
self.echo(message, '*', 'green') | Print a success message. | ovhcli/context.py | success | akram/ovh-cli | 42 | python | def success(self, message):
self.echo(message, '*', 'green') | def success(self, message):
self.echo(message, '*', 'green')<|docstring|>Print a success message.<|endoftext|> |
1f310c6eeb442290cf54868ed6dd33e6aedaef15d8a1cdaa764a46907551892e | def warning(self, message):
'Print a warning message.'
self.echo(message, 'warning', 'yellow') | Print a warning message. | ovhcli/context.py | warning | akram/ovh-cli | 42 | python | def warning(self, message):
self.echo(message, 'warning', 'yellow') | def warning(self, message):
self.echo(message, 'warning', 'yellow')<|docstring|>Print a warning message.<|endoftext|> |
a13740e14b08ee26935f20749182c25a3ab2b20286653463cff0fb1df6d6ce84 | def error(self, message):
'Print an error message.'
self.echo(message, 'error', 'red') | Print an error message. | ovhcli/context.py | error | akram/ovh-cli | 42 | python | def error(self, message):
self.echo(message, 'error', 'red') | def error(self, message):
self.echo(message, 'error', 'red')<|docstring|>Print an error message.<|endoftext|> |
1bbfc790c47bf7b9c0dec4084beb54708c10c960645e780585293a84da9ebc55 | def table(self, data, custom_func=None, exclude=[], sort=None):
'\n Print a pretty table unless the ``--json`` parameter is specified.\n\n If no custom function is given, use the ``Output`` class to generate\n the table.'
try:
json = self.json
except AttributeError:
json = False
if json:
click.echo(_json.dumps(data))
return
if custom_func:
self.echo(custom_func(data))
return
table = Output(data, exclude=exclude, sort=sort)
self.echo(table.convert()) | Print a pretty table unless the ``--json`` parameter is specified.
If no custom function is given, use the ``Output`` class to generate
the table. | ovhcli/context.py | table | akram/ovh-cli | 42 | python | def table(self, data, custom_func=None, exclude=[], sort=None):
'\n Print a pretty table unless the ``--json`` parameter is specified.\n\n If no custom function is given, use the ``Output`` class to generate\n the table.'
try:
json = self.json
except AttributeError:
json = False
if json:
click.echo(_json.dumps(data))
return
if custom_func:
self.echo(custom_func(data))
return
table = Output(data, exclude=exclude, sort=sort)
self.echo(table.convert()) | def table(self, data, custom_func=None, exclude=[], sort=None):
'\n Print a pretty table unless the ``--json`` parameter is specified.\n\n If no custom function is given, use the ``Output`` class to generate\n the table.'
try:
json = self.json
except AttributeError:
json = False
if json:
click.echo(_json.dumps(data))
return
if custom_func:
self.echo(custom_func(data))
return
table = Output(data, exclude=exclude, sort=sort)
self.echo(table.convert())<|docstring|>Print a pretty table unless the ``--json`` parameter is specified.
If no custom function is given, use the ``Output`` class to generate
the table.<|endoftext|> |
5c7a80c106e2c9433e9b8e94f25e5970c2111ce70002fa596fb1cb3a12b6cf4f | def display_task(self, task):
'Print a task status.'
name = task['function']
if (task['status'] in ['init', 'todo', 'doing']):
self.success('The task {} has been launched.'.format(name))
elif (task['status'] == 'done'):
self.success('The task {} is done.'.format(name))
elif (task['status'] == 'cancelled'):
self.warning('The task {} has been cancelled.'.format(name))
else:
self.error('The task {} fell in an error state.'.format(name)) | Print a task status. | ovhcli/context.py | display_task | akram/ovh-cli | 42 | python | def display_task(self, task):
name = task['function']
if (task['status'] in ['init', 'todo', 'doing']):
self.success('The task {} has been launched.'.format(name))
elif (task['status'] == 'done'):
self.success('The task {} is done.'.format(name))
elif (task['status'] == 'cancelled'):
self.warning('The task {} has been cancelled.'.format(name))
else:
self.error('The task {} fell in an error state.'.format(name)) | def display_task(self, task):
name = task['function']
if (task['status'] in ['init', 'todo', 'doing']):
self.success('The task {} has been launched.'.format(name))
elif (task['status'] == 'done'):
self.success('The task {} is done.'.format(name))
elif (task['status'] == 'cancelled'):
self.warning('The task {} has been cancelled.'.format(name))
else:
self.error('The task {} fell in an error state.'.format(name))<|docstring|>Print a task status.<|endoftext|> |
a2d724a22264d17b8b8cf97af8ff2b681159be963b2b510f9b2aed6ee6997c4e | def create_presigned_post(bucket_name, object_name, fields=None, conditions=None, expiration=3600):
'Generate a presigned URL S3 POST request to upload a file\n\n :param bucket_name: string\n :param object_name: string\n :param fields: Dictionary of prefilled form fields\n :param conditions: List of conditions to include in the policy\n :param expiration: Time in seconds for the presigned URL to remain valid\n :return: Dictionary with the following keys:\n url: URL to post to\n fields: Dictionary of form fields and values to submit with the POST\n :return: None if error.\n '
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_post(bucket_name, object_name, Fields=fields, Conditions=conditions, ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
return response | Generate a presigned URL S3 POST request to upload a file
:param bucket_name: string
:param object_name: string
:param fields: Dictionary of prefilled form fields
:param conditions: List of conditions to include in the policy
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Dictionary with the following keys:
url: URL to post to
fields: Dictionary of form fields and values to submit with the POST
:return: None if error. | testdocs/s3uploadproc.py | create_presigned_post | liwen611/amazon-textract-serverless-large-scale-document-processing | 0 | python | def create_presigned_post(bucket_name, object_name, fields=None, conditions=None, expiration=3600):
'Generate a presigned URL S3 POST request to upload a file\n\n :param bucket_name: string\n :param object_name: string\n :param fields: Dictionary of prefilled form fields\n :param conditions: List of conditions to include in the policy\n :param expiration: Time in seconds for the presigned URL to remain valid\n :return: Dictionary with the following keys:\n url: URL to post to\n fields: Dictionary of form fields and values to submit with the POST\n :return: None if error.\n '
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_post(bucket_name, object_name, Fields=fields, Conditions=conditions, ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
return response | def create_presigned_post(bucket_name, object_name, fields=None, conditions=None, expiration=3600):
'Generate a presigned URL S3 POST request to upload a file\n\n :param bucket_name: string\n :param object_name: string\n :param fields: Dictionary of prefilled form fields\n :param conditions: List of conditions to include in the policy\n :param expiration: Time in seconds for the presigned URL to remain valid\n :return: Dictionary with the following keys:\n url: URL to post to\n fields: Dictionary of form fields and values to submit with the POST\n :return: None if error.\n '
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_post(bucket_name, object_name, Fields=fields, Conditions=conditions, ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
return response<|docstring|>Generate a presigned URL S3 POST request to upload a file
:param bucket_name: string
:param object_name: string
:param fields: Dictionary of prefilled form fields
:param conditions: List of conditions to include in the policy
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Dictionary with the following keys:
url: URL to post to
fields: Dictionary of form fields and values to submit with the POST
:return: None if error.<|endoftext|> |
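The dictionary returned above plugs directly into an HTTP POST; the standard companion snippet from the AWS documentation pattern uses the requests library, with the bucket, key and local filename here being placeholders:

import requests  # third-party dependency

response = create_presigned_post("my-bucket", "uploads/doc.pdf")
if response is not None:
    # The file must be sent as the 'file' form field, after the returned fields.
    with open("doc.pdf", "rb") as f:
        files = {"file": ("uploads/doc.pdf", f)}
        http_response = requests.post(
            response["url"], data=response["fields"], files=files)
    print("Upload status:", http_response.status_code)  # 204 on success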
32f4b664162593d2e7c68736bab10b3d03aab3d58f5e9ca5abf26fdff0403c69 | @pytest.fixture(scope='module')
def app_config(app_config):
'Fixture for customizing the service config via app config.'
app_config['REQUESTS_PERMISSION_POLICY'] = CustomPermissionPolicy
return app_config | Fixture for customizing the service config via app config. | tests/services/requests/test_requests_service_config.py | app_config | NRodriguezcuellar/invenio-requests | 0 | python | @pytest.fixture(scope='module')
def app_config(app_config):
app_config['REQUESTS_PERMISSION_POLICY'] = CustomPermissionPolicy
return app_config | @pytest.fixture(scope='module')
def app_config(app_config):
app_config['REQUESTS_PERMISSION_POLICY'] = CustomPermissionPolicy
return app_config<|docstring|>Fixture for customizing the service config via app config.<|endoftext|> |
24e163ab31c1e16647b25c081100dc07adee5bd7a048bbd8b3a865c94f933062 | def test_customizations_via_app_config(app):
'Test if the customization mechanism works correctly.'
current_permission_policy_cls = current_requests.requests_service.config.permission_policy_cls
assert (current_permission_policy_cls is CustomPermissionPolicy)
assert hasattr(current_permission_policy_cls, 'can_test') | Test if the customization mechanism works correctly. | tests/services/requests/test_requests_service_config.py | test_customizations_via_app_config | NRodriguezcuellar/invenio-requests | 0 | python | def test_customizations_via_app_config(app):
current_permission_policy_cls = current_requests.requests_service.config.permission_policy_cls
assert (current_permission_policy_cls is CustomPermissionPolicy)
assert hasattr(current_permission_policy_cls, 'can_test') | def test_customizations_via_app_config(app):
current_permission_policy_cls = current_requests.requests_service.config.permission_policy_cls
assert (current_permission_policy_cls is CustomPermissionPolicy)
assert hasattr(current_permission_policy_cls, 'can_test')<|docstring|>Test if the customization mechanism works correctly.<|endoftext|> |
15294472af1f9e96521ae8b96710a9ec22e76bbb30eb19e07b59faf994662492 | def test_customization_mixin(app):
'Test if the customize mixin method does what it is supposed to do.'
custom_config = RequestsServiceConfig.build(app)
assert (custom_config is not RequestsServiceConfig)
assert (custom_config.permission_policy_cls is CustomPermissionPolicy) | Test if the customize mixin method does what it is supposed to do. | tests/services/requests/test_requests_service_config.py | test_customization_mixin | NRodriguezcuellar/invenio-requests | 0 | python | def test_customization_mixin(app):
custom_config = RequestsServiceConfig.build(app)
assert (custom_config is not RequestsServiceConfig)
assert (custom_config.permission_policy_cls is CustomPermissionPolicy) | def test_customization_mixin(app):
custom_config = RequestsServiceConfig.build(app)
assert (custom_config is not RequestsServiceConfig)
assert (custom_config.permission_policy_cls is CustomPermissionPolicy)<|docstring|>Test if the customize mixin method does what it is supposed to do.<|endoftext|> |
21a37822a6b78de4fa246ba2d188018d10894aff18c9c34d8a7524d8a5be22ca | def produto_pre_save(signal, instance, sender, **kwargs):
'Things to do before saving to the database'
instance.slug = slugify(instance.nome)
instance.codigo = upper(instance.codigo) | Things to do before saving to the database | projeto2/core/models.py | produto_pre_save | carlosbecker2077/django-projeto2 | 0 | python | def produto_pre_save(signal, instance, sender, **kwargs):
instance.slug = slugify(instance.nome)
instance.codigo = upper(instance.codigo) | def produto_pre_save(signal, instance, sender, **kwargs):
instance.slug = slugify(instance.nome)
instance.codigo = upper(instance.codigo)<|docstring|>Things to do before saving to the database<|endoftext|>
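The handler above is meant to be wired to Django's pre_save signal; a hypothetical registration, assuming a Produto model with nome, slug and codigo fields defined in the same module:

from django.db.models.signals import pre_save

pre_save.connect(produto_pre_save, sender=Produto)  # Produto is assumed

# From here on, saving a Produto fills slug and upper-cases codigo:
#   p = Produto(nome='Meu Produto', codigo='abc123')
#   p.save()   # p.slug == 'meu-produto', p.codigo == 'ABC123'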
70d88ac90d2f3bee328d7005ade205aee8dad43799ce67602c3342e82880cf33 | def __init__(self, delaunay, triangulation):
" Creating a TriFinder for matplotlib.tri.triangulation using the scipy.spatial.Delaunay object\n Compatibility is not checked!\n User must make sure the triangulation is created by the same Delaunay object's *simplices* information, and of course the Delaunay must be of 2-dimensional.\n "
self.delaunay = delaunay
super(DelaunayTriFinder, self).__init__(triangulation)
assert isinstance(delaunay, Delaunay) | Creating a TriFinder for matplotlib.tri.triangulation using the scipy.spatial.Delaunay object
Compatibility is not checked!
User must make sure the triangulation is created by the same Delaunay object's *simplices* information, and of course the Delaunay must be 2-dimensional. | src/python2/sdp/geometry/support.py | __init__ | LeiShi/Synthetic-Diagnostics-Platform | 5 | python | def __init__(self, delaunay, triangulation):
" Creating a TriFinder for matplotlib.tri.triangulation using the scipy.spatial.Delaunay object\n Compatibility is not checked!\n User must make sure the triangulation is created by the same Delaunay object's *simplices* information, and of course the Delaunay must be of 2-dimensional.\n "
self.delaunay = delaunay
super(DelaunayTriFinder, self).__init__(triangulation)
assert isinstance(delaunay, Delaunay) | def __init__(self, delaunay, triangulation):
" Creating a TriFinder for matplotlib.tri.triangulation using the scipy.spatial.Delaunay object\n Compatibility is not checked!\n User must make sure the triangulation is created by the same Delaunay object's *simplices* information, and of course the Delaunay must be of 2-dimensional.\n "
self.delaunay = delaunay
super(DelaunayTriFinder, self).__init__(triangulation)
assert isinstance(delaunay, Delaunay)<|docstring|>Creating a TriFinder for matplotlib.tri.triangulation using the scipy.spatial.Delaunay object
Compatibility is not checked!
User must make sure the triangulation is created by the same Delaunay object's *simplices* information, and of course the Delaunay must be 2-dimensional.<|endoftext|>
275f1e40172728468aacb99921ab0ebf48f6c7165593d0f672c49affe879042b | def __call__(self, x, y):
' find the corresponding simplices (triangles) using Delaunay method: find_simplex(p)\n :param x: x coordinates of specified points\n :type x: numpy array of float\n :param y: y coordinates of specified points\n :type y: numpy array of float\n :return s: indices of triangles within which each point lies.\n :rtype s: numpy array of int\n '
assert (x.shape == y.shape)
axes = range(1, (x.ndim + 1))
axes.append(0)
p = np.array([x, y]).transpose(axes)
return self.delaunay.find_simplex(p) | find the corresponding simplices (triangles) using Delaunay method: find_simplex(p)
:param x: x coordinates of specified points
:type x: numpy array of float
:param y: y coordinates of specified points
:type y: numpy array of float
:return s: indices of triangles within which each point lies.
:rtype s: numpy array of int | src/python2/sdp/geometry/support.py | __call__ | LeiShi/Synthetic-Diagnostics-Platform | 5 | python | def __call__(self, x, y):
' find the corresponding simplices (triangles) using Delaunay method: find_simplex(p)\n :param x: x coordinates of specified points\n :type x: numpy array of float\n :param y: y coordinates of specified points\n :type y: numpy array of float\n :return s: indices of triangles within which each point lies.\n :rtype s: numpy array of int\n '
assert (x.shape == y.shape)
axes = range(1, (x.ndim + 1))
axes.append(0)
p = np.array([x, y]).transpose(axes)
return self.delaunay.find_simplex(p) | def __call__(self, x, y):
' find the corresponding simplices (triangles) using Delaunay method: find_simplex(p)\n :param x: x coordinates of specified points\n :type x: numpy array of float\n :param y: y coordinates of specified points\n :type y: numpy array of float\n :return s: indices of triangles within which each point lies.\n :rtype s: numpy array of int\n '
assert (x.shape == y.shape)
axes = range(1, (x.ndim + 1))
axes.append(0)
p = np.array([x, y]).transpose(axes)
return self.delaunay.find_simplex(p)<|docstring|>find the corresponding simplices (triangles) using Delaunay method: find_simplex(p)
:param x: x coordinates of specified points
:type x: numpy array of float
:param y: y coordinates of specified points
:type y: numpy array of float
:return s: indices of triangles within which each point lies.
:rtype s: numpy array of int<|endoftext|> |
8a67903456f171ef69dd1c8578e36ac9304f6d5abc2a7c765992917ad8e4d7f6 | def __init__(self, duration, labels=None):
'\n Initializes a model with a single hidden layer. Features are\n aggregated over the time specified by the duration and the hidden\n layer size is a hyperparameter set at initialization.\n\n Args:\n duration: Time duration to aggregate features for\n '
self.duration = duration
self.means = None
self.stds = None
self.feature_list = None
self.model = None
self.labels = labels
self.sessions = None | Initializes a model with a single hidden layer. Features are
aggregated over the time specified by the duration and the hidden
layer size is a hyperparameter set at initialization.
Args:
duration: Time duration to aggregate features for | utils/PcaPFeatureExtractor.py | __init__ | alshaboti/PoseidonMLOld | 0 | python | def __init__(self, duration, labels=None):
'\n Initializes a model with a single hidden layer. Features are\n aggregated over the time specified by the duration and the hidden\n layer size is a hyperparameter set at initialization.\n\n Args:\n duration: Time duration to aggregate features for\n '
self.duration = duration
self.means = None
self.stds = None
self.feature_list = None
self.model = None
self.labels = labels
self.sessions = None | def __init__(self, duration, labels=None):
'\n Initializes a model with a single hidden layer. Features are\n aggregated over the time specified by the duration and the hidden\n layer size is a hyperparameter set at initialization.\n\n Args:\n duration: Time duration to aggregate features for\n '
self.duration = duration
self.means = None
self.stds = None
self.feature_list = None
self.model = None
self.labels = labels
self.sessions = None<|docstring|>Initializes a model with a single hidden layer. Features are
aggregated over the time specified by the duration and the hidden
layer size is a hyperparameter set at initialization.
Args:
duration: Time duration to aggregate features for<|endoftext|> |
414dc7defcc6915fedd9a5b18c2a57b6c653f89fc1c64faed0e59d158f516cc3 | def get_x_y(self, data_dir):
'\n Trains a single layer model on the data contained in the specified\n directory. Labels found in the directory are augmented with an\n unknown label.\n\n Args:\n data_dir: Directory containing the training data\n '
print('Reading data')
(X_all, y_all, new_labels) = read_data(data_dir, duration=self.duration, labels=self.labels)
self.labels = new_labels | Trains a single layer model on the data contained in the specified
directory. Labels found in the directory are augmented with an
unknown label.
Args:
data_dir: Directory containing the training data | utils/PcaPFeatureExtractor.py | get_x_y | alshaboti/PoseidonMLOld | 0 | python | def get_x_y(self, data_dir):
'\n Trains a single layer model on the data contained in the specified\n directory. Labels found in the directory are augmented with an\n unknown label.\n\n Args:\n data_dir: Directory containing the training data\n '
print('Reading data')
(X_all, y_all, new_labels) = read_data(data_dir, duration=self.duration, labels=self.labels)
self.labels = new_labels | def get_x_y(self, data_dir):
'\n Trains a single layer model on the data contained in the specified\n directory. Labels found in the directory are augmented with an\n unknown label.\n\n Args:\n data_dir: Directory containing the training data\n '
print('Reading data')
(X_all, y_all, new_labels) = read_data(data_dir, duration=self.duration, labels=self.labels)
self.labels = new_labels<|docstring|>Trains a single layer model on the data contained in the specified
directory. Labels found in the directory are augmented with an
unknown label.
Args:
data_dir: Directory containing the training data<|endoftext|> |
92d07df5ab4bee76a238945fecaed83872358ce8dfadf6588917256e2a18ef7b | def __init__(self, file_path: str):
'Constructor\n\n :param file_path: path to the csv file\n '
self._file_path = file_path
self._file = Path(file_path)
if (not self._file.exists()):
raise FileNotFoundError
self._columns = self._rows = (- 1)
self._load_stats() | Constructor
:param file_path: path to the csv file | DatabaseNormalizer/db_normalizer/csv_handler/reader.py | __init__ | pBouillon/Telecom_PPII | 1 | python | def __init__(self, file_path: str):
'Constructor\n\n :param file_path: path to the csv file\n '
self._file_path = file_path
self._file = Path(file_path)
if (not self._file.exists()):
raise FileNotFoundError
self._columns = self._rows = (- 1)
self._load_stats() | def __init__(self, file_path: str):
'Constructor\n\n :param file_path: path to the csv file\n '
self._file_path = file_path
self._file = Path(file_path)
if (not self._file.exists()):
raise FileNotFoundError
self._columns = self._rows = (- 1)
self._load_stats()<|docstring|>Constructor
:param file_path: path to the csv file<|endoftext|> |
3c6cfc8998aa95b6535633651c6afd171706f5a97f93130e6a16851e1f9259c4 | def _load_stats(self) -> None:
'Load stats of the .csv file for future usage\n '
self._rows = 0
for line in self._file.read_text(Csv.encoding).split('\n'):
if (self._columns == (- 1)):
self._columns = len(line.split(Csv.separator))
self._rows += 1 | Load stats of the .csv file for future usage | DatabaseNormalizer/db_normalizer/csv_handler/reader.py | _load_stats | pBouillon/Telecom_PPII | 1 | python | def _load_stats(self) -> None:
'\n '
self._rows = 0
for line in self._file.read_text(Csv.encoding).split('\n'):
if (self._columns == (- 1)):
self._columns = len(line.split(Csv.separator))
self._rows += 1 | def _load_stats(self) -> None:
'\n '
self._rows = 0
for line in self._file.read_text(Csv.encoding).split('\n'):
if (self._columns == (- 1)):
self._columns = len(line.split(Csv.separator))
self._rows += 1<|docstring|>Load stats of the .csv file for future usage<|endoftext|> |
3ad51da80eedc1da375f3ab275ad0f9e70506aa09857a06b310f68ad46c3cc4e | def read_content(self, skip_header: Optional[bool]=False, encoding: Optional[str]=Csv.encoding) -> Iterator[List[str]]:
'Read .csv content\n\n        :param skip_header: whether to skip the first (header) line of the file\n        :param encoding: encoding used to read the file\n        :return: an iterator with each line as a list of its columns\n        '
is_header_skipped = False
for line in self._file.read_text(encoding).split('\n'):
if (skip_header and (not is_header_skipped)):
is_header_skipped = True
continue
if (not line):
continue
values = [field for (field, _) in re.findall(Parsing.parse_regex, line)][:(- 1)]
(yield list(map((lambda field: (field[1:(- 1)] if (field.startswith(Csv.delimiter) and field.endswith(Csv.delimiter)) else field)), values))) | Read .csv content
:param skip_header: whether to skip the first (header) line of the file
:param encoding: encoding used to read the file
:return: an iterator with each line as a list of its columns | DatabaseNormalizer/db_normalizer/csv_handler/reader.py | read_content | pBouillon/Telecom_PPII | 1 | python | def read_content(self, skip_header: Optional[bool]=False, encoding: Optional[str]=Csv.encoding) -> Iterator[List[str]]:
'Read .csv content\n\n        :param skip_header: whether to skip the first (header) line of the file\n        :param encoding: encoding used to read the file\n        :return: an iterator with each line as a list of its columns\n        '
is_header_skipped = False
for line in self._file.read_text(encoding).split('\n'):
if (skip_header and (not is_header_skipped)):
is_header_skipped = True
continue
if (not line):
continue
values = [field for (field, _) in re.findall(Parsing.parse_regex, line)][:(- 1)]
(yield list(map((lambda field: (field[1:(- 1)] if (field.startswith(Csv.delimiter) and field.endswith(Csv.delimiter)) else field)), values))) | def read_content(self, skip_header: Optional[bool]=False, encoding: Optional[str]=Csv.encoding) -> Iterator[List[str]]:
'Read .csv content\n\n        :param skip_header: whether to skip the first (header) line of the file\n        :param encoding: encoding used to read the file\n        :return: an iterator with each line as a list of its columns\n        '
is_header_skipped = False
for line in self._file.read_text(encoding).split('\n'):
if (skip_header and (not is_header_skipped)):
is_header_skipped = True
continue
if (not line):
continue
values = [field for (field, _) in re.findall(Parsing.parse_regex, line)][:(- 1)]
(yield list(map((lambda field: (field[1:(- 1)] if (field.startswith(Csv.delimiter) and field.endswith(Csv.delimiter)) else field)), values)))<|docstring|>Read .csv content
:param skip_header: whether to skip the first (header) line of the file
:param encoding: encoding used to read the file
:return: an iterator with each line as a list of its columns<|endoftext|> |
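A hedged usage sketch for the Reader records above; the file path is hypothetical, and Csv.separator, Csv.delimiter, Csv.encoding, and Parsing.parse_regex are configuration constants from the same package that are not shown here.

reader = Reader('data/airports.csv')        # hypothetical file path
print(reader.rows, reader.columns)          # stats computed once by _load_stats()
for fields in reader.read_content(skip_header=True):
    print(fields)                           # one list of column values per CSV line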
fdfbb030c1b8951183305862f67fdedde117fa02970f44d2435961154328e84e | @property
def columns(self) -> int:
'Getter for `_columns`\n :return: the number of columns in the file\n '
return self._columns | Getter for `_columns`
:return: the number of columns in the file | DatabaseNormalizer/db_normalizer/csv_handler/reader.py | columns | pBouillon/Telecom_PPII | 1 | python | @property
def columns(self) -> int:
'Getter for `_columns`\n :return: the number of columns in the file\n '
return self._columns | @property
def columns(self) -> int:
'Getter for `_columns`\n :return: the number of columns in the file\n '
return self._columns<|docstring|>Getter for `_columns`
:return: the number of columns in the file<|endoftext|> |
987e12430d43f75a6897210d1ffc48f563506c301080882665fa0bf35647b36f | @property
def rows(self) -> int:
'Getter for `_rows`\n :return: the number of rows in the file\n '
return self._rows | Getter for `_rows`
:return: the number of rows in the file | DatabaseNormalizer/db_normalizer/csv_handler/reader.py | rows | pBouillon/Telecom_PPII | 1 | python | @property
def rows(self) -> int:
'Getter for `_rows`\n :return: the number of rows in the file\n '
return self._rows | @property
def rows(self) -> int:
'Getter for `_rows`\n :return: the number of rows in the file\n '
return self._rows<|docstring|>Getter for `_rows`
:return: the number of rows in the file<|endoftext|> |
5b9822465bed72863a4e2bb9711fb1ffcb5682ce9b34327093752eb1a3d6e523 | def show(self):
'\n\n :return: string representing object\n '
return ('My Evernote %s' % self.title) | :return: string representing object | th_evernote/models.py | show | luisriverag/django-th | 1,069 | python | def show(self):
'\n\n \n '
return ('My Evernote %s' % self.title) | def show(self):
'\n\n \n '
return ('My Evernote %s' % self.title)<|docstring|>:return: string representing object<|endoftext|> |
8813df494a9c807ad6a3e3f6b80d3ba57e333705f2c26f607827d8ce314cea7d | def __init__(self):
'Initializes the Hparams instance.\n '
self.model_hparams = ModelHparams()
self.training_hparams = TrainingHparams()
self.inference_hparams = InferenceHparams() | Initializes the Hparams instance. | seq2seq-chatbot/hparams.py | __init__ | JEMurcia/Seq2Seq_CornellMovie | 104 | python | def __init__(self):
'\n '
self.model_hparams = ModelHparams()
self.training_hparams = TrainingHparams()
self.inference_hparams = InferenceHparams() | def __init__(self):
'\n '
self.model_hparams = ModelHparams()
self.training_hparams = TrainingHparams()
self.inference_hparams = InferenceHparams()<|docstring|>Initializes the Hparams instance.<|endoftext|> |
45f70828756b70be5d4404bd3e1503bf86b827a183ee823420f72187dfc9a43e | @staticmethod
def load(filepath):
'Loads the hyperparameters from a JSON file.\n\n Args:\n filepath: path of the JSON file.\n '
with open(filepath, 'r') as file:
json = file.read()
hparams = jsonpickle.decode(json)
hparams.training_hparams.input_vocab_import_mode = VocabularyImportMode[hparams.training_hparams.input_vocab_import_mode]
hparams.training_hparams.output_vocab_import_mode = VocabularyImportMode[hparams.training_hparams.output_vocab_import_mode]
return hparams | Loads the hyperparameters from a JSON file.
Args:
filepath: path of the JSON file. | seq2seq-chatbot/hparams.py | load | JEMurcia/Seq2Seq_CornellMovie | 104 | python | @staticmethod
def load(filepath):
'Loads the hyperparameters from a JSON file.\n\n Args:\n filepath: path of the JSON file.\n '
with open(filepath, 'r') as file:
json = file.read()
hparams = jsonpickle.decode(json)
hparams.training_hparams.input_vocab_import_mode = VocabularyImportMode[hparams.training_hparams.input_vocab_import_mode]
hparams.training_hparams.output_vocab_import_mode = VocabularyImportMode[hparams.training_hparams.output_vocab_import_mode]
return hparams | @staticmethod
def load(filepath):
'Loads the hyperparameters from a JSON file.\n\n Args:\n filepath: path of the JSON file.\n '
with open(filepath, 'r') as file:
json = file.read()
hparams = jsonpickle.decode(json)
hparams.training_hparams.input_vocab_import_mode = VocabularyImportMode[hparams.training_hparams.input_vocab_import_mode]
hparams.training_hparams.output_vocab_import_mode = VocabularyImportMode[hparams.training_hparams.output_vocab_import_mode]
return hparams<|docstring|>Loads the hyperparameters from a JSON file.
Args:
filepath: path of the JSON file.<|endoftext|> |
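A sketch of the matching save side, assuming jsonpickle symmetry with load(); because load() rebuilds the two VocabularyImportMode fields from their names, they are converted to names before encoding. The file path is hypothetical.

import jsonpickle

hparams = Hparams()
hparams.training_hparams.batch_size = 64
# load() does VocabularyImportMode[name], so store these enum members by name.
th = hparams.training_hparams
th.input_vocab_import_mode = th.input_vocab_import_mode.name
th.output_vocab_import_mode = th.output_vocab_import_mode.name
with open('hparams.json', 'w') as f:        # hypothetical path
    f.write(jsonpickle.encode(hparams))
restored = Hparams.load('hparams.json')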
19e8f48887c0a135a36a13941b275f0751407fa7a15a2bfabe102156fb24315c | def __init__(self):
'Initializes the ModelHparams instance.\n '
self.rnn_cell_type = 'lstm'
self.rnn_size = 256
self.use_bidirectional_encoder = True
self.encoder_num_layers = 2
self.decoder_num_layers = 2
self.encoder_embedding_size = 256
self.decoder_embedding_size = 256
self.encoder_embedding_trainable = True
self.decoder_embedding_trainable = True
self.share_embedding = True
self.attention_type = 'normed_bahdanau'
self.beam_width = 10
self.enable_sampling = False
self.optimizer = 'adam'
self.max_gradient_norm = 5.0
self.gpu_dynamic_memory_growth = True | Initializes the ModelHparams instance. | seq2seq-chatbot/hparams.py | __init__ | JEMurcia/Seq2Seq_CornellMovie | 104 | python | def __init__(self):
'\n '
self.rnn_cell_type = 'lstm'
self.rnn_size = 256
self.use_bidirectional_encoder = True
self.encoder_num_layers = 2
self.decoder_num_layers = 2
self.encoder_embedding_size = 256
self.decoder_embedding_size = 256
self.encoder_embedding_trainable = True
self.decoder_embedding_trainable = True
self.share_embedding = True
self.attention_type = 'normed_bahdanau'
self.beam_width = 10
self.enable_sampling = False
self.optimizer = 'adam'
self.max_gradient_norm = 5.0
self.gpu_dynamic_memory_growth = True | def __init__(self):
'\n '
self.rnn_cell_type = 'lstm'
self.rnn_size = 256
self.use_bidirectional_encoder = True
self.encoder_num_layers = 2
self.decoder_num_layers = 2
self.encoder_embedding_size = 256
self.decoder_embedding_size = 256
self.encoder_embedding_trainable = True
self.decoder_embedding_trainable = True
self.share_embedding = True
self.attention_type = 'normed_bahdanau'
self.beam_width = 10
self.enable_sampling = False
self.optimizer = 'adam'
self.max_gradient_norm = 5.0
self.gpu_dynamic_memory_growth = True<|docstring|>Initializes the ModelHparams instance.<|endoftext|> |
4689a0ae470d179f0946d1adc9bd6f79824bcc5844ba147793eaa0ea59efe230 | def __init__(self):
'Initializes the TrainingHparams instance.\n '
self.min_question_words = 1
self.max_question_answer_words = 30
self.max_conversations = (- 1)
self.conv_history_length = 6
self.normalize_words = True
self.input_vocab_threshold = 2
self.output_vocab_threshold = 2
self.input_vocab_import_normalized = True
self.output_vocab_import_normalized = True
self.input_vocab_import_mode = VocabularyImportMode.External
self.output_vocab_import_mode = VocabularyImportMode.Dataset
self.validation_set_percent = 0
self.random_train_val_split = True
self.validation_metric = 'loss'
self.epochs = 500
self.early_stopping_epochs = 500
self.batch_size = 128
self.learning_rate = 2.0
self.learning_rate_decay = 0.99
self.min_learning_rate = 0.1
self.dropout = 0.2
self.checkpoint_on_training = True
self.checkpoint_on_validation = True
self.log_summary = True
self.log_cleaned_dataset = True
self.log_training_data = True
self.stats_after_n_batches = 100
self.backup_on_training_loss = [] | Initializes the TrainingHparams instance. | seq2seq-chatbot/hparams.py | __init__ | JEMurcia/Seq2Seq_CornellMovie | 104 | python | def __init__(self):
'\n '
self.min_question_words = 1
self.max_question_answer_words = 30
self.max_conversations = (- 1)
self.conv_history_length = 6
self.normalize_words = True
self.input_vocab_threshold = 2
self.output_vocab_threshold = 2
self.input_vocab_import_normalized = True
self.output_vocab_import_normalized = True
self.input_vocab_import_mode = VocabularyImportMode.External
self.output_vocab_import_mode = VocabularyImportMode.Dataset
self.validation_set_percent = 0
self.random_train_val_split = True
self.validation_metric = 'loss'
self.epochs = 500
self.early_stopping_epochs = 500
self.batch_size = 128
self.learning_rate = 2.0
self.learning_rate_decay = 0.99
self.min_learning_rate = 0.1
self.dropout = 0.2
self.checkpoint_on_training = True
self.checkpoint_on_validation = True
self.log_summary = True
self.log_cleaned_dataset = True
self.log_training_data = True
self.stats_after_n_batches = 100
self.backup_on_training_loss = [] | def __init__(self):
'\n '
self.min_question_words = 1
self.max_question_answer_words = 30
self.max_conversations = (- 1)
self.conv_history_length = 6
self.normalize_words = True
self.input_vocab_threshold = 2
self.output_vocab_threshold = 2
self.input_vocab_import_normalized = True
self.output_vocab_import_normalized = True
self.input_vocab_import_mode = VocabularyImportMode.External
self.output_vocab_import_mode = VocabularyImportMode.Dataset
self.validation_set_percent = 0
self.random_train_val_split = True
self.validation_metric = 'loss'
self.epochs = 500
self.early_stopping_epochs = 500
self.batch_size = 128
self.learning_rate = 2.0
self.learning_rate_decay = 0.99
self.min_learning_rate = 0.1
self.dropout = 0.2
self.checkpoint_on_training = True
self.checkpoint_on_validation = True
self.log_summary = True
self.log_cleaned_dataset = True
self.log_training_data = True
self.stats_after_n_batches = 100
self.backup_on_training_loss = []<|docstring|>Initializes the TrainingHparams instance.<|endoftext|> |
9fe21fb1991bc15e5a062c74b4dee10c538e9cda5085f7031d25070906d5c925 | def __init__(self):
'Initializes the InferenceHparams instance.\n '
self.beam_length_penalty_weight = 1.25
self.sampling_temperature = 0.5
self.max_answer_words = 100
self.conv_history_length = 6
self.normalize_words = True
self.log_summary = True
self.log_chat = True | Initializes the InferenceHparams instance. | seq2seq-chatbot/hparams.py | __init__ | JEMurcia/Seq2Seq_CornellMovie | 104 | python | def __init__(self):
'\n '
self.beam_length_penalty_weight = 1.25
self.sampling_temperature = 0.5
self.max_answer_words = 100
self.conv_history_length = 6
self.normalize_words = True
self.log_summary = True
self.log_chat = True | def __init__(self):
'\n '
self.beam_length_penalty_weight = 1.25
self.sampling_temperature = 0.5
self.max_answer_words = 100
self.conv_history_length = 6
self.normalize_words = True
self.log_summary = True
self.log_chat = True<|docstring|>Initializes the InferenceHparams instance.<|endoftext|> |
72b066f2dfb089d7e41c6843faa97c2e1ddb113935fb8e1b65986d9ef1e036c6 | def bwareaopen(BW, P):
'Removes all connected components (objects) that have fewer than P pixels from the binary image BW.\n\n    Args:\n        BW (array): binary image.\n        P (int): minimum number of pixels an object must contain to be kept, specified as a nonnegative integer.\n\n    Returns:\n        [array]: binary image\n    '
rows = np.shape(BW)[0]
cols = np.shape(BW)[1]
tag = 2
for row in range(rows):
for col in range(cols):
if (BW[(row, col)] == 1):
BW = _find_connected_components(BW, row, col, tag)
tag = (tag + 1)
for component in range(2, tag):
pixels = np.count_nonzero((BW == component))
if (pixels < P):
BW[(BW == component)] = 0
else:
BW[(BW == component)] = 1
return BW | Removes all connected components (objects) that have fewer than P pixels from the binary image BW.
Args:
BW (array): binary image.
P (int): minimum number of pixels an object must contain to be kept, specified as a nonnegative integer.
Returns:
[array]: binary image | Lab 2/bwareaopen.py | bwareaopen | sotheanith/CECS-553-Collection | 0 | python | def bwareaopen(BW, P):
'Removes all connected components (objects) that have fewer than P pixels from the binary image BW.\n\n    Args:\n        BW (array): binary image.\n        P (int): minimum number of pixels an object must contain to be kept, specified as a nonnegative integer.\n\n    Returns:\n        [array]: binary image\n    '
rows = np.shape(BW)[0]
cols = np.shape(BW)[1]
tag = 2
for row in range(rows):
for col in range(cols):
if (BW[(row, col)] == 1):
BW = _find_connected_components(BW, row, col, tag)
tag = (tag + 1)
for component in range(2, tag):
pixels = np.count_nonzero((BW == component))
if (pixels < P):
BW[(BW == component)] = 0
else:
BW[(BW == component)] = 1
return BW | def bwareaopen(BW, P):
'Removes all connected components (objects) that have fewer than P pixels from the binary image BW.\n\n    Args:\n        BW (array): binary image.\n        P (int): minimum number of pixels an object must contain to be kept, specified as a nonnegative integer.\n\n    Returns:\n        [array]: binary image\n    '
rows = np.shape(BW)[0]
cols = np.shape(BW)[1]
tag = 2
for row in range(rows):
for col in range(cols):
if (BW[(row, col)] == 1):
BW = _find_connected_components(BW, row, col, tag)
tag = (tag + 1)
for component in range(2, tag):
pixels = np.count_nonzero((BW == component))
if (pixels < P):
BW[(BW == component)] = 0
else:
BW[(BW == component)] = 1
return BW<|docstring|>Removes all connected components (objects) that have fewer than P pixels from the binary image BW.
Args:
BW (array): binary image.
P (int): minimum number of pixels an object must contain to be kept, specified as a nonnegative integer.
Returns:
[array]: binary image<|endoftext|> |
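A minimal demo of bwareaopen on a toy binary image (made-up data): with P=2 the 2x2 block survives and the isolated pixel is removed. The function tags pixels in place, so a copy is passed.

import numpy as np

BW = np.array([[1, 1, 0, 0],
               [1, 1, 0, 0],
               [0, 0, 0, 1]])
print(bwareaopen(BW.copy(), 2))
# [[1 1 0 0]
#  [1 1 0 0]
#  [0 0 0 0]]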
a2c6588c0fafe4c7250e4ea5ab8995e3e530c63ea4ebb7305208ae8ab27a13f2 | def _find_connected_components(BW, initial_row, initial_col, tag):
'Performs a non-recursive flood-fill algorithm to find all pixels connected to a component.\n\n    Args:\n        BW (array): binary image.\n        initial_row (int): starting row index.\n        initial_col (int): starting column index.\n        tag (int): tag used to identify this connected component. \n\n    Returns:\n        [array]: binary image with tagged area of this connected component \n    '
unvisted_pixels = set()
unvisted_pixels.add((initial_row, initial_col))
while (len(unvisted_pixels) > 0):
(row, col) = unvisted_pixels.pop()
BW[(row, col)] = tag
if ((row > 0) and (col > 0) and (BW[((row - 1), (col - 1))] == 1)):
unvisted_pixels.add(((row - 1), (col - 1)))
if ((row > 0) and (BW[((row - 1), col)] == 1)):
unvisted_pixels.add(((row - 1), col))
if ((row > 0) and (col < (np.shape(BW)[1] - 1)) and (BW[((row - 1), (col + 1))] == 1)):
unvisted_pixels.add(((row - 1), (col + 1)))
if ((col > 0) and (BW[(row, (col - 1))] == 1)):
unvisted_pixels.add((row, (col - 1)))
if ((col < (np.shape(BW)[1] - 1)) and (BW[(row, (col + 1))] == 1)):
unvisted_pixels.add((row, (col + 1)))
if ((row < (np.shape(BW)[0] - 1)) and (col > 0) and (BW[((row + 1), (col - 1))] == 1)):
unvisted_pixels.add(((row + 1), (col - 1)))
if ((row < (np.shape(BW)[0] - 1)) and (BW[((row + 1), col)] == 1)):
unvisted_pixels.add(((row + 1), col))
if ((row < (np.shape(BW)[0] - 1)) and (col < (np.shape(BW)[1] - 1)) and (BW[((row + 1), (col + 1))] == 1)):
unvisted_pixels.add(((row + 1), (col + 1)))
return BW | Performs a non-recursive flood-fill algorithm to find all pixels connected to a component.
Args:
BW (array): binary image.
initial_row (int): starting row index.
initial_col (int): starting column index.
tag (int): tag used to identify this connected component.
Returns:
[array]: binary image with tagged area of this connected component | Lab 2/bwareaopen.py | _find_connected_components | sotheanith/CECS-553-Collection | 0 | python | def _find_connected_components(BW, initial_row, initial_col, tag):
'Performs a non-recursive flood-fill algorithm to find all pixels connected to a component.\n\n    Args:\n        BW (array): binary image.\n        initial_row (int): starting row index.\n        initial_col (int): starting column index.\n        tag (int): tag used to identify this connected component. \n\n    Returns:\n        [array]: binary image with tagged area of this connected component \n    '
unvisted_pixels = set()
unvisted_pixels.add((initial_row, initial_col))
while (len(unvisted_pixels) > 0):
(row, col) = unvisted_pixels.pop()
BW[(row, col)] = tag
if ((row > 0) and (col > 0) and (BW[((row - 1), (col - 1))] == 1)):
unvisted_pixels.add(((row - 1), (col - 1)))
if ((row > 0) and (BW[((row - 1), col)] == 1)):
unvisted_pixels.add(((row - 1), col))
if ((row > 0) and (col < (np.shape(BW)[1] - 1)) and (BW[((row - 1), (col + 1))] == 1)):
unvisted_pixels.add(((row - 1), (col + 1)))
if ((col > 0) and (BW[(row, (col - 1))] == 1)):
unvisted_pixels.add((row, (col - 1)))
if ((col < (np.shape(BW)[1] - 1)) and (BW[(row, (col + 1))] == 1)):
unvisted_pixels.add((row, (col + 1)))
if ((row < (np.shape(BW)[0] - 1)) and (col > 0) and (BW[((row + 1), (col - 1))] == 1)):
unvisted_pixels.add(((row + 1), (col - 1)))
if ((row < (np.shape(BW)[0] - 1)) and (BW[((row + 1), col)] == 1)):
unvisted_pixels.add(((row + 1), col))
if ((row < (np.shape(BW)[0] - 1)) and (col < (np.shape(BW)[1] - 1)) and (BW[((row + 1), (col + 1))] == 1)):
unvisted_pixels.add(((row + 1), (col + 1)))
return BW | def _find_connected_components(BW, initial_row, initial_col, tag):
'Performs a non-recursive flood-fill algorithm to find all pixels connected to a component.\n\n    Args:\n        BW (array): binary image.\n        initial_row (int): starting row index.\n        initial_col (int): starting column index.\n        tag (int): tag used to identify this connected component. \n\n    Returns:\n        [array]: binary image with tagged area of this connected component \n    '
unvisted_pixels = set()
unvisted_pixels.add((initial_row, initial_col))
while (len(unvisted_pixels) > 0):
(row, col) = unvisted_pixels.pop()
BW[(row, col)] = tag
if ((row > 0) and (col > 0) and (BW[((row - 1), (col - 1))] == 1)):
unvisted_pixels.add(((row - 1), (col - 1)))
if ((row > 0) and (BW[((row - 1), col)] == 1)):
unvisted_pixels.add(((row - 1), col))
if ((row > 0) and (col < (np.shape(BW)[1] - 1)) and (BW[((row - 1), (col + 1))] == 1)):
unvisted_pixels.add(((row - 1), (col + 1)))
if ((col > 0) and (BW[(row, (col - 1))] == 1)):
unvisted_pixels.add((row, (col - 1)))
if ((col < (np.shape(BW)[1] - 1)) and (BW[(row, (col + 1))] == 1)):
unvisted_pixels.add((row, (col + 1)))
if ((row < (np.shape(BW)[0] - 1)) and (col > 0) and (BW[((row + 1), (col - 1))] == 1)):
unvisted_pixels.add(((row + 1), (col - 1)))
if ((row < (np.shape(BW)[0] - 1)) and (BW[((row + 1), col)] == 1)):
unvisted_pixels.add(((row + 1), col))
if ((row < (np.shape(BW)[0] - 1)) and (col < (np.shape(BW)[1] - 1)) and (BW[((row + 1), (col + 1))] == 1)):
unvisted_pixels.add(((row + 1), (col + 1)))
return BW<|docstring|>Performs a non-recursive flood-fill algorithm to find all pixels connected to a component.
Args:
BW (array): binary image.
initial_row (int): starting row index.
initial_col (int): starting column index.
tag (int): tag used to identify this connected component.
Returns:
[array]: binary image with tagged area of this connected component<|endoftext|> |
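The eight explicit neighbor checks in the record above implement 8-connectivity. A compact equivalent of the same neighborhood scan (a sketch, not the repository's code):

def neighbors8(row, col, shape):
    # Yields the in-bounds 8-connected neighbors of (row, col).
    for dr in (-1, 0, 1):
        for dc in (-1, 0, 1):
            if (dr, dc) != (0, 0):
                r, c = row + dr, col + dc
                if 0 <= r < shape[0] and 0 <= c < shape[1]:
                    yield (r, c)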
7b2393cc957622a4ef506efc8007de7b7cde642603a61148e713b2bc9d97b6d3 | def __init__(self):
'Noise, system setting and x0 settings'
super(Test_ss_linear1, self).__init__(nx=2) | Noise, system setting and x0 settings | deepSI/systems/test_systems.py | __init__ | GerbenBeintema/deepSI | 12 | python | def __init__(self):
super(Test_ss_linear1, self).__init__(nx=2) | def __init__(self):
super(Test_ss_linear1, self).__init__(nx=2)<|docstring|>Noise, system setting and x0 settings<|endoftext|> |
32189627b8628fad282fdc61ccccb8f1818804c3d6a1c66a26f4f43435eb198e | def __init__(self):
'Noise, system setting and x0 settings'
super(Test_ss_linear2, self).__init__(nx=2) | Noise, system setting and x0 settings | deepSI/systems/test_systems.py | __init__ | GerbenBeintema/deepSI | 12 | python | def __init__(self):
super(Test_ss_linear2, self).__init__(nx=2) | def __init__(self):
super(Test_ss_linear2, self).__init__(nx=2)<|docstring|>Noise, system setting and x0 settings<|endoftext|> |
4cdcef4640b345f47fc622211982136610cdbc27ece098169408fa565996d222 | def __init__(self, instance_id=None, product_id=None, gateway_id=None, is_cascade_query=None, node_id=None, device_name=None, limit=None, marker=None, offset=None, start_time=None, end_time=None, app_id=None):
'ListDevicesRequest - a model defined in huaweicloud sdk'
self._instance_id = None
self._product_id = None
self._gateway_id = None
self._is_cascade_query = None
self._node_id = None
self._device_name = None
self._limit = None
self._marker = None
self._offset = None
self._start_time = None
self._end_time = None
self._app_id = None
self.discriminator = None
if (instance_id is not None):
self.instance_id = instance_id
if (product_id is not None):
self.product_id = product_id
if (gateway_id is not None):
self.gateway_id = gateway_id
if (is_cascade_query is not None):
self.is_cascade_query = is_cascade_query
if (node_id is not None):
self.node_id = node_id
if (device_name is not None):
self.device_name = device_name
if (limit is not None):
self.limit = limit
if (marker is not None):
self.marker = marker
if (offset is not None):
self.offset = offset
if (start_time is not None):
self.start_time = start_time
if (end_time is not None):
self.end_time = end_time
if (app_id is not None):
self.app_id = app_id | ListDevicesRequest - a model defined in huaweicloud sdk | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | __init__ | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | def __init__(self, instance_id=None, product_id=None, gateway_id=None, is_cascade_query=None, node_id=None, device_name=None, limit=None, marker=None, offset=None, start_time=None, end_time=None, app_id=None):
self._instance_id = None
self._product_id = None
self._gateway_id = None
self._is_cascade_query = None
self._node_id = None
self._device_name = None
self._limit = None
self._marker = None
self._offset = None
self._start_time = None
self._end_time = None
self._app_id = None
self.discriminator = None
if (instance_id is not None):
self.instance_id = instance_id
if (product_id is not None):
self.product_id = product_id
if (gateway_id is not None):
self.gateway_id = gateway_id
if (is_cascade_query is not None):
self.is_cascade_query = is_cascade_query
if (node_id is not None):
self.node_id = node_id
if (device_name is not None):
self.device_name = device_name
if (limit is not None):
self.limit = limit
if (marker is not None):
self.marker = marker
if (offset is not None):
self.offset = offset
if (start_time is not None):
self.start_time = start_time
if (end_time is not None):
self.end_time = end_time
if (app_id is not None):
self.app_id = app_id | def __init__(self, instance_id=None, product_id=None, gateway_id=None, is_cascade_query=None, node_id=None, device_name=None, limit=None, marker=None, offset=None, start_time=None, end_time=None, app_id=None):
self._instance_id = None
self._product_id = None
self._gateway_id = None
self._is_cascade_query = None
self._node_id = None
self._device_name = None
self._limit = None
self._marker = None
self._offset = None
self._start_time = None
self._end_time = None
self._app_id = None
self.discriminator = None
if (instance_id is not None):
self.instance_id = instance_id
if (product_id is not None):
self.product_id = product_id
if (gateway_id is not None):
self.gateway_id = gateway_id
if (is_cascade_query is not None):
self.is_cascade_query = is_cascade_query
if (node_id is not None):
self.node_id = node_id
if (device_name is not None):
self.device_name = device_name
if (limit is not None):
self.limit = limit
if (marker is not None):
self.marker = marker
if (offset is not None):
self.offset = offset
if (start_time is not None):
self.start_time = start_time
if (end_time is not None):
self.end_time = end_time
if (app_id is not None):
self.app_id = app_id<|docstring|>ListDevicesRequest - a model defined in huaweicloud sdk<|endoftext|> |
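A hedged construction sketch for the request model above; every keyword argument is optional, and the gateway ID used here is hypothetical.

req = ListDevicesRequest(limit=50,
                         is_cascade_query=True,
                         gateway_id='my-gateway-id')   # hypothetical gateway ID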
91dccf761f946b2511778387ff2129d59c76c64cb60db5270ed2c7c69d750a28 | @property
def instance_id(self):
'Gets the instance_id of this ListDevicesRequest.\n\n        **Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.\n\n        :return: The instance_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._instance_id | Gets the instance_id of this ListDevicesRequest.
**Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.
:return: The instance_id of this ListDevicesRequest.
:rtype: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | instance_id | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @property
def instance_id(self):
'Gets the instance_id of this ListDevicesRequest.\n\n        **Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.\n\n        :return: The instance_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._instance_id | @property
def instance_id(self):
'Gets the instance_id of this ListDevicesRequest.\n\n        **Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.\n\n        :return: The instance_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._instance_id<|docstring|>Gets the instance_id of this ListDevicesRequest.
**Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.
:return: The instance_id of this ListDevicesRequest.
:rtype: str<|endoftext|> |
105b7a955a41d3905be2129041c895468910ebcf6987cac39f6450701a7b3feb | @instance_id.setter
def instance_id(self, instance_id):
'Sets the instance_id of this ListDevicesRequest.\n\n        **Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.\n\n        :param instance_id: The instance_id of this ListDevicesRequest.\n        :type: str\n        '
self._instance_id = instance_id | Sets the instance_id of this ListDevicesRequest.
**Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.
:param instance_id: The instance_id of this ListDevicesRequest.
:type: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | instance_id | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @instance_id.setter
def instance_id(self, instance_id):
'Sets the instance_id of this ListDevicesRequest.\n\n        **Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.\n\n        :param instance_id: The instance_id of this ListDevicesRequest.\n        :type: str\n        '
self._instance_id = instance_id | @instance_id.setter
def instance_id(self, instance_id):
'Sets the instance_id of this ListDevicesRequest.\n\n        **Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.\n\n        :param instance_id: The instance_id of this ListDevicesRequest.\n        :type: str\n        '
self._instance_id = instance_id<|docstring|>Sets the instance_id of this ListDevicesRequest.
**Parameter description**: Instance ID, which uniquely identifies an instance under physical multi-tenancy. Huawei Cloud tenants generally do not need to carry this parameter; it is required only when the API is accessed from the management plane in a physical multi-tenant scenario.
:param instance_id: The instance_id of this ListDevicesRequest.
:type: str<|endoftext|> |
647f1f489d645d33e27a3da0ed090183c2c668ec211168966f59791607a700bc | @property
def product_id(self):
'Gets the product_id of this ListDevicesRequest.\n\n        **Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The product_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._product_id | Gets the product_id of this ListDevicesRequest.
**Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:return: The product_id of this ListDevicesRequest.
:rtype: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | product_id | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @property
def product_id(self):
'Gets the product_id of this ListDevicesRequest.\n\n        **Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The product_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._product_id | @property
def product_id(self):
'Gets the product_id of this ListDevicesRequest.\n\n        **Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The product_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._product_id<|docstring|>Gets the product_id of this ListDevicesRequest.
**Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:return: The product_id of this ListDevicesRequest.
:rtype: str<|endoftext|> |
44982e7f9e9ca470b7cd308a6345e275067a7f00bb7bdcb10f68d1e0c1b5c92b | @product_id.setter
def product_id(self, product_id):
'Sets the product_id of this ListDevicesRequest.\n\n        **Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param product_id: The product_id of this ListDevicesRequest.\n        :type: str\n        '
self._product_id = product_id | Sets the product_id of this ListDevicesRequest.
**Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:param product_id: The product_id of this ListDevicesRequest.
:type: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | product_id | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @product_id.setter
def product_id(self, product_id):
'Sets the product_id of this ListDevicesRequest.\n\n        **Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param product_id: The product_id of this ListDevicesRequest.\n        :type: str\n        '
self._product_id = product_id | @product_id.setter
def product_id(self, product_id):
'Sets the product_id of this ListDevicesRequest.\n\n        **Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param product_id: The product_id of this ListDevicesRequest.\n        :type: str\n        '
self._product_id = product_id<|docstring|>Sets the product_id of this ListDevicesRequest.
**Parameter description**: ID of the product the device belongs to, which uniquely identifies a product model and is obtained after the product is created. For details, see [Creating a Product](https://support.huaweicloud.com/api-iothub/iot_06_v5_0050.html). **Value range**: a maximum of 36 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:param product_id: The product_id of this ListDevicesRequest.
:type: str<|endoftext|> |
1e76668d1e0c9ddf39776874ed2cc54e8f5b09d53dfab4a82f2e7061e8274e9b | @property
def gateway_id(self):
'Gets the gateway_id of this ListDevicesRequest.\n\n        **Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The gateway_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._gateway_id | Gets the gateway_id of this ListDevicesRequest.
**Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:return: The gateway_id of this ListDevicesRequest.
:rtype: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | gateway_id | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @property
def gateway_id(self):
'Gets the gateway_id of this ListDevicesRequest.\n\n        **Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The gateway_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._gateway_id | @property
def gateway_id(self):
'Gets the gateway_id of this ListDevicesRequest.\n\n        **Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The gateway_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._gateway_id<|docstring|>Gets the gateway_id of this ListDevicesRequest.
**Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:return: The gateway_id of this ListDevicesRequest.
:rtype: str<|endoftext|> |
f915e6577d960ded234e1cd9e20a9339c9d9ec05a34c73a7365af3d9939860ea | @gateway_id.setter
def gateway_id(self, gateway_id):
'Sets the gateway_id of this ListDevicesRequest.\n\n        **Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param gateway_id: The gateway_id of this ListDevicesRequest.\n        :type: str\n        '
self._gateway_id = gateway_id | Sets the gateway_id of this ListDevicesRequest.
**Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:param gateway_id: The gateway_id of this ListDevicesRequest.
:type: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | gateway_id | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @gateway_id.setter
def gateway_id(self, gateway_id):
'Sets the gateway_id of this ListDevicesRequest.\n\n        **Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param gateway_id: The gateway_id of this ListDevicesRequest.\n        :type: str\n        '
self._gateway_id = gateway_id | @gateway_id.setter
def gateway_id(self, gateway_id):
'Sets the gateway_id of this ListDevicesRequest.\n\n        **Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param gateway_id: The gateway_id of this ListDevicesRequest.\n        :type: str\n        '
self._gateway_id = gateway_id<|docstring|>Sets the gateway_id of this ListDevicesRequest.
**Parameter description**: Gateway ID, which identifies the parent device that the device belongs to, that is, the device ID of the parent device. If this parameter is carried, the child devices of that device are queried (only the next level by default; to query child devices at all levels, also set is_cascade_query to true). If this parameter is not carried, all devices of the user are queried. **Value range**: a maximum of 128 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:param gateway_id: The gateway_id of this ListDevicesRequest.
:type: str<|endoftext|> |
1f63aa3959a8ec1b94310e5ceed0159c51a16d4c6faa4e8ee582e43dcf140a32 | @property
def is_cascade_query(self):
'Gets the is_cascade_query of this ListDevicesRequest.\n\n        **Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.\n\n        :return: The is_cascade_query of this ListDevicesRequest.\n        :rtype: bool\n        '
return self._is_cascade_query | Gets the is_cascade_query of this ListDevicesRequest.
**Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.
:return: The is_cascade_query of this ListDevicesRequest.
:rtype: bool | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | is_cascade_query | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @property
def is_cascade_query(self):
'Gets the is_cascade_query of this ListDevicesRequest.\n\n        **Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.\n\n        :return: The is_cascade_query of this ListDevicesRequest.\n        :rtype: bool\n        '
return self._is_cascade_query | @property
def is_cascade_query(self):
'Gets the is_cascade_query of this ListDevicesRequest.\n\n        **Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.\n\n        :return: The is_cascade_query of this ListDevicesRequest.\n        :rtype: bool\n        '
return self._is_cascade_query<|docstring|>Gets the is_cascade_query of this ListDevicesRequest.
**Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.
:return: The is_cascade_query of this ListDevicesRequest.
:rtype: bool<|endoftext|> |
5545987e55023ce2a7349a72ac5d5f3dfd2e1543b6cf61a1df6b0679a6c289d2 | @is_cascade_query.setter
def is_cascade_query(self, is_cascade_query):
'Sets the is_cascade_query of this ListDevicesRequest.\n\n        **Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.\n\n        :param is_cascade_query: The is_cascade_query of this ListDevicesRequest.\n        :type: bool\n        '
self._is_cascade_query = is_cascade_query | Sets the is_cascade_query of this ListDevicesRequest.
**Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.
:param is_cascade_query: The is_cascade_query of this ListDevicesRequest.
:type: bool | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | is_cascade_query | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @is_cascade_query.setter
def is_cascade_query(self, is_cascade_query):
'Sets the is_cascade_query of this ListDevicesRequest.\n\n        **Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.\n\n        :param is_cascade_query: The is_cascade_query of this ListDevicesRequest.\n        :type: bool\n        '
self._is_cascade_query = is_cascade_query | @is_cascade_query.setter
def is_cascade_query(self, is_cascade_query):
'Sets the is_cascade_query of this ListDevicesRequest.\n\n        **Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.\n\n        :param is_cascade_query: The is_cascade_query of this ListDevicesRequest.\n        :type: bool\n        '
self._is_cascade_query = is_cascade_query<|docstring|>Sets the is_cascade_query of this ListDevicesRequest.
**Parameter description**: Whether to perform a cascading query. This parameter takes effect only when gateway_id is also carried. The default value is false. **Value range**: - true: query child devices at all levels under the device whose device ID equals the gateway_id parameter. - false: query only the first-level child devices under the device whose device ID equals the gateway_id parameter.
:param is_cascade_query: The is_cascade_query of this ListDevicesRequest.
:type: bool<|endoftext|> |
6c9829bc2bef194eb562be0fbf2b2a0bd2db76ea2fc03e5ffaa6a9dfbf228543 | @property
def node_id(self):
'Gets the node_id of this ListDevicesRequest.\n\n        **Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The node_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._node_id | Gets the node_id of this ListDevicesRequest.
**Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:return: The node_id of this ListDevicesRequest.
:rtype: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | node_id | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @property
def node_id(self):
'Gets the node_id of this ListDevicesRequest.\n\n        **Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The node_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._node_id | @property
def node_id(self):
'Gets the node_id of this ListDevicesRequest.\n\n        **Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :return: The node_id of this ListDevicesRequest.\n        :rtype: str\n        '
return self._node_id<|docstring|>Gets the node_id of this ListDevicesRequest.
**Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:return: The node_id of this ListDevicesRequest.
:rtype: str<|endoftext|> |
899660a3dff0c7e4345a8b86ab236c1caabebc032907c1040556a50248f5708b | @node_id.setter
def node_id(self, node_id):
'Sets the node_id of this ListDevicesRequest.\n\n        **Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param node_id: The node_id of this ListDevicesRequest.\n        :type: str\n        '
self._node_id = node_id | Sets the node_id of this ListDevicesRequest.
**Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:param node_id: The node_id of this ListDevicesRequest.
:type: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | node_id | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @node_id.setter
def node_id(self, node_id):
'Sets the node_id of this ListDevicesRequest.\n\n        **Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param node_id: The node_id of this ListDevicesRequest.\n        :type: str\n        '
self._node_id = node_id | @node_id.setter
def node_id(self, node_id):
'Sets the node_id of this ListDevicesRequest.\n\n        **Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.\n\n        :param node_id: The node_id of this ListDevicesRequest.\n        :type: str\n        '
self._node_id = node_id<|docstring|>Sets the node_id of this ListDevicesRequest.
**Parameter description**: Device identifier; the IMEI, MAC address, or serial number is usually used as the node_id. **Value range**: a maximum of 64 characters; only combinations of letters, digits, underscores (_), and hyphens (-) are allowed.
:param node_id: The node_id of this ListDevicesRequest.
:type: str<|endoftext|> |
99577d49e9d669f847f0f9ca781be5adff725dbd8537c216f027b2298603cc3a | @property
def device_name(self):
"Gets the device_name of this ListDevicesRequest.\n\n **参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。\n\n :return: The device_name of this ListDevicesRequest.\n :rtype: str\n "
return self._device_name | Gets the device_name of this ListDevicesRequest.
**参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。
:return: The device_name of this ListDevicesRequest.
:rtype: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | device_name | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @property
def device_name(self):
"Gets the device_name of this ListDevicesRequest.\n\n **参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。\n\n :return: The device_name of this ListDevicesRequest.\n :rtype: str\n "
return self._device_name | @property
def device_name(self):
"Gets the device_name of this ListDevicesRequest.\n\n **参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。\n\n :return: The device_name of this ListDevicesRequest.\n :rtype: str\n "
return self._device_name<|docstring|>Gets the device_name of this ListDevicesRequest.
**参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。
:return: The device_name of this ListDevicesRequest.
:rtype: str<|endoftext|> |
e9706bd9f51fdd5e566babd6c4cd8ab28de2e58f6fd1493fb143bba60c9028d7 | @device_name.setter
def device_name(self, device_name):
"Sets the device_name of this ListDevicesRequest.\n\n **参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。\n\n :param device_name: The device_name of this ListDevicesRequest.\n :type: str\n "
self._device_name = device_name | Sets the device_name of this ListDevicesRequest.
**参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。
:param device_name: The device_name of this ListDevicesRequest.
:type: str | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | device_name | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @device_name.setter
def device_name(self, device_name):
"Sets the device_name of this ListDevicesRequest.\n\n **参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。\n\n :param device_name: The device_name of this ListDevicesRequest.\n :type: str\n "
self._device_name = device_name | @device_name.setter
def device_name(self, device_name):
"Sets the device_name of this ListDevicesRequest.\n\n **参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。\n\n :param device_name: The device_name of this ListDevicesRequest.\n :type: str\n "
self._device_name = device_name<|docstring|>Sets the device_name of this ListDevicesRequest.
**参数说明**:设备名称。 **取值范围**:长度不超过256,只允许中文、字母、数字、以及_?'#().,&%@!-等字符的组合。
:param device_name: The device_name of this ListDevicesRequest.
:type: str<|endoftext|> |
e4cc8f6597511dbbd13d7b1e3e7fea76608eb8ea8035a9e26108b9ef22919552 | @property
def limit(self):
'Gets the limit of this ListDevicesRequest.\n\n        **Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.\n\n        :return: The limit of this ListDevicesRequest.\n        :rtype: int\n        '
return self._limit | Gets the limit of this ListDevicesRequest.
**Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.
:return: The limit of this ListDevicesRequest.
:rtype: int | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | limit | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @property
def limit(self):
'Gets the limit of this ListDevicesRequest.\n\n        **Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.\n\n        :return: The limit of this ListDevicesRequest.\n        :rtype: int\n        '
return self._limit | @property
def limit(self):
'Gets the limit of this ListDevicesRequest.\n\n        **Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.\n\n        :return: The limit of this ListDevicesRequest.\n        :rtype: int\n        '
return self._limit<|docstring|>Gets the limit of this ListDevicesRequest.
**Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.
:return: The limit of this ListDevicesRequest.
:rtype: int<|endoftext|> |
10dc1e7ad1802b36f18dc35f814c80e63827262fa67d7c2c4f6be3f5fe6b9fa6 | @limit.setter
def limit(self, limit):
'Sets the limit of this ListDevicesRequest.\n\n        **Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.\n\n        :param limit: The limit of this ListDevicesRequest.\n        :type: int\n        '
self._limit = limit | Sets the limit of this ListDevicesRequest.
**Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.
:param limit: The limit of this ListDevicesRequest.
:type: int | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/list_devices_request.py | limit | huaweicloud/huaweicloud-sdk-python-v3 | 64 | python | @limit.setter
def limit(self, limit):
'Sets the limit of this ListDevicesRequest.\n\n        **Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.\n\n        :param limit: The limit of this ListDevicesRequest.\n        :type: int\n        '
self._limit = limit | @limit.setter
def limit(self, limit):
'Sets the limit of this ListDevicesRequest.\n\n        **Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.\n\n        :param limit: The limit of this ListDevicesRequest.\n        :type: int\n        '
self._limit = limit<|docstring|>Sets the limit of this ListDevicesRequest.
**Parameter description**: Number of records displayed on each page in a paginated query. **Value range**: an integer from 1 to 50; the default value is 10.
:param limit: The limit of this ListDevicesRequest.
:type: int<|endoftext|> |