Dataset column schema (one row per stored source file):

| column | type | observed range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3–10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |

The data rows below follow this column order, with cells separated by ` | ` and the `content` cell holding the full source file.
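A minimal sketch (editor's addition, not part of the original dump) of how rows with this schema could be inspected with the Hugging Face `datasets` library; the dataset identifier below is a placeholder, not the real repository name:

```python
from datasets import load_dataset

# Stream the split so the large `content` column is not materialised up front.
ds = load_dataset("your-org/your-code-dataset", split="train", streaming=True)  # hypothetical ID
row = next(iter(ds))
print(row["path"], row["language"], row["length_bytes"])  # per-file metadata
print(row["content"][:200])                                # start of the stored source file
```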
b5015f583a686346339dadaed0cc14a8864fa920 | b107883be08ea56bd3a56ddb0e2dd8dacce7db2e | /src/polystar/utils/dataframe.py | e020875b53a3ba4999062cc4d269e2850ea2cef9 | [] | no_license | PolySTAR-mtl/cv | ef7977b62577e520f6c69a9b7891c7f38e307028 | 27564abe89e7dff612e3630c31e080fae4164751 | refs/heads/master | 2023-05-01T16:45:19.777459 | 2021-05-30T10:36:10 | 2021-05-30T10:36:10 | 356,053,312 | 0 | 0 | null | 2021-05-30T10:36:11 | 2021-04-08T21:32:06 | Python | UTF-8 | Python | false | false | 908 | py | from typing import Any, Callable, Iterable, Union
from pandas import DataFrame
Format = Union[str, Callable]
def format_df_column(df: DataFrame, column_name: str, fmt: Format):
    df[column_name] = df[column_name].map(make_formater(fmt))  # accept a format string or a callable, like format_df_row does
def format_df_columns(df: DataFrame, column_names: Iterable[str], fmt: Format):
for c in column_names:
format_df_column(df, c, fmt)
def format_df_row(df: DataFrame, loc: Any, fmt: Format):
df.loc[loc] = df.loc[loc].map(make_formater(fmt))
def format_df_rows(df: DataFrame, locs: Iterable[Any], fmt: Format):
for loc in locs:
format_df_row(df, loc, fmt)
def make_formater(fmt: Format) -> Callable:
if isinstance(fmt, str):
return fmt.format
return fmt
def add_percentages_to_df(df: DataFrame, axis: int) -> DataFrame:
return df.applymap(str) + df.div(df.sum(axis=axis), axis=(1 - axis)).applymap(" ({:.1%})".format)
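# Illustrative usage (editor's sketch, not part of the original module); the
# sample DataFrame below is invented purely for demonstration.
if __name__ == "__main__":
    scores = DataFrame({"hits": [3, 7], "misses": [1, 9]}, index=["run_a", "run_b"])
    # Append each cell's share of its column total, e.g. "3 (30.0%)".
    print(add_percentages_to_df(scores, axis=0))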
| [
"[email protected]"
] | |
8c0b7e0087305801e9385ba26b58b968906a9657 | a2d44f3c89acb7424cc2771f5c0a926e2d902c77 | /transformers/examples/research_projects/luke/run_luke_ner_no_trainer.py | c7a9763d99659dff967f066520b319dc992fe82e | [
"Apache-2.0"
] | permissive | amazon-science/masked-diffusion-lm | 94845ff123eb586fca0247b0db7baf12dfee6a6d | 16b0294398d596198bc9f75375eaa6814f792dcb | refs/heads/main | 2023-08-03T02:23:14.301531 | 2023-05-04T19:54:58 | 2023-05-04T19:54:58 | 626,021,474 | 38 | 0 | Apache-2.0 | 2023-08-14T22:24:30 | 2023-04-10T16:19:44 | Python | UTF-8 | Python | false | false | 29,068 | py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning (m)LUKE model on token classification tasks (NER, POS, CHUNKS) relying on the accelerate library 🤗
without using a Trainer.
"""
import argparse
import logging
import math
import os
import random
from pathlib import Path
import datasets
import torch
from datasets import ClassLabel, load_dataset, load_metric
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
import transformers
from accelerate import Accelerator, DistributedDataParallelKwargs
from huggingface_hub import Repository
from luke_utils import DataCollatorForLukeTokenClassification, is_punctuation, padding_tensor
from transformers import (
AdamW,
LukeConfig,
LukeForEntitySpanClassification,
LukeTokenizer,
SchedulerType,
default_data_collator,
get_scheduler,
set_seed,
)
from transformers.file_utils import get_full_repo_name
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
def parse_args():
parser = argparse.ArgumentParser(
description="Finetune (m)LUKE on a token classification task (such as NER) with the accelerate library"
)
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
)
parser.add_argument(
"--text_column_name",
type=str,
default=None,
help="The column name of text to input in the file (a csv or JSON file).",
)
parser.add_argument(
"--label_column_name",
type=str,
default=None,
help="The column name of label to input in the file (a csv or JSON file).",
)
parser.add_argument(
"--max_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_length` is passed."
),
)
parser.add_argument(
"--max_entity_length",
type=int,
default=32,
help=(
"The maximum total input entity length after tokenization (Used only for (M)Luke models). Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_length` is passed."
),
)
parser.add_argument(
"--max_mention_length",
type=int,
default=30,
help=(
"The maximum total input mention length after tokenization (Used only for (M)Luke models). Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_length` is passed."
),
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--label_all_tokens",
action="store_true",
help="Setting labels of all special tokens to -100 and thus PyTorch will ignore them.",
)
parser.add_argument(
"--return_entity_level_metrics",
action="store_true",
help="Indication whether entity level metrics are to be returner.",
)
parser.add_argument(
"--task_name",
type=str,
default="ner",
choices=["ner", "pos", "chunk"],
help="The name of the task.",
)
parser.add_argument(
"--debug",
action="store_true",
help="Activate debug mode and run training only with a subset of data.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
args = parser.parse_args()
# Sanity checks
if args.task_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a task name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
handler = DistributedDataParallelKwargs(find_unused_parameters=True)
accelerator = Accelerator(kwargs_handlers=[handler])
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets for token classification task available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'tokens' or the first column if no column called
# 'tokens' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
data_files = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files)
# Trim a number of training examples
if args.debug:
for split in raw_datasets.keys():
raw_datasets[split] = raw_datasets[split].select(range(100))
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
if raw_datasets["train"] is not None:
column_names = raw_datasets["train"].column_names
features = raw_datasets["train"].features
else:
column_names = raw_datasets["validation"].column_names
features = raw_datasets["validation"].features
if args.text_column_name is not None:
text_column_name = args.text_column_name
elif "tokens" in column_names:
text_column_name = "tokens"
else:
text_column_name = column_names[0]
if args.label_column_name is not None:
label_column_name = args.label_column_name
elif f"{args.task_name}_tags" in column_names:
label_column_name = f"{args.task_name}_tags"
else:
label_column_name = column_names[1]
# In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
# unique labels.
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
else:
label_list = get_label_list(raw_datasets["train"][label_column_name])
num_labels = len(label_list)
# Map that sends B-Xxx label to its I-Xxx counterpart
b_to_i_label = []
for idx, label in enumerate(label_list):
if label.startswith("B-") and label.replace("B-", "I-") in label_list:
b_to_i_label.append(label_list.index(label.replace("B-", "I-")))
else:
b_to_i_label.append(idx)
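    # Editor's note (illustrative, not from the original script): with a sorted
    # label_list such as ["B-PER", "I-PER", "O"], the loop above yields
    # b_to_i_label == [1, 1, 2], i.e. every B- label points at its I- counterpart.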
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = LukeConfig.from_pretrained(args.config_name, num_labels=num_labels)
elif args.model_name_or_path:
config = LukeConfig.from_pretrained(args.model_name_or_path, num_labels=num_labels)
else:
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_name_or_path = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
if not tokenizer_name_or_path:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
tokenizer = LukeTokenizer.from_pretrained(
tokenizer_name_or_path,
use_fast=False,
task="entity_span_classification",
max_entity_length=args.max_entity_length,
max_mention_length=args.max_mention_length,
)
if args.model_name_or_path:
model = LukeForEntitySpanClassification.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = LukeForEntitySpanClassification.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
padding = "max_length" if args.pad_to_max_length else False
def compute_sentence_boundaries_for_luke(examples):
sentence_boundaries = []
for tokens in examples[text_column_name]:
sentence_boundaries.append([0, len(tokens)])
examples["sentence_boundaries"] = sentence_boundaries
return examples
def compute_entity_spans_for_luke(examples):
all_entity_spans = []
texts = []
all_labels_entity_spans = []
all_original_entity_spans = []
for labels, tokens, sentence_boundaries in zip(
examples[label_column_name], examples[text_column_name], examples["sentence_boundaries"]
):
subword_lengths = [len(tokenizer.tokenize(token)) for token in tokens]
total_subword_length = sum(subword_lengths)
_, context_end = sentence_boundaries
if total_subword_length > args.max_length - 2:
cur_length = sum(subword_lengths[:context_end])
idx = context_end - 1
while cur_length > args.max_length - 2:
cur_length -= subword_lengths[idx]
context_end -= 1
idx -= 1
text = ""
sentence_words = tokens[:context_end]
sentence_subword_lengths = subword_lengths[:context_end]
word_start_char_positions = []
word_end_char_positions = []
labels_positions = {}
for word, label in zip(sentence_words, labels):
if word[0] == "'" or (len(word) == 1 and is_punctuation(word)):
text = text.rstrip()
word_start_char_positions.append(len(text))
text += word
word_end_char_positions.append(len(text))
text += " "
labels_positions[(word_start_char_positions[-1], word_end_char_positions[-1])] = label
text = text.rstrip()
texts.append(text)
entity_spans = []
labels_entity_spans = []
original_entity_spans = []
for word_start in range(len(sentence_words)):
for word_end in range(word_start, len(sentence_words)):
if (
sum(sentence_subword_lengths[word_start:word_end]) <= tokenizer.max_mention_length
and len(entity_spans) < tokenizer.max_entity_length
):
entity_spans.append((word_start_char_positions[word_start], word_end_char_positions[word_end]))
original_entity_spans.append((word_start, word_end + 1))
if (
word_start_char_positions[word_start],
word_end_char_positions[word_end],
) in labels_positions:
labels_entity_spans.append(
labels_positions[
(word_start_char_positions[word_start], word_end_char_positions[word_end])
]
)
else:
labels_entity_spans.append(0)
all_entity_spans.append(entity_spans)
all_labels_entity_spans.append(labels_entity_spans)
all_original_entity_spans.append(original_entity_spans)
examples["entity_spans"] = all_entity_spans
examples["text"] = texts
examples["labels_entity_spans"] = all_labels_entity_spans
examples["original_entity_spans"] = all_original_entity_spans
return examples
def tokenize_and_align_labels(examples):
entity_spans = []
for v in examples["entity_spans"]:
entity_spans.append(list(map(tuple, v)))
tokenized_inputs = tokenizer(
examples["text"],
entity_spans=entity_spans,
max_length=args.max_length,
padding=padding,
truncation=True,
)
if padding == "max_length":
tokenized_inputs["labels"] = padding_tensor(
examples["labels_entity_spans"], -100, tokenizer.padding_side, tokenizer.max_entity_length
)
tokenized_inputs["original_entity_spans"] = padding_tensor(
examples["original_entity_spans"], (-1, -1), tokenizer.padding_side, tokenizer.max_entity_length
)
tokenized_inputs[label_column_name] = padding_tensor(
examples[label_column_name], -1, tokenizer.padding_side, tokenizer.max_entity_length
)
else:
tokenized_inputs["labels"] = [ex[: tokenizer.max_entity_length] for ex in examples["labels_entity_spans"]]
tokenized_inputs["original_entity_spans"] = [
ex[: tokenizer.max_entity_length] for ex in examples["original_entity_spans"]
]
tokenized_inputs[label_column_name] = [
ex[: tokenizer.max_entity_length] for ex in examples[label_column_name]
]
return tokenized_inputs
with accelerator.main_process_first():
raw_datasets = raw_datasets.map(
compute_sentence_boundaries_for_luke,
batched=True,
desc="Adding sentence boundaries",
)
raw_datasets = raw_datasets.map(
compute_entity_spans_for_luke,
batched=True,
desc="Adding sentence spans",
)
processed_raw_datasets = raw_datasets.map(
tokenize_and_align_labels,
batched=True,
remove_columns=raw_datasets["train"].column_names,
desc="Running tokenizer on dataset",
)
train_dataset = processed_raw_datasets["train"]
eval_dataset = processed_raw_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
if args.pad_to_max_length:
        # If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
data_collator = default_data_collator
else:
# Otherwise, `DataCollatorForTokenClassification` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
data_collator = DataCollatorForLukeTokenClassification(
tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Use the device given by the `accelerator` object.
device = accelerator.device
model.to(device)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
    # Note -> the training dataloader needs to be prepared before we grab its length below (because its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Metrics
metric = load_metric("seqeval")
def get_luke_labels(outputs, ner_tags, original_entity_spans):
true_predictions = []
true_labels = []
for output, original_spans, tags in zip(outputs.logits, original_entity_spans, ner_tags):
true_tags = [val for val in tags if val != -1]
true_original_spans = [val for val in original_spans if val != (-1, -1)]
max_indices = torch.argmax(output, axis=1)
max_logits = torch.max(output, axis=1).values
predictions = []
for logit, index, span in zip(max_logits, max_indices, true_original_spans):
if index != 0:
predictions.append((logit, span, label_list[index]))
predicted_sequence = [label_list[0]] * len(true_tags)
for _, span, label in sorted(predictions, key=lambda o: o[0], reverse=True):
if all([o == label_list[0] for o in predicted_sequence[span[0] : span[1]]]):
predicted_sequence[span[0]] = label
if span[1] - span[0] > 1:
predicted_sequence[span[0] + 1 : span[1]] = [label] * (span[1] - span[0] - 1)
true_predictions.append(predicted_sequence)
true_labels.append([label_list[tag_id] for tag_id in true_tags])
return true_predictions, true_labels
def compute_metrics():
results = metric.compute()
if args.return_entity_level_metrics:
# Unpack nested dictionaries
final_results = {}
for key, value in results.items():
if isinstance(value, dict):
for n, v in value.items():
final_results[f"{key}_{n}"] = v
else:
final_results[key] = value
return final_results
else:
return {
"precision": results["overall_precision"],
"recall": results["overall_recall"],
"f1": results["overall_f1"],
"accuracy": results["overall_accuracy"],
}
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
for epoch in range(args.num_train_epochs):
model.train()
for step, batch in enumerate(train_dataloader):
_ = batch.pop("original_entity_spans")
outputs = model(**batch)
loss = outputs.loss
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if completed_steps >= args.max_train_steps:
break
model.eval()
for step, batch in enumerate(eval_dataloader):
original_entity_spans = batch.pop("original_entity_spans")
with torch.no_grad():
outputs = model(**batch)
preds, refs = get_luke_labels(outputs, batch[label_column_name], original_entity_spans)
metric.add_batch(
predictions=preds,
references=refs,
            )  # predictions and references are expected to be a nested list of labels, not label_ids
eval_metric = compute_metrics()
accelerator.print(f"epoch {epoch}:", eval_metric)
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
6d253e9041fc0f16e07e2166ab6ba8890b19bf1e | 511fd0cb7e338bc5c2d5a9d60de8166efd5882fe | /pyrecs/icp_compat/ICPSequenceFile.py | 52f9c298bca501818f68f0444fe3b05b72aba2a2 | [] | no_license | bmaranville/pyrecs | 43341af4931538e57c8de7655efbcdbdd9099f02 | 29468ae4d8a4a9de5cac8988fd3620f806a71907 | refs/heads/master | 2021-01-15T15:45:47.514371 | 2016-11-04T14:07:50 | 2016-11-04T14:07:50 | 5,635,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,369 | py | from __future__ import with_statement
from StringIO import StringIO
class PyICPSequence:
"""controls and reads from a sequence file, moving the marker around
it is defined as an iterator, so getting the next element moves the marker
can use syntax "for cmd in PyICPSequenceFile(filename):" to iterate
through file, moving the marker
"""
def __init__(self, marker = '%', data = ''):
self.data = data
self.marker = marker
self.last = ''
self.next_command = ''
self.current_command = ''
def LoadData(self):
return self.data
def ParseData(self):
data = self.LoadData()
current_command = None
seek_pos = 0
datalen = len(data)
not_separator = True
def next_cmd(data, seek_pos):
cmd = ''
not_separator = True
while not_separator and seek_pos < datalen:
next_char = data[seek_pos]
if next_char in [';', '\n', '\r']:
not_separator = False
cmd += next_char
seek_pos += 1
return cmd, seek_pos
new_data = ''
match = False
while seek_pos < datalen and match == False:
cmd, new_seek_pos = next_cmd(data, seek_pos)
marker_loc = cmd.rfind(self.marker)
# check to see if there's anything after the marker - if not, proceed
if marker_loc > -1 and cmd[marker_loc+1:].rstrip('; \t\n\r') == '':
#current_command = cmd[:marker_loc]
match = True # we found it! set the flag
current_command = cmd[:marker_loc].strip('; \t\n\r')
replacement_str = cmd[:marker_loc] + cmd[marker_loc+1:]
new_data = data[:seek_pos]+replacement_str
seek_pos = new_seek_pos
if not match:
seek_pos = 0
# or else we've got a match - what's the next command?
next_command = None
commands_left = 0
next_command_found = False
while seek_pos < datalen:
cmd, new_seek_pos = next_cmd(data, seek_pos)
if cmd.strip('; \t\n\r') == '':
new_data += cmd
else: # we have a non-blank command:
commands_left += 1 # add one to the stack
if not next_command_found:
next_command_found = True
next_command = cmd.rstrip('; \t\n\r'+self.marker)
# check to see if it's already got a marker (or more than one) and clear them
# and then put exactly one marker back
end_of_command = len(cmd.rstrip('; \t\r\n'+self.marker))
cmd = cmd[:end_of_command] + self.marker + cmd[end_of_command:].replace(self.marker, '')
#new_data += cmd[:-1] + self.marker + cmd[-1]
new_data += cmd
seek_pos = new_seek_pos
return current_command, next_command, commands_left, new_data
def GetCurrentCommand(self):
current_command, next_command, commands_left, new_data = self.ParseData()
return current_command
def __len__(self):
current_command, next_command, commands_left, new_data = self.ParseData()
return commands_left
def clear(self):
"""move the marker to the last command"""
while self.__len__() > 0:
self.GetNextCommand()
def GetNextCommand(self):
current_command, next_command, commands_left, new_data = self.ParseData()
self.WriteData(new_data)
return next_command
def WriteData(self, new_data):
self.data = new_data
def __iter__(self):
return self
def next(self):
self.next_command = self.GetNextCommand()
if self.next_command == None:
raise StopIteration
else:
self.last = self.next_command
return self.next_command
#def popleft(self):
# return next(self)
class PyICPSequenceFile(PyICPSequence):
"""controls and reads from a sequence file, moving the marker around
it is defined as an iterator, so getting the next element moves the marker
can use syntax "for cmd in PyICPSequenceFile(filename):" to iterate
through file, moving the marker
"""
def __init__(self, filename, marker = '%'):
self.filename = filename
PyICPSequence.__init__(self, marker)
def LoadData(self):
with open(self.filename, 'r') as f:
data = f.read()
return data
def WriteData(self, new_data):
with open(self.filename, 'w') as f:
f.write(new_data)
class PyICPSequenceStringIO(PyICPSequence):
def __init__(self, string_io_obj, marker = '%' ):
self.string_io_obj = string_io_obj
PyICPSequence.__init__(self, marker)
def LoadData(self):
self.string_io_obj.seek(0)
data = self.string_io_obj.read()
return data
def WriteData(self, new_data):
StringIO.truncate(self.string_io_obj, 0)
self.string_io_obj.write(new_data)
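# Illustrative usage (editor's sketch, not part of this Python 2-era module): iterate
# an in-memory sequence and watch the '%' marker advance past each command.
if __name__ == "__main__":
    buf = StringIO("fp1%\nfp2\nfp3\n")   # marker starts on the first command
    seq = PyICPSequenceStringIO(buf)
    for cmd in seq:    # each iteration returns the next command and moves the marker
        print(cmd)     # prints "fp2", then "fp3"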
| [
"[email protected]"
] | |
bc583257ba2fa8e75999f1420d42612329c9011a | f34c9ba52317b2871ef309d25c6a62ada2a4c4e3 | /2019-1/exemplos/calc/calc-ast.py | 6121565647ee90707c33f27b56bed0f9abc48cf7 | [] | no_license | azurah/compiladores-1 | b2a24e4dc67b39d106803ce431740918feebeddb | b8bcd58aa5c0ffd02b9c24aa3eaa64b8827d9263 | refs/heads/master | 2022-03-25T06:55:48.714820 | 2019-12-16T14:38:25 | 2019-12-16T14:38:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | import ox
import operator as op
lexer_rules = [
('NUMBER', r'\d+'),
('ADD', r'\+'),
('SUB', r'\-'),
('MUL', r'\*'),
('DIV', r'\/'),
('LPAR', r'\('),
('RPAR', r'\)'),
('VAR', r'[a-zA-Z_]+')
]
lexer = ox.make_lexer(lexer_rules)
tokens = [x for x, _ in lexer_rules]
binop = (lambda x, op, y: (op, x, y))
parser = ox.make_parser([
('expr : term ADD expr', binop),
('expr : term SUB expr', binop),
('expr : term', lambda x: x),
('term : atom MUL term', binop),
('term : atom DIV term', binop),
('term : atom', lambda x: x),
('atom : NUMBER', int),
('atom : VAR', lambda x: ('var', x)),
('atom : LPAR expr RPAR', lambda x, y, z: y),
], tokens)
def find_vars(ast, vars=()):
if not isinstance(ast, tuple):
return set()
head, *tail = ast
if head == 'var':
return {tail[0], *vars}
result = set()
for elem in tail:
result.update(find_vars(elem))
return result
FUNCTIONS = {'+': op.add, '-': op.sub,
'*': op.mul, '/': op.truediv}
def eval_ast(ast, ctx):
if not isinstance(ast, tuple):
return ast
head, *tail = ast
if head == 'var':
return ctx[tail[0]]
else:
args = (eval_ast(x, ctx) for x in tail)
func = FUNCTIONS[head]
return func(*args)
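# Editor's note (illustrative): for the input "2 * x + 3" the parser above builds
# an AST equivalent to ('+', ('*', 2, ('var', 'x')), 3); find_vars returns {'x'}
# and eval_ast with ctx={'x': 5} evaluates it to 13.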
if __name__ == '__main__':
ast = parser(lexer(input('expr: ')))
free_vars = find_vars(ast)
ctx = {x: int(input(x + ': '))
for x in free_vars}
print('result:', eval_ast(ast, ctx))
print('ast:', ast) | [
"[email protected]"
] | |
1db6fe1c31490177a0f129ccbd8add2e3939d210 | ee9655d3ffcdb70ae68692f400096b479b39d0f7 | /Python/kebabize.py | 1180f40f74723d5a9da84caa4daaccca95f4a1db | [] | no_license | yaelBrown/Codewars | 4f123387b8c4ea6e55ec1ff5d2ae9b1d674c06cf | efa10770b593e48579c256b9d6b69deede64e9ba | refs/heads/master | 2020-11-27T16:02:43.409465 | 2020-03-20T00:59:49 | 2020-03-20T00:59:49 | 229,521,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | """
Modify the kebabize function so that it converts a camel case string into a kebab case.
kebabize('camelsHaveThreeHumps') // camels-have-three-humps
kebabize('camelsHave3Humps') // camels-have-humps
Notes:
the returned string should only contain lowercase letters
"""
import string
def kebabize(string):
out = ""
for l in string:
if not l.isalpha():
continue
elif l.isupper():
out += "-"
out += l
else:
out += l
if out == "": return out
if out[0] == "-": out = out[1:]
return out.lower()
# aa = "this is a string"
# print(aa.isalpha())
# print(kebabize("iLike4Cookies"))
# print("-S-O-S"[:1])
print(kebabize("SOS"))
"""
def kebabize(s):
return ''.join(c if c.islower() else '-' + c.lower() for c in s if c.isalpha()).strip('-')
import re
def kebabize(s):
return re.sub('\B([A-Z])', r'-\1', re.sub('\d', '', s)).lower()
import re
def kebabize(s):
s = ''.join([i for i in s if not i.isdigit()])
kebablist = filter(None, re.split("([A-Z][^A-Z]*)", s))
return "-".join(x.lower() for x in kebablist)
"""
| [
"[email protected]"
] | |
8b186ae2a6c66100621dcf603ad1b02c54d99e63 | e4200b764d0b4ffba65180e54cf84b30ee84efcc | /selfdrive/loggerd/SConscript | 6a392d15d6fdafd6f37d01e59e8c9462835f717c | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | kegman/openpilot | c9ba96a72d905956f02c684e065091e023942883 | 54a8614b5a6451154817a4c6c86141c96103ae47 | refs/heads/kegman-0.7 | 2022-05-22T17:07:16.656336 | 2020-01-23T16:40:55 | 2020-01-23T16:40:55 | 229,979,925 | 105 | 212 | MIT | 2022-03-13T05:47:51 | 2019-12-24T17:27:11 | C | UTF-8 | Python | false | false | 311 | Import('env', 'messaging', 'common', 'visionipc')
env.Program(['loggerd.cc', 'logger.c', 'raw_logger.cc', 'encoder.c'], LIBS=[
'zmq', 'czmq', 'capnp', 'kj', 'yaml-cpp', 'z',
'avformat', 'avcodec', 'swscale', 'avutil',
'OmxVenc', 'OmxCore', 'yuv',
'bz2', 'cutils', common, 'json', messaging, visionipc])
| [
"[email protected]"
] | ||
f0f3d9ede2624be9ecb55304fb9360137bbef785 | cf7c928d6066da1ce15d2793dcf04315dda9b9ed | /Jungol/Lv1_LCoder_Python/pyg0_함수3/Main_JO_406_함수3_자가진단6.py | 9cb189b371e5e8cb5f56948b15f087c90bbe53ef | [] | no_license | refresh6724/APS | a261b3da8f53de7ff5ed687f21bb1392046c98e5 | 945e0af114033d05d571011e9dbf18f2e9375166 | refs/heads/master | 2022-02-01T23:31:42.679631 | 2021-12-31T14:16:04 | 2021-12-31T14:16:04 | 251,617,280 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # 9자리 이하의 자연수를 입력받아 재귀함수를 이용하여 각 자리 숫자의 제곱의 합을 출력하는 프로그램을 작성하시오.
def recursive(n):
if n < 10:
return n*n
return recursive(n//10) + recursive(n%10)
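# Worked example (editor's note): recursive(123) = recursive(12) + recursive(3)
#                                               = (1*1 + 2*2) + 3*3 = 14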
n = int(input())
print(recursive(n)) | [
"[email protected]"
] | |
01ecef42b6e7f285755d7f03e8bb2dcc7c993ecf | b532a2188d312a377ea89192569897714f500980 | /memorious/operations/store.py | 52c9e9a5bb52970965df0014bb536fb4c9aec676 | [
"MIT"
] | permissive | patcon/memorious | b41baff81656c343770d9bec8743a7f710daac1b | 316a4bc15a83065106de7e34935b77f337bb11e6 | refs/heads/master | 2021-08-20T00:32:33.320287 | 2017-11-27T13:53:44 | 2017-11-27T13:53:44 | 112,242,987 | 0 | 0 | null | 2017-11-27T20:08:07 | 2017-11-27T20:08:07 | null | UTF-8 | Python | false | false | 1,418 | py | import os
import json
import shutil
from normality import safe_filename
from memorious import settings
def _get_directory_path(context):
"""Get the storage path fro the output."""
path = os.path.join(settings.BASE_PATH, 'store')
path = context.params.get('path', path)
path = os.path.join(path, context.crawler.name)
path = os.path.abspath(os.path.expandvars(path))
try:
os.makedirs(path)
except:
pass
return path
def directory(context, data):
"""Store the collected files to a given directory."""
with context.http.rehash(data) as result:
if not result.ok:
return
content_hash = data.get('content_hash')
if content_hash is None:
context.emit_warning("No content hash in data.")
return
path = _get_directory_path(context)
file_name = data.get('file_name', result.file_name)
file_name = safe_filename(file_name, default='raw')
file_name = '%s.%s' % (content_hash, file_name)
data['_file_name'] = file_name
file_path = os.path.join(path, file_name)
if not os.path.exists(file_path):
shutil.copyfile(result.file_path, file_path)
context.log.info("Store [directory]: %s", file_name)
meta_path = os.path.join(path, '%s.json' % content_hash)
with open(meta_path, 'w') as fh:
json.dump(data, fh)
| [
"[email protected]"
] | |
43a4347035b0440386c7229b773e43eacc80d101 | 27010a7ad70bf69511858a91d42dc7a64e61b66d | /src/0342_power_of_four.py | 76a22ceba8325a13c2b1e510bd9f19870f9f5a0f | [
"Apache-2.0"
] | permissive | hariharanragothaman/leetcode-solutions | fb7d967f2c6e3f4c936e3c7afe369415bc8d2dc6 | 44e759f80d3c9df382fdf8d694d6378881e3649d | refs/heads/master | 2023-09-03T20:31:59.200701 | 2021-10-18T00:50:56 | 2021-10-18T00:50:56 | 267,927,538 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | """
Given an integer n, return true if it is a power of four. Otherwise, return false.
An integer n is a power of four, if there exists an integer x such that n == 4^x.
Example 1:
Input: n = 16
Output: true
Example 2:
Input: n = 5
Output: false
Example 3:
Input: n = 1
Output: true
Constraints:
-2^31 <= n <= 2^31 - 1
"""
import math
from math import log2
class Solution:
def isPowerOfFour(self, n: int) -> bool:
"""
x = math.log(n) / math.log(4)
return x.is_integer()
"""
return (n > 0) and log2(n) % 2 == 0
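    # Editor's note (illustrative): 16 -> log2(16) = 4 and 4 % 2 == 0 -> True,
    # while 8 -> log2(8) = 3 -> False.  A common bit-trick alternative (an
    # assumption, not from this repo): n > 0 and n & (n - 1) == 0 and n & 0x55555555 != 0.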
| [
"[email protected]"
] | |
55c011161382a90a0a4ab3b525884d7be2894ac7 | f0987e17aea6668158cd334c1fbacfe6286d3c77 | /NITA/tests/unit/hldcl/test_host.py | 9cc2bf7b042fe35468894e0fac75147f25f46bc3 | [] | no_license | fengyun4623/file | 00bf21f952ea3f95ffc9fe18448b244b26b7fadb | 3966c63d48557b0b94303896eed7a767593a4832 | refs/heads/master | 2023-04-02T05:01:25.066052 | 2020-07-29T16:15:31 | 2020-07-29T16:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,920 | py | import sys
import unittest2 as unittest
from mock import patch, MagicMock
from nose.plugins.attrib import attr
from lxml import etree
from jnpr.toby.hldcl.host import *
@attr('unit')
class TestHost(unittest.TestCase):
def test_host__next_log_file_name(self):
hobject = MagicMock(spec=Host)
hobject._object_counts = {}
# If no logger created for the filename
self.assertEqual(Host._next_log_file_name(hobject, name='Device'), 'Device')
# If the log filename already has logger created
hobject._object_counts['Device'] = 0
self.assertEqual(Host._next_log_file_name(hobject, name='Device'), 'Device.1')
# Check with no arguments
self.assertRaises(Exception, Host._next_log_file_name, hobject)
def test_host_get_credentials_failures(self):
hobject = MagicMock(spec=Host)
hobject.os = 'Test'
self.assertRaises(
Exception,
Host.get_credentials, hobject, **{'os': 'asdad'}
)
@patch('jnpr.toby.hldcl.host.credentials')
def test_host_get_credentials(self, cred_mock):
hobject = MagicMock(spec=Host)
hobject.os = 'JUNOS'
cred_mock.JUNOS = {'USERNAME': 'user', 'PASSWORD': 'password'}
self.assertEqual(Host.get_credentials(hobject), ('user', 'password'))
hobject.os = 'UNIX'
cred_mock.UNIX = {'USERNAME': 'user', 'PASSWORD': 'password'}
self.assertEqual(Host.get_credentials(hobject), ('user', 'password'))
hobject.os = 'IOS'
cred_mock.IOS = {'USERNAME': 'user', 'PASSWORD': 'password'}
self.assertEqual(Host.get_credentials(hobject), ('user', 'password'))
hobject.os = 'SPIRENT'
cred_mock.SPIRENT = {'USERNAME': 'user', 'PASSWORD': 'password'}
self.assertEqual(Host.get_credentials(hobject), ('user', 'password'))
hobject.os = 'IXIA'
cred_mock.IXIA = {'USERNAME': 'user', 'PASSWORD': 'password'}
self.assertEqual(Host.get_credentials(hobject), ('user', 'password'))
hobject.os = 'WINDOWS'
cred_mock.WINDOWS = {'USERNAME': 'user', 'PASSWORD': 'password'}
self.assertEqual(Host.get_credentials(hobject), ('user', 'password'))
hobject.os = 'BREAKINGPOINT'
cred_mock.BREAKINGPOINT = {'USERNAME': 'user', 'PASSWORD': 'password'}
self.assertEqual(Host.get_credentials(hobject), ('user', 'password'))
hobject.os = 'BPS'
cred_mock.BREAKINGPOINT = {'USERNAME': 'user', 'PASSWORD': 'password'}
self.assertEqual(Host.get_credentials(hobject), ('user', 'password'))
# If default credentials are not available
hobject.os = 'JUNOS'
cred_mock.JUNOS = {'USERNAME': None, 'PASSWORD': None}
self.assertRaises(Exception, Host.get_credentials, hobject)
# Check with user and password passed as arguments
self.assertEqual(Host.get_credentials(hobject, **{'user': 'user', 'password': 'password'}), ('user', 'password'))
self.assertEqual(Host.get_credentials(hobject, **{'user': 'user', 'password': 'password','ssh_key_file':'key_file'}), ('user', 'password'))
@patch('jnpr.toby.hldcl.host.credentials')
def test_host_get_su_credentials_failures(self, cred_mock):
hobject = MagicMock(spec=Host)
hobject.os = 'JUNOS1'
self.assertRaises(Exception, Host.get_su_credentials, hobject)
@patch('jnpr.toby.hldcl.host.credentials')
def test_host_get_su_credentials(self, cred_mock):
hobject = MagicMock(spec=Host)
hobject.os = 'JUNOS'
cred_mock.JUNOS = {'SU': 'user', 'SUPASSWORD': 'password'}
self.assertEqual(Host.get_su_credentials(hobject), ('user', 'password'))
hobject.os = 'UNIX'
cred_mock.UNIX = {'SU': 'user', 'SUPASSWORD': 'password'}
self.assertEqual(Host.get_su_credentials(hobject), ('user', 'password'))
hobject.os = 'IOS'
cred_mock.IOS = {'SU': 'user', 'SUPASSWORD': 'password'}
self.assertEqual(Host.get_su_credentials(hobject), ('user', 'password'))
@patch('jnpr.toby.utils.ftp.FTP')
@patch('jnpr.toby.utils.scp.SCP')
def test_host_upload(self, ftp_mock, scp_mock):
hobject = MagicMock(spec=Host)
hobject.proxy = True
hobject.proxy_host = 'host'
hobject.proxy_user = 'host'
hobject.proxy_password = 'host'
hobject.proxy_ssh_key = 'host'
hobject.proxy_port = 'host'
hobject.connect_mode = 'telnet'
hobject.host = 'device-a'
hobject.user = 'device-a'
hobject.password = 'device-a'
hobject.text_port = None
# telnet as connect mode
hobject.controllers_data = {}
hobject.controllers_data['mgt-ip'] = hobject.host
self.assertTrue(Host.upload(hobject, local_file='', remote_file=''))
# ssh as connect mode
hobject.connect_mode = 'ssh'
self.assertTrue(Host.upload(hobject, local_file='', remote_file=''))
# with only user
self.assertTrue(Host.upload(hobject, local_file='', remote_file='',user='user'))
# with only password
self.assertTrue(Host.upload(hobject, local_file='', remote_file='', password="password"))
# with user and password
self.assertTrue(Host.upload(hobject, local_file='', remote_file='', user="user", password="password"))
@patch('jnpr.toby.utils.ftp.FTP')
@patch('jnpr.toby.utils.scp.SCP')
def test_host_upload_failures(self, ftp_mock, scp_mock):
hobject = MagicMock(spec=Host)
hobject.connect_mode = 'telnet'
hobject.host = 'device-a'
hobject.user = 'device-a'
hobject.password = 'device-a'
# Invalid protocol
self.assertRaises(
Exception,
Host.upload, hobject, local_file='', remote_file='',
protocol='sftp'
)
@patch('jnpr.toby.utils.ftp.FTP')
@patch('jnpr.toby.utils.scp.SCP')
def test_host_download(self, ftp_mock, scp_mock):
hobject = MagicMock(spec=Host)
hobject.proxy = True
hobject.connect_mode = 'telnet'
hobject.host = 'device-a'
hobject.user = 'device-a'
hobject.password = 'device-a'
hobject.proxy_host = 'host'
hobject.proxy_user = 'host'
hobject.proxy_password = 'host'
hobject.proxy_ssh_key = ''
hobject.proxy_port = 'host'
hobject.text_port = None
hobject.controllers_data = {}
hobject.controllers_data['mgt-ip'] = hobject.host
# telnet as connect mode
self.assertTrue(Host.download(hobject, local_file='', remote_file=''))
# ssh as connect mode
hobject.connect_mode = 'ssh'
self.assertTrue(Host.download(hobject, local_file='', remote_file=''))
# with only user
self.assertTrue(Host.download(hobject, local_file='', remote_file='', user="user"))
# with only password
self.assertTrue(Host.download(hobject, local_file='', remote_file='', password="password"))
# with user and password
self.assertTrue(Host.download(hobject, local_file='', remote_file='', user="user", password="password"))
@patch('jnpr.toby.utils.ftp.FTP')
@patch('jnpr.toby.utils.scp.SCP')
def test_host_download_failures(self, ftp_mock, scp_mock):
hobject = MagicMock(spec=Host)
hobject.connect_mode = 'telnet'
hobject.host = 'device-a'
hobject.user = 'device-a'
hobject.password = 'device-a'
# Invalid protocol
self.assertRaises(
Exception,
Host.download, hobject, local_file='', remote_file='',
protocol='sftp'
)
@patch('jnpr.toby.hldcl.host.Host.get_credentials',return_value=('user','password'))
@patch('jnpr.toby.hldcl.host.Logger')
def test_host_init(self, logger_mock, get_cred_patch):
import builtins
builtins.t = self
t.is_robot = True
t.background_logger = MagicMock()
t._script_name = 'name'
t.t_dict = {'console_log':'test'}
type(logger_mock.return_value).level = 10
hobject = Host(host='host', os='Junos',global_logging=True, device_logging=True, re_name='re0')
self.assertEqual(hobject.host, 'host')
self.assertEqual(hobject.os, 'Junos')
self.assertEqual(hobject.tag, None)
self.assertEqual(hobject.logger_name, 'host')
#logger_mock.assert_any_call('host', console=False)
#logger_mock.assert_any_call('name', console=False)
#assert logger_mock.call_count == 2
#self.assertFalse(hobject.proxy)
#self.assertEqual(hobject.proxy_host, 'a')
#self.assertEqual(hobject.proxy_password, 'a')
#self.assertEqual(hobject.proxy_user, 'a')
#self.assertEqual(hobject.proxy_port, 'a')
hobject = Host(host='host', os='Junos', tag='tag',ssh_key_file="test",global_logging=True, device_logging=True, re_name='re0')
self.assertEqual(hobject.host, 'host')
self.assertEqual(hobject.os, 'Junos')
self.assertEqual(hobject.tag, 'tag')
self.assertEqual(hobject.logger_name, 'host.1')
#logger_mock.assert_any_call('host.1', console=False)
#assert logger_mock.call_count == 4
hobject = Host(host='host', os='Junos', tag='tag', hostname='hostname',global_logging=True, device_logging=True, re_name='re0')
self.assertEqual(hobject.host, 'host')
self.assertEqual(hobject.os, 'Junos')
self.assertEqual(hobject.tag, 'tag')
self.assertEqual(hobject.name, 'hostname')
self.assertEqual(hobject.logger_name, 'hostname')
#assert logger_mock.call_count == 6
t.is_robot = False
t.background_logger = None
self.assertIsInstance(Host(host='host', os='Junos',global_logging=True, device_logging=True, re_name='re0'), Host)
#logger_mock.assert_any_call('name', console=True)
#logger_mock.assert_any_call('host.2', console=False)
#assert logger_mock.call_count == 8
del builtins.t
self.assertIsInstance(Host(host='host', os='Junos',global_logging=True, device_logging=True, re_name='re0'), Host)
#logger_mock.assert_any_call('host.3', console=True)
#assert logger_mock.call_count == 9
# With no arguments
self.assertRaises(Exception, Host)
# With only one argument
self.assertRaises(Exception, Host, host='host')
self.assertRaises(Exception, Host, os='Junos')
get_cred_patch.return_value = ('user', None)
self.assertIsInstance(Host(host='host', os='Junos',global_logging=True, device_logging=True, re_name='re0'), Host)
def test_host_log(self):
hobject = MagicMock(spec=Host)
hobject.device_logger = MagicMock()
hobject.logger_name = MagicMock()
hobject.global_logger = MagicMock()
hobject.device_logger_flag = True
hobject.global_logger_flag = True
xmldata = etree.XML('<software-information></software-information>')
import builtins
builtins.t = self
## Check for t_exists = False
hobject.t_exists = False
self.assertIsNone(Host.log(hobject, message=xmldata))
self.assertTrue(hobject.device_logger._log.called)
# Check with only 'level' argument
self.assertIsNone(Host.log(hobject, level='INFO'))
assert hobject.device_logger._log.call_count == 2
self.assertFalse(hobject.global_logger._log.called)
# Check with only 'message' argument
self.assertIsNone(Host.log(hobject, message=xmldata))
assert hobject.device_logger._log.call_count == 3
self.assertFalse(hobject.global_logger._log.called)
# with two arguments
self.assertIsNone(Host.log(hobject, message=xmldata, level='INFO'))
assert hobject.device_logger._log.call_count == 4
self.assertFalse(hobject.global_logger._log.called)
## Check for t_exists = True
hobject.t_exists = True
t.is_robot = True
t.background_logger = MagicMock()
t.t_dict = {'console_log':'test'}
hobject.global_logger.level = 30
with patch('robot.api.logger') as robot_logger:
# with two arguments
self.assertIsNone(Host.log(hobject, message=xmldata, level='WARN'))
assert hobject.device_logger._log.call_count == 5
self.assertTrue(robot_logger.warn.called)
self.assertTrue(hobject.global_logger._log.called)
t.is_robot = False
t.background_logger = None
self.assertIsNone(Host.log(hobject, message=xmldata))
assert hobject.device_logger._log.call_count == 6
self.assertTrue(hobject.global_logger._log.called)
# Check with no arguments
self.assertRaises(Exception, Host.log, hobject)
t.is_robot = True
del t.t_dict['console_log']
self.assertIsNone(Host.log(hobject, message=xmldata))
t.t_dict = {'console_log': None}
self.assertIsNone(Host.log(hobject, message=xmldata))
def test_device_upload_file(self):
dobject = MagicMock()
dobject.upload = MagicMock(return_value='test_str')
self.assertEqual(upload_file(dobject), 'test_str')
# Exception case
dobject.upload = MagicMock(return_value=False)
self.assertRaises(Exception, upload_file, dobject)
def test_device_download_file(self):
dobject = MagicMock()
dobject.download = MagicMock(return_value='test_str')
self.assertEqual(download_file(dobject), 'test_str')
# Exception case
dobject.download = MagicMock(return_value=False)
self.assertRaises(Exception, download_file, dobject)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestHost)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"[email protected]"
] | |
f6c388f9433fe0af9510b9b05baaba3657776db1 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_126/128.py | 0b43d4fbaa876ff9dcf5d33290291df8461be100 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,683 | py | import os
import unittest
import itertools
from python_toolbox.cute_iter_tools import consecutive_pairs
PROB_NAME = 'consonants'
INPUT_TYPE = 'large'
VOWELS = 'aeiou'
def solve(case):
"""break 'case', solve and return the solution"""
name, n = case
l = len(name)
consecutive_consonants = 0
last_end = 0
nval = 0
for idx, c in enumerate(name):
if c not in VOWELS:
consecutive_consonants += 1
else:
consecutive_consonants = 0
if consecutive_consonants >= n:
start, end = idx - n + 1, idx
if consecutive_consonants > n:
start_ss = start
else:
if n == 1:
start_ss = last_end + 1
else:
start_ss = (last_end - n + 2) if last_end > 0 else 0
end_ss = l
last_end = end
left, right = max(start - start_ss + 1, 1), end_ss - end
nval += left * right
return nval
def read_case(lines):
name, n = lines.pop(0).split()
return (name, int(n))
def read_file(filepath):
"""Read the input file and return a list of cases in a tuple format."""
cases = []
with open(filepath, 'rt') as fobj:
lines = fobj.readlines()
num_cases = int(lines.pop(0))
for _ in range(num_cases):
cases.append(read_case(lines))
return cases
def write_results(results, outfile):
with open(outfile, 'wt') as f:
for idx, result in enumerate(results):
f.write('Case #{}: {}\n'.format(idx + 1, result))
def main(infile, outfile):
cases = read_file(infile)
results = [solve(case) for case in cases]
write_results(results, outfile)
if INPUT_TYPE:
main(os.path.join('io', '{}_{}.in'.format(PROB_NAME, INPUT_TYPE)),
os.path.join('io', '{}_{}.out'.format(PROB_NAME, INPUT_TYPE)))
class UnitTest(unittest.TestCase):
CASES = {('quartz', 3): 4,
('straight', 3): 11,
('gcj', 2): 3,
('tsetse', 2): 11,
('pack', 1): 9}
# ('packmyboxwithfivedozenliquorjugs', 1): 516}
# ('z' * 10 ** 6, 4): 0}
def runTest(self):
message = 'Wrong result for case.\nCase: {}\nResult: {}\n'\
'Expected result: {}'
for case, result in self.CASES.iteritems():
self.assertEqual(solve(case), result, message.format(case,
solve(case),
result))
| [
"[email protected]"
] | |
5eb911a7220230a00c7447f3afc31e62046a0e8e | 36ff0f28aeb47c03d8e22f69057c12f830e917e8 | /Blog/admin.py | 55f78d419afa2db728fbf7600fa18758d465be30 | [] | no_license | michael-basweti/duke | 673721540fa1b260508f03518b0043e8e1fc3f14 | 5eae51ceac89e77c6ab712e6311fef9f15fb51ad | refs/heads/master | 2022-12-06T02:53:04.494299 | 2019-07-30T10:47:06 | 2019-07-30T10:47:06 | 195,955,279 | 0 | 0 | null | 2022-11-22T04:09:15 | 2019-07-09T07:25:08 | CSS | UTF-8 | Python | false | false | 306 | py | from django.contrib import admin
from .models import Blog
class Post(admin.ModelAdmin):
exclude = ('author',)
list_display = ('title', 'author', 'date_added')
def save_model(self, request, obj, form, change):
obj.author = request.user
obj.save()
admin.site.register(Blog, Post)
| [
"[email protected]"
] | |
19a633a72dd7eb16a803a4443726aff405985b67 | 836705d3c321ea8e62f3b2a0ea7e837fe5d45dfd | /3-1.py | 164f25998bf0219f9aa1f0012d4645ca8930a802 | [] | no_license | Accomlish/tensorflow_learn | e11acedbb81f9ef08866a15daf5155853d81cb49 | 19126ae75e1460aa0bb3bd041d96f99db56181d0 | refs/heads/master | 2021-05-22T16:50:09.878737 | 2020-04-04T14:36:22 | 2020-04-04T14:36:22 | 253,009,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | """
An example of regression:
a non-linear regression example.
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Use numpy to generate 500 sample points
x_data = np.linspace(-1,1,500)[:,np.newaxis]
print(x_data.shape)
noise = np.random.normal(0,0.5,x_data.shape)
y_data = np.square(x_data) + noise
# Define two placeholders
x = tf.placeholder(tf.float32,[None,1])  # float data, n rows by 1 column
y = tf.placeholder(tf.float32,[None,1])
# Define the middle (hidden) layer of the neural network
Weight_L1 = tf.Variable(tf.random_normal([1,10]))
biases_L1 = tf.Variable(tf.zeros([1,10]))
Wx_plus_L1 = tf.matmul(x,Weight_L1)+ biases_L1
L1 = tf.nn.tanh(Wx_plus_L1)
# Define the output layer
Weight_L2 = tf.Variable(tf.random_normal([10,1]))
biases_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_L2 = tf.matmul(L1,Weight_L2) + biases_L2
prediction = tf.nn.tanh(Wx_plus_L2)
# Quadratic (squared-error) cost function
loss = tf.reduce_mean(tf.square(y-prediction))
# Use gradient descent
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for _ in range(2000):
sess.run(train_step,feed_dict={x:x_data,y:y_data})
    # Get the predicted values
prediction_value = sess.run(prediction,feed_dict={x:x_data})
    # Plot the results
plt.figure()
plt.scatter(x_data,y_data)
plt.plot(x_data,prediction_value,'r-',lw=5)
plt.show()
| [
"your email"
] | your email |
d1143635201d221e500300bb7ebd02e942d5c100 | b3b38ebf386bbd323d832ee077ae249a6ab331e9 | /Day 25/Day 25.py | ef5461ad1763efb757ef34109cad57f402fc1d04 | [] | no_license | bakkerjangert/AoC_2017 | 7bae1b1b9da5b2263d911eff5bbadc2849716be6 | 1c36b80965875cdcbc50c6abe75cc5def72ee573 | refs/heads/master | 2023-02-03T05:19:55.933367 | 2020-12-18T14:39:40 | 2020-12-18T14:39:40 | 322,620,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,906 | py | state = 'A'
steps = 12172063
data = [0]
index = 0
def move_left(index):
if index == 0:
data.insert(0, 0)
else:
index -= 1
return index
def move_right(index):
if index == len(data) - 1:
data.append(0)
index += 1
return index
for step in range(steps):
if step % 10000 == 0:
print(f'Currently at {round(step / steps * 100, 2)}%')
if state == 'A':
if data[index] == 0:
data[index] = 1
index = move_right(index)
state = 'B'
else:
data[index] = 0
index = move_left(index)
state = 'C'
elif state == 'B':
if data[index] == 0:
data[index] = 1
index = move_left(index)
state = 'A'
else:
data[index] = 1
index = move_left(index)
state = 'D'
elif state == 'C':
if data[index] == 0:
data[index] = 1
index = move_right(index)
state = 'D'
else:
data[index] = 0
index = move_right(index)
state = 'C'
elif state == 'D':
if data[index] == 0:
data[index] = 0
index = move_left(index)
state = 'B'
else:
data[index] = 0
index = move_right(index)
state = 'E'
elif state == 'E':
if data[index] == 0:
data[index] = 1
index = move_right(index)
state = 'C'
else:
data[index] = 1
index = move_left(index)
state = 'F'
elif state == 'F':
if data[index] == 0:
data[index] = 1
index = move_left(index)
state = 'E'
else:
data[index] = 1
index = move_right(index)
state = 'A'
print(f'The answer = {data.count(1)}')
| [
"[email protected]"
] | |
b286d2b08daca3903a5d072416370fd615da25e7 | 95b87a3c8f5492feb8c4faea9202c68f560544b5 | /tests/parsers/mcafeeav.py | 084d4b95a852fc78fac08e330c14e5a16a80d540 | [
"Apache-2.0"
] | permissive | sebdraven/plaso | 82e87149e845347a0481d9908117c0c227960446 | 77c7f00f0f648b158bd9c9cc3f698dd5ff294b4d | refs/heads/master | 2020-12-02T08:08:48.427006 | 2017-07-08T17:07:50 | 2017-07-08T17:07:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,402 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the McAfee AV Log parser."""
import unittest
from plaso.formatters import mcafeeav # pylint: disable=unused-import
from plaso.lib import timelib
from plaso.parsers import mcafeeav
from tests import test_lib as shared_test_lib
from tests.parsers import test_lib
class McafeeAccessProtectionUnitTest(test_lib.ParserTestCase):
"""Tests for the McAfee AV Log parser."""
@shared_test_lib.skipUnlessHasTestFile([u'AccessProtectionLog.txt'])
def testParse(self):
"""Tests the Parse function."""
parser_object = mcafeeav.McafeeAccessProtectionParser()
storage_writer = self._ParseFile(
[u'AccessProtectionLog.txt'], parser_object)
# The file contains 14 lines which results in 14 events.
self.assertEqual(storage_writer.number_of_events, 14)
event = storage_writer.events[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-09-27 14:42:26')
self.assertEqual(event.timestamp, expected_timestamp)
# TODO: Test that the UTF-8 byte order mark gets removed from
# the first line.
# Test this entry:
# 9/27/2013 2:42:26 PM Blocked by Access Protection rule
# SOMEDOMAIN\someUser C:\Windows\System32\procexp64.exe C:\Program Files
# (x86)\McAfee\Common Framework\UdaterUI.exe Common Standard
# Protection:Prevent termination of McAfee processes Action blocked :
# Terminate
event = storage_writer.events[1]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-09-27 14:42:39')
self.assertEqual(event.timestamp, expected_timestamp)
self.assertEqual(event.username, u'SOMEDOMAIN\\someUser')
self.assertEqual(
event.filename, u'C:\\Windows\\System32\\procexp64.exe')
expected_message = (
u'File Name: C:\\Windows\\System32\\procexp64.exe '
u'User: SOMEDOMAIN\\someUser '
u'C:\\Program Files (x86)\\McAfee\\Common Framework\\Frame'
u'workService.exe '
u'Blocked by Access Protection rule '
u'Common Standard Protection:Prevent termination of McAfee processes '
u'Action blocked : Terminate')
expected_short_message = (
u'C:\\Windows\\System32\\procexp64.exe '
u'Action blocked : Terminate')
self._TestGetMessageStrings(event, expected_message, expected_short_message)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
955e1f1ce5febef1ea2829471b58315b4d9b2f23 | 3eae9c14c119ee2d6a7d02ef1ba5d61420959e3c | /modules/core/rwvx/rwsched/src/rwsched_gi_filter.py | b06f9bbfdb82967c05b581ec652bad0f46393135 | [
"Apache-2.0"
] | permissive | RIFTIO/RIFT.ware | 94d3a34836a04546ea02ec0576dae78d566dabb3 | 4ade66a5bccbeb4c5ed5b56fed8841e46e2639b0 | refs/heads/RIFT.ware-4.4.1 | 2020-05-21T14:07:31.092287 | 2017-06-05T16:02:48 | 2017-06-05T16:02:48 | 52,545,688 | 9 | 8 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | #!/usr/bin/python
# STANDARD_RIFT_IO_COPYRIGHT
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
import sys
# rwsched_instance_ptr_t -> RwschedInstance
renames = {
0: {'rwsched': 'RwSched'},
1: {'instance': 'Instance',
'CFRunLoop': 'CFRunLoop',
'CFRunLoopSource': 'CFRunLoopSource',
'CFRunLoopTimer': 'CFRunLoopTimer',
'CFRunLoopTimerContext': 'CFRunLoopTimerContext',
'CFSocket': 'CFSocket' },
}
def gobjectify(ident):
if not ident.startswith('rwsched_'):
if not ident.startswith('rwsched'):
return ident
# Remove trailing '_[a-z]' from ident
if ident.endswith('ptr_t'):
ident = ident[:-5]
if ident.endswith('_t'):
ident = ident[:-2]
elif ident.endswith('Ref'):
ident = ident[:-3]
s = ''.join(renames.get(depth, {}).get(name, name.title())
for depth, name in enumerate(ident.split('_')))
return s
if __name__ == '__main__':
text = gobjectify(sys.stdin.read().strip())
sys.stdout.write(text)
| [
"[email protected]"
] | |
07039bdd5738a740ab874c485755f41c392be310 | 2212a32833776a5d5d2164d8efd11bd18bd3f768 | /tf_agents/bandits/agents/neural_linucb_agent.py | 1c803409ec0503339df767da78ddd381ef5a5aa6 | [
"Apache-2.0"
] | permissive | tensorflow/agents | f39805fb98ef9af712dcaff3ba49e1ac6d42804b | eca1093d3a047e538f17f6ab92ab4d8144284f23 | refs/heads/master | 2023-08-14T04:56:30.774797 | 2023-08-02T17:43:44 | 2023-08-02T17:44:09 | 157,936,206 | 2,755 | 848 | Apache-2.0 | 2023-07-26T02:35:32 | 2018-11-17T00:29:12 | Python | UTF-8 | Python | false | false | 25,492 | py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the Neural + LinUCB bandit algorithm.
Applies LinUCB on top of an encoding network.
Since LinUCB is a linear method, the encoding network is used to capture the
non-linear relationship between the context features and the expected rewards.
The encoding network may be already trained or not; if not trained, the
method can optionally train it using epsilon greedy.
Reference:
Carlos Riquelme, George Tucker, Jasper Snoek,
`Deep Bayesian Bandits Showdown: An Empirical Comparison of Bayesian Deep
Networks for Thompson Sampling`, ICLR 2018.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Optional, Sequence, Text
import gin
import tensorflow as tf
from tf_agents.agents import data_converter
from tf_agents.agents import tf_agent
from tf_agents.bandits.agents import linear_bandit_agent as linear_agent
from tf_agents.bandits.agents import utils as bandit_utils
from tf_agents.bandits.policies import neural_linucb_policy
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.policies import utils as policy_utilities
from tf_agents.typing import types
from tf_agents.utils import common
from tf_agents.utils import eager_utils
class NeuralLinUCBVariableCollection(tf.Module):
"""A collection of variables used by `NeuralLinUCBAgent`."""
def __init__(
self,
num_actions: int,
encoding_dim: int,
dtype: tf.DType = tf.float64,
name: Optional[Text] = None,
):
"""Initializes an instance of `NeuralLinUCBVariableCollection`.
Args:
num_actions: (int) number of actions the agent acts on.
encoding_dim: (int) The dimensionality of the output of the encoding
network.
dtype: The type of the variables. Should be one of `tf.float32` and
`tf.float64`.
name: (string) the name of this instance.
"""
tf.Module.__init__(self, name=name)
self.actions_from_reward_layer = tf.compat.v2.Variable(
True, dtype=tf.bool, name='is_action_from_reward_layer'
)
self.cov_matrix_list = []
self.data_vector_list = []
# We keep track of the number of samples per arm.
self.num_samples_list = []
for k in range(num_actions):
self.cov_matrix_list.append(
tf.compat.v2.Variable(
tf.zeros([encoding_dim, encoding_dim], dtype=dtype),
name='a_{}'.format(k),
)
)
self.data_vector_list.append(
tf.compat.v2.Variable(
tf.zeros(encoding_dim, dtype=dtype), name='b_{}'.format(k)
)
)
self.num_samples_list.append(
tf.compat.v2.Variable(
tf.zeros([], dtype=dtype), name='num_samples_{}'.format(k)
)
)
@gin.configurable
class NeuralLinUCBAgent(tf_agent.TFAgent):
"""An agent implementing the LinUCB algorithm on top of a neural network."""
def __init__(
self,
time_step_spec: types.TimeStep,
action_spec: types.BoundedTensorSpec,
encoding_network: types.Network,
encoding_network_num_train_steps: int,
encoding_dim: int,
optimizer: types.Optimizer,
variable_collection: Optional[NeuralLinUCBVariableCollection] = None,
alpha: float = 1.0,
gamma: float = 1.0,
epsilon_greedy: float = 0.0,
observation_and_action_constraint_splitter: Optional[
types.Splitter
] = None,
accepts_per_arm_features: bool = False,
distributed_train_encoding_network: bool = False,
# Params for training.
error_loss_fn: types.LossFn = tf.compat.v1.losses.mean_squared_error,
gradient_clipping: Optional[float] = None,
# Params for debugging.
debug_summaries: bool = False,
summarize_grads_and_vars: bool = False,
train_step_counter: Optional[tf.Variable] = None,
emit_policy_info: Sequence[Text] = (),
emit_log_probability: bool = False,
dtype: tf.DType = tf.float64,
name: Optional[Text] = 'neural_linucb_agent',
):
"""Initialize an instance of `NeuralLinUCBAgent`.
Args:
time_step_spec: A `TimeStep` spec describing the expected `TimeStep`s.
action_spec: A scalar `BoundedTensorSpec` with `int32` or `int64` dtype
describing the number of actions for this agent.
encoding_network: a Keras network that encodes the observations.
encoding_network_num_train_steps: how many training steps to run for
training the encoding network before switching to LinUCB. If negative,
the encoding network is assumed to be already trained.
encoding_dim: the dimension of encoded observations.
optimizer: The optimizer to use for training.
variable_collection: Instance of `NeuralLinUCBVariableCollection`.
Collection of variables to be updated by the agent. If `None`, a new
instance of `LinearBanditVariables` will be created. Note that this
collection excludes the variables owned by the encoding network.
alpha: (float) positive scalar. This is the exploration parameter that
multiplies the confidence intervals.
gamma: a float forgetting factor in [0.0, 1.0]. When set to 1.0, the
algorithm does not forget.
epsilon_greedy: A float representing the probability of choosing a random
action instead of the greedy action.
observation_and_action_constraint_splitter: A function used for masking
valid/invalid actions with each state of the environment. The function
takes in a full observation and returns a tuple consisting of 1) the
part of the observation intended as input to the bandit agent and
policy, and 2) the boolean mask. This function should also work with a
`TensorSpec` as input, and should output `TensorSpec` objects for the
observation and mask.
accepts_per_arm_features: (bool) Whether the policy accepts per-arm
features.
distributed_train_encoding_network: (bool) whether to train the encoding
network or not. This applies only in distributed training setting. When
set to true this agent will train the encoding network. Otherwise, it
will assume the encoding network is already trained and will train
LinUCB on top of it.
error_loss_fn: A function for computing the error loss, taking parameters
labels, predictions, and weights (any function from tf.losses would
work). The default is `tf.losses.mean_squared_error`.
gradient_clipping: A float representing the norm length to clip gradients
(or None for no clipping.)
debug_summaries: A Python bool, default False. When True, debug summaries
are gathered.
summarize_grads_and_vars: A Python bool, default False. When True,
gradients and network variable summaries are written during training.
train_step_counter: An optional `tf.Variable` to increment every time the
train op is run. Defaults to the `global_step`.
emit_policy_info: (tuple of strings) what side information we want to get
as part of the policy info. Allowed values can be found in
`policy_utilities.PolicyInfo`.
emit_log_probability: Whether the NeuralLinUCBPolicy emits
log-probabilities or not. Since the policy is deterministic, the
probability is just 1.
dtype: The type of the parameters stored and updated by the agent. Should
be one of `tf.float32` and `tf.float64`. Defaults to `tf.float64`.
name: a name for this instance of `NeuralLinUCBAgent`.
Raises:
TypeError if variable_collection is not an instance of
`NeuralLinUCBVariableCollection`.
ValueError if dtype is not one of `tf.float32` or `tf.float64`.
"""
tf.Module.__init__(self, name=name)
common.tf_agents_gauge.get_cell('TFABandit').set(True)
self._num_actions = policy_utilities.get_num_actions_from_tensor_spec(
action_spec
)
self._num_models = 1 if accepts_per_arm_features else self._num_actions
self._observation_and_action_constraint_splitter = (
observation_and_action_constraint_splitter
)
self._accepts_per_arm_features = accepts_per_arm_features
self._alpha = alpha
if variable_collection is None:
variable_collection = NeuralLinUCBVariableCollection(
self._num_models, encoding_dim, dtype
)
elif not isinstance(variable_collection, NeuralLinUCBVariableCollection):
raise TypeError(
'Parameter `variable_collection` should be '
'of type `NeuralLinUCBVariableCollection`.'
)
self._variable_collection = variable_collection
self._gamma = gamma
if self._gamma < 0.0 or self._gamma > 1.0:
raise ValueError('Forgetting factor `gamma` must be in [0.0, 1.0].')
self._dtype = dtype
if dtype not in (tf.float32, tf.float64):
raise ValueError(
'Agent dtype should be either `tf.float32 or `tf.float64`.'
)
self._epsilon_greedy = epsilon_greedy
reward_layer = tf.keras.layers.Dense(
self._num_models,
kernel_initializer=tf.random_uniform_initializer(
minval=-0.03, maxval=0.03
),
use_bias=False,
activation=None,
name='reward_layer',
)
encoding_network.create_variables()
self._encoding_network = encoding_network
reward_layer.build(input_shape=tf.TensorShape([None, encoding_dim]))
self._reward_layer = reward_layer
self._encoding_network_num_train_steps = encoding_network_num_train_steps
self._encoding_dim = encoding_dim
self._optimizer = optimizer
self._error_loss_fn = error_loss_fn
self._gradient_clipping = gradient_clipping
train_step_counter = tf.compat.v1.train.get_or_create_global_step()
self._distributed_train_encoding_network = (
distributed_train_encoding_network
)
policy = neural_linucb_policy.NeuralLinUCBPolicy(
encoding_network=self._encoding_network,
encoding_dim=self._encoding_dim,
reward_layer=self._reward_layer,
epsilon_greedy=self._epsilon_greedy,
actions_from_reward_layer=self.actions_from_reward_layer,
cov_matrix=self.cov_matrix,
data_vector=self.data_vector,
num_samples=self.num_samples,
time_step_spec=time_step_spec,
alpha=alpha,
emit_policy_info=emit_policy_info,
emit_log_probability=emit_log_probability,
accepts_per_arm_features=accepts_per_arm_features,
distributed_use_reward_layer=distributed_train_encoding_network,
observation_and_action_constraint_splitter=(
observation_and_action_constraint_splitter
),
)
training_data_spec = None
if accepts_per_arm_features:
training_data_spec = bandit_spec_utils.drop_arm_observation(
policy.trajectory_spec
)
super(NeuralLinUCBAgent, self).__init__(
time_step_spec=time_step_spec,
action_spec=policy.action_spec,
policy=policy,
collect_policy=policy,
train_sequence_length=None,
training_data_spec=training_data_spec,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=train_step_counter,
)
self._as_trajectory = data_converter.AsTrajectory(
self.data_context, sequence_length=None
)
@property
def num_actions(self):
return self._num_actions
@property
def actions_from_reward_layer(self):
return self._variable_collection.actions_from_reward_layer
@property
def cov_matrix(self):
return self._variable_collection.cov_matrix_list
@property
def data_vector(self):
return self._variable_collection.data_vector_list
@property
def num_samples(self):
return self._variable_collection.num_samples_list
@property
def alpha(self):
return self._alpha
@property
def variables(self):
return (
self.num_samples
+ self.cov_matrix
+ self.data_vector
+ self._encoding_network.trainable_weights
+ self._reward_layer.trainable_weights
+ [self.train_step_counter]
)
@alpha.setter
def update_alpha(self, alpha):
return tf.compat.v1.assign(self._alpha, alpha)
def _initialize(self):
tf.compat.v1.variables_initializer(self.variables)
def compute_summaries(self, loss):
with tf.name_scope('Losses/'):
tf.compat.v2.summary.scalar(
name='total_loss', data=loss, step=self.train_step_counter
)
if self._summarize_grads_and_vars:
with tf.name_scope('Variables/'):
trainable_variables = (
self._encoding_network.trainable_weights
+ self._reward_layer.trainable_weights
)
for var in trainable_variables:
tf.compat.v2.summary.histogram(
name=var.name.replace(':', '_'),
data=var,
step=self.train_step_counter,
)
def _loss_using_reward_layer(
self,
observations: types.NestedTensor,
actions: types.Tensor,
rewards: types.Tensor,
weights: Optional[types.Float] = None,
training: bool = False,
) -> tf_agent.LossInfo:
"""Computes loss for reward prediction training.
Args:
observations: A batch of observations.
actions: A batch of actions.
rewards: A batch of rewards.
weights: Optional scalar or elementwise (per-batch-entry) importance
weights. The output batch loss will be scaled by these weights, and the
final scalar loss is the mean of these values.
training: Whether the loss is being used for training.
Returns:
loss: A `LossInfo` containing the loss for the training step.
"""
with tf.name_scope('loss'):
encoded_observation, _ = self._encoding_network(
observations, training=training
)
encoded_observation = tf.reshape(
encoded_observation, shape=[-1, self._encoding_dim]
)
predicted_rewards = self._reward_layer(
encoded_observation, training=training
)
chosen_actions_predicted_rewards = common.index_with_actions(
predicted_rewards, tf.cast(actions, dtype=tf.int32)
)
loss = self._error_loss_fn(
rewards,
chosen_actions_predicted_rewards,
1 if weights is None else weights,
)
if self._summarize_grads_and_vars:
with tf.name_scope('Per_arm_loss/'):
for k in range(self._num_models):
loss_mask_for_arm = tf.cast(tf.equal(actions, k), tf.float32)
loss_for_arm = self._error_loss_fn(
rewards,
chosen_actions_predicted_rewards,
weights=loss_mask_for_arm,
)
tf.compat.v2.summary.scalar(
name='loss_arm_' + str(k),
data=loss_for_arm,
step=self.train_step_counter,
)
return tf_agent.LossInfo(loss, extra=())
def compute_loss_using_reward_layer(
self,
observation: types.NestedTensor,
action: types.Tensor,
reward: types.Tensor,
weights: Optional[types.Float] = None,
training: bool = False,
) -> tf_agent.LossInfo:
"""Computes loss using the reward layer.
Args:
observation: A batch of observations.
action: A batch of actions.
reward: A batch of rewards.
weights: Optional scalar or elementwise (per-batch-entry) importance
weights. The output batch loss will be scaled by these weights, and the
final scalar loss is the mean of these values.
training: Whether the loss is being used for training.
Returns:
loss: A `LossInfo` containing the loss for the training step.
"""
# Update the neural network params.
with tf.GradientTape() as tape:
loss_info = self._loss_using_reward_layer(
observation, action, reward, weights, training=training
)
tf.debugging.check_numerics(loss_info[0], 'Loss is inf or nan')
tf.compat.v2.summary.scalar(
name='using_reward_layer', data=1, step=self.train_step_counter
)
if self._summarize_grads_and_vars:
self.compute_summaries(loss_info.loss)
variables_to_train = (
self._encoding_network.trainable_weights
+ self._reward_layer.trainable_weights
)
if not variables_to_train:
raise ValueError('No variable to train in the agent.')
grads = tape.gradient(loss_info.loss, variables_to_train)
grads_and_vars = tuple(zip(grads, variables_to_train))
if self._gradient_clipping is not None:
grads_and_vars = eager_utils.clip_gradient_norms(
grads_and_vars, self._gradient_clipping
)
if self._summarize_grads_and_vars:
with tf.name_scope('Reward_network/'):
eager_utils.add_variables_summaries(
grads_and_vars, self.train_step_counter
)
eager_utils.add_gradients_summaries(
grads_and_vars, self.train_step_counter
)
self._optimizer.apply_gradients(grads_and_vars)
self.train_step_counter.assign_add(1)
return loss_info
def compute_loss_using_linucb(
self,
observation: types.NestedTensor,
action: types.Tensor,
reward: types.Tensor,
weights: Optional[types.Float] = None,
training: bool = False,
) -> tf_agent.LossInfo:
"""Computes the loss using LinUCB.
Args:
observation: A batch of observations.
action: A batch of actions.
reward: A batch of rewards.
weights: unused weights.
training: Whether the loss is being used to train.
Returns:
loss: A `LossInfo` containing the loss for the training step.
"""
del weights # unused
# The network is trained now. Update the covariance matrix.
encoded_observation, _ = self._encoding_network(
observation, training=training
)
encoded_observation = tf.cast(encoded_observation, dtype=self._dtype)
encoded_observation = tf.reshape(
encoded_observation, shape=[-1, self._encoding_dim]
)
for k in range(self._num_models):
diag_mask = tf.linalg.tensor_diag(
tf.cast(tf.equal(action, k), self._dtype)
)
observations_for_arm = tf.matmul(diag_mask, encoded_observation)
rewards_for_arm = tf.matmul(diag_mask, tf.reshape(reward, [-1, 1]))
num_samples_for_arm_current = tf.reduce_sum(diag_mask)
tf.compat.v1.assign_add(self.num_samples[k], num_samples_for_arm_current)
num_samples_for_arm_total = self.num_samples[k].read_value()
# Update the matrix A and b.
# pylint: disable=cell-var-from-loop
def update(cov_matrix, data_vector):
a_new, b_new = linear_agent.update_a_and_b_with_forgetting(
cov_matrix,
data_vector,
rewards_for_arm,
observations_for_arm,
self._gamma,
)
return a_new, b_new
a_new, b_new = tf.cond(
tf.squeeze(num_samples_for_arm_total) > 0,
lambda: update(self.cov_matrix[k], self.data_vector[k]),
lambda: (self.cov_matrix[k], self.data_vector[k]),
)
tf.compat.v1.assign(self.cov_matrix[k], a_new)
tf.compat.v1.assign(self.data_vector[k], b_new)
loss_tensor = tf.cast(-1.0 * tf.reduce_sum(reward), dtype=tf.float32)
loss_info = tf_agent.LossInfo(loss=loss_tensor, extra=())
tf.compat.v2.summary.scalar(
name='using_reward_layer', data=0, step=self.train_step_counter
)
self.train_step_counter.assign_add(1)
return loss_info
def compute_loss_using_linucb_distributed(
self,
observation: types.NestedTensor,
action: types.Tensor,
reward: types.Tensor,
weights: Optional[types.Float] = None,
training: bool = False,
) -> tf_agent.LossInfo:
"""Computes the loss using LinUCB distributively.
Args:
observation: A batch of observations.
action: A batch of actions.
reward: A batch of rewards.
weights: unused weights.
training: Whether the loss is being used to train.
Returns:
loss: A `LossInfo` containing the loss for the training step.
"""
del weights # unused
# The network is trained now. Update the covariance matrix.
encoded_observation, _ = self._encoding_network(
observation, training=training
)
encoded_observation = tf.cast(encoded_observation, dtype=self._dtype)
encoded_observation = tf.reshape(
encoded_observation, shape=[-1, self._encoding_dim]
)
self._train_step_counter.assign_add(1)
for k in range(self._num_models):
diag_mask = tf.linalg.tensor_diag(
tf.cast(tf.equal(action, k), self._dtype)
)
observations_for_arm = tf.matmul(diag_mask, encoded_observation)
rewards_for_arm = tf.matmul(diag_mask, tf.reshape(reward, [-1, 1]))
# Compute local updates for the matrix A and b of this arm.
      cov_matrix_local_update = tf.matmul(
observations_for_arm, observations_for_arm, transpose_a=True
)
data_vector_local_update = bandit_utils.sum_reward_weighted_observations(
rewards_for_arm, observations_for_arm
)
def _merge_fn(
strategy,
per_replica_cov_matrix_update,
per_replica_data_vector_update,
):
"""Merge the per-replica-updates."""
# Reduce the per-replica-updates using SUM.
# pylint: disable=cell-var-from-loop
updates_and_vars = [
(per_replica_cov_matrix_update, self.cov_matrix[k]),
(per_replica_data_vector_update, self.data_vector[k]),
]
reduced_updates = strategy.extended.batch_reduce_to(
tf.distribute.ReduceOp.SUM, updates_and_vars
)
# Update the model variables.
self.cov_matrix[k].assign(
self._gamma * self.cov_matrix[k] + reduced_updates[0]
)
self.data_vector[k].assign(
self._gamma * self.data_vector[k] + reduced_updates[1]
)
# Passes the local_updates to the _merge_fn() above that performs custom
# computation on the per-replica values.
      # All replicas pause their execution until merge_call() is done, and then
      # execution resumes.
replica_context = tf.distribute.get_replica_context()
replica_context.merge_call(
          _merge_fn, args=(cov_matrix_local_update, data_vector_local_update)
)
loss = -1.0 * tf.reduce_sum(reward)
return tf_agent.LossInfo(loss=(loss), extra=())
def _train(self, experience, weights=None):
"""Updates the policy based on the data in `experience`.
Note that `experience` should only contain data points that this agent has
not previously seen. If `experience` comes from a replay buffer, this buffer
should be cleared between each call to `train`.
Args:
experience: A batch of experience data in the form of a `Trajectory`.
weights: (optional) sample weights.
Returns:
A `LossInfo` containing the loss *before* the training step is taken.
In most cases, if `weights` is provided, the entries of this tuple will
have been calculated with the weights. Note that each Agent chooses
its own method of applying weights.
"""
experience = self._as_trajectory(experience)
(observation, action, reward) = (
bandit_utils.process_experience_for_neural_agents(
experience, self._accepts_per_arm_features, self.training_data_spec
)
)
if self._observation_and_action_constraint_splitter is not None:
observation, _ = self._observation_and_action_constraint_splitter(
observation
)
reward = tf.cast(reward, self._dtype)
if tf.distribute.has_strategy():
if self._distributed_train_encoding_network:
loss_info = self.compute_loss_using_reward_layer(
observation, action, reward, weights, training=True
)
else:
loss_info = self.compute_loss_using_linucb_distributed(
observation, action, reward, weights, training=True
)
return loss_info
tf.compat.v1.assign(
self.actions_from_reward_layer,
tf.less(
self._train_step_counter, self._encoding_network_num_train_steps
),
)
def use_actions_from_reward_layer():
return self.compute_loss_using_reward_layer(
observation, action, reward, weights, training=True
)
def no_actions_from_reward_layer():
return self.compute_loss_using_linucb(
observation, action, reward, weights, training=True
)
loss_info = tf.cond(
self.actions_from_reward_layer,
use_actions_from_reward_layer,
no_actions_from_reward_layer,
)
return loss_info
| [
"[email protected]"
] | |
c834c39c8e08fc958e2256b388af4f839efe7988 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_pricier.py | 17e602d16998edcee93d654fd2ff4a313028fae5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py |
#calss header
class _PRICIER():
def __init__(self,):
self.name = "PRICIER"
self.definitions = pricy
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['pricy']
| [
"[email protected]"
] | |
020d3a02c9aba18d9ceb63b09b9389dd7f395e1e | c1bfadbc033efba287ad55a804e9d69d297c3bf2 | /valohai_cli/commands/logout.py | bf4821b4666e80b85d595e51d78ca79a387cdd40 | [
"MIT"
] | permissive | valohai/valohai-cli | 16560b078d20a02c8cdc7388beeea9bebac4be7d | c57cc164e749fb77b622d629a5ad05b2685534bb | refs/heads/master | 2023-08-31T14:04:26.979762 | 2023-08-22T12:54:51 | 2023-08-22T12:54:51 | 81,329,264 | 14 | 5 | MIT | 2023-09-11T13:35:04 | 2017-02-08T12:46:54 | Python | UTF-8 | Python | false | false | 735 | py | import click
from valohai_cli.consts import yes_option
from valohai_cli.messages import success
from valohai_cli.settings import settings
@click.command()
@yes_option
def logout(yes: bool) -> None:
"""Remove local authentication token."""
user = settings.user
token = settings.token
if not (user or token):
click.echo('You\'re not logged in.')
return
if user and not yes:
click.confirm((
f'You are logged in as {user["username"]} (on {settings.host}).\n'
'Are you sure you wish to remove the authentication token?'
), abort=True)
settings.persistence.update(host=None, user=None, token=None)
settings.persistence.save()
success('Logged out.')
| [
"[email protected]"
] | |
4c12237f5cd128fd551d352034e4ac3d458b8a31 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/config/dumpptask.py | 2c8edbee935d28a0593b26649bed118cfd8a2293 | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 18,319 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class DumpPTask(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = ClassMeta("cobra.model.config.DumpPTask")
meta.moClassName = "configDumpPTask"
meta.rnFormat = "configDumpPTask-%(id)s"
meta.category = MoCategory.TASK
meta.label = "None"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x1
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.action.LicensemgrSubj")
meta.parentClasses.add("cobra.model.action.TopomgrSubj")
meta.parentClasses.add("cobra.model.action.ObserverSubj")
meta.parentClasses.add("cobra.model.action.SnmpdSubj")
meta.parentClasses.add("cobra.model.action.VmmmgrSubj")
meta.parentClasses.add("cobra.model.action.AnalyticsSubj")
meta.parentClasses.add("cobra.model.action.ScripthandlerSubj")
meta.parentClasses.add("cobra.model.action.ConfelemSubj")
meta.parentClasses.add("cobra.model.action.EventmgrSubj")
meta.parentClasses.add("cobra.model.action.OspaelemSubj")
meta.parentClasses.add("cobra.model.action.VtapSubj")
meta.parentClasses.add("cobra.model.action.OshSubj")
meta.parentClasses.add("cobra.model.action.DhcpdSubj")
meta.parentClasses.add("cobra.model.action.ObserverelemSubj")
meta.parentClasses.add("cobra.model.action.DomainmgrSubj")
meta.parentClasses.add("cobra.model.action.DbgrelemSubj")
meta.parentClasses.add("cobra.model.action.PlgnhandlerSubj")
meta.parentClasses.add("cobra.model.action.VleafelemSubj")
meta.parentClasses.add("cobra.model.action.NxosmockSubj")
meta.parentClasses.add("cobra.model.action.DbgrSubj")
meta.parentClasses.add("cobra.model.action.PlatformmgrSubj")
meta.parentClasses.add("cobra.model.action.AppliancedirectorSubj")
meta.parentClasses.add("cobra.model.action.OpflexpSubj")
meta.parentClasses.add("cobra.model.action.BootmgrSubj")
meta.parentClasses.add("cobra.model.action.AeSubj")
meta.parentClasses.add("cobra.model.action.PolicymgrSubj")
meta.parentClasses.add("cobra.model.action.ExtXMLApiSubj")
meta.parentClasses.add("cobra.model.action.OpflexelemSubj")
meta.parentClasses.add("cobra.model.action.PolicyelemSubj")
meta.parentClasses.add("cobra.model.action.PolicydistSubj")
meta.parentClasses.add("cobra.model.action.IdmgrSubj")
meta.parentClasses.add("cobra.model.action.EdmgrSubj")
meta.superClasses.add("cobra.model.action.RInst")
meta.superClasses.add("cobra.model.pol.ComplElem")
meta.superClasses.add("cobra.model.task.Inst")
meta.superClasses.add("cobra.model.action.Inst")
meta.rnPrefixes = [
('configDumpPTask-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "data", "data", 52, PropCategory.REGULAR)
prop.label = "Data"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 512)]
meta.props.add("data", prop)
prop = PropMeta("str", "descr", "descr", 33, PropCategory.REGULAR)
prop.label = "Description"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "endTs", "endTs", 15575, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("endTs", prop)
prop = PropMeta("str", "fail", "fail", 46, PropCategory.REGULAR)
prop.label = "Fail"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("fail", prop)
prop = PropMeta("str", "flags", "flags", 30392, PropCategory.REGULAR)
prop.label = "Flags"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("flags", prop)
prop = PropMeta("str", "id", "id", 34319, PropCategory.REGULAR)
prop.label = "ID"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("fetchPdConfig", "fetchpdconfig", 2863)
prop._addConstant("none", "none", 0)
meta.props.add("id", prop)
prop = PropMeta("str", "invErrCode", "invErrCode", 49, PropCategory.REGULAR)
prop.label = "Remote Error Code"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("ERR-FILTER-illegal-format", None, 1140)
prop._addConstant("ERR-FSM-no-such-state", None, 1160)
prop._addConstant("ERR-HTTP-set-error", None, 1551)
prop._addConstant("ERR-HTTPS-set-error", None, 1552)
prop._addConstant("ERR-MO-CONFIG-child-object-cant-be-configured", None, 1130)
prop._addConstant("ERR-MO-META-no-such-object-class", None, 1122)
prop._addConstant("ERR-MO-PROPERTY-no-such-property", None, 1121)
prop._addConstant("ERR-MO-PROPERTY-value-out-of-range", None, 1120)
prop._addConstant("ERR-MO-access-denied", None, 1170)
prop._addConstant("ERR-MO-deletion-rule-violation", None, 1107)
prop._addConstant("ERR-MO-duplicate-object", None, 1103)
prop._addConstant("ERR-MO-illegal-containment", None, 1106)
prop._addConstant("ERR-MO-illegal-creation", None, 1105)
prop._addConstant("ERR-MO-illegal-iterator-state", None, 1100)
prop._addConstant("ERR-MO-illegal-object-lifecycle-transition", None, 1101)
prop._addConstant("ERR-MO-naming-rule-violation", None, 1104)
prop._addConstant("ERR-MO-object-not-found", None, 1102)
prop._addConstant("ERR-MO-resource-allocation", None, 1150)
prop._addConstant("ERR-aaa-config-modify-error", None, 1520)
prop._addConstant("ERR-acct-realm-set-error", None, 1513)
prop._addConstant("ERR-add-ctrlr", None, 1574)
prop._addConstant("ERR-admin-passwd-set", None, 1522)
prop._addConstant("ERR-api", None, 1571)
prop._addConstant("ERR-auth-issue", None, 1548)
prop._addConstant("ERR-auth-realm-set-error", None, 1514)
prop._addConstant("ERR-authentication", None, 1534)
prop._addConstant("ERR-authorization-required", None, 1535)
prop._addConstant("ERR-connect", None, 1572)
prop._addConstant("ERR-create-domain", None, 1562)
prop._addConstant("ERR-create-keyring", None, 1560)
prop._addConstant("ERR-create-role", None, 1526)
prop._addConstant("ERR-create-user", None, 1524)
prop._addConstant("ERR-delete-domain", None, 1564)
prop._addConstant("ERR-delete-role", None, 1528)
prop._addConstant("ERR-delete-user", None, 1523)
prop._addConstant("ERR-domain-set-error", None, 1561)
prop._addConstant("ERR-http-initializing", None, 1549)
prop._addConstant("ERR-incompat-ctrlr-version", None, 1568)
prop._addConstant("ERR-internal-error", None, 1540)
prop._addConstant("ERR-invalid-args", None, 1569)
prop._addConstant("ERR-invalid-delimiter", None, 1589)
prop._addConstant("ERR-invalid-domain", None, 1588)
prop._addConstant("ERR-invalid-domain-name", None, 1582)
prop._addConstant("ERR-ldap-delete-error", None, 1510)
prop._addConstant("ERR-ldap-get-error", None, 1509)
prop._addConstant("ERR-ldap-group-modify-error", None, 1518)
prop._addConstant("ERR-ldap-group-set-error", None, 1502)
prop._addConstant("ERR-ldap-set-error", None, 1511)
prop._addConstant("ERR-missing-method", None, 1546)
prop._addConstant("ERR-modify-ctrlr-access", None, 1567)
prop._addConstant("ERR-modify-ctrlr-dvs-version", None, 1576)
prop._addConstant("ERR-modify-ctrlr-rootcont", None, 1575)
prop._addConstant("ERR-modify-ctrlr-scope", None, 1573)
prop._addConstant("ERR-modify-ctrlr-trig-inventory", None, 1577)
prop._addConstant("ERR-modify-domain", None, 1563)
prop._addConstant("ERR-modify-domain-encapmode", None, 1581)
prop._addConstant("ERR-modify-domain-enfpref", None, 1578)
prop._addConstant("ERR-modify-domain-mcastpool", None, 1579)
prop._addConstant("ERR-modify-domain-mode", None, 1580)
prop._addConstant("ERR-modify-domain-prefencapmode", None, 1584)
prop._addConstant("ERR-modify-role", None, 1527)
prop._addConstant("ERR-modify-user", None, 1525)
prop._addConstant("ERR-modify-user-domain", None, 1565)
prop._addConstant("ERR-modify-user-role", None, 1532)
prop._addConstant("ERR-no-buf", None, 1570)
prop._addConstant("ERR-passwd-set-failure", None, 1566)
prop._addConstant("ERR-provider-group-modify-error", None, 1519)
prop._addConstant("ERR-provider-group-set-error", None, 1512)
prop._addConstant("ERR-radius-global-set-error", None, 1505)
prop._addConstant("ERR-radius-group-set-error", None, 1501)
prop._addConstant("ERR-radius-set-error", None, 1504)
prop._addConstant("ERR-request-timeout", None, 1545)
prop._addConstant("ERR-role-set-error", None, 1515)
prop._addConstant("ERR-rsa-global-set-error", None, 1587)
prop._addConstant("ERR-rsa-group-set-error", None, 1585)
prop._addConstant("ERR-rsa-set-error", None, 1586)
prop._addConstant("ERR-secondary-node", None, 1550)
prop._addConstant("ERR-service-not-ready", None, 1539)
prop._addConstant("ERR-set-password-strength-check", None, 1543)
prop._addConstant("ERR-store-pre-login-banner-msg", None, 1521)
prop._addConstant("ERR-tacacs-enable-error", None, 1508)
prop._addConstant("ERR-tacacs-global-set-error", None, 1507)
prop._addConstant("ERR-tacacs-group-set-error", None, 1503)
prop._addConstant("ERR-tacacs-set-error", None, 1506)
prop._addConstant("ERR-user-account-expired", None, 1536)
prop._addConstant("ERR-user-set-error", None, 1517)
prop._addConstant("ERR-xml-parse-error", None, 1547)
prop._addConstant("communication-error", "communication-error", 1)
prop._addConstant("none", "none", 0)
meta.props.add("invErrCode", prop)
prop = PropMeta("str", "invErrDescr", "invErrDescr", 50, PropCategory.REGULAR)
prop.label = "Remote Error Description"
prop.isImplicit = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("invErrDescr", prop)
prop = PropMeta("str", "invRslt", "invRslt", 48, PropCategory.REGULAR)
prop.label = "Remote Result"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "not-applicable"
prop._addConstant("capability-not-implemented-failure", "capability-not-implemented-failure", 16384)
prop._addConstant("capability-not-implemented-ignore", "capability-not-implemented-ignore", 8192)
prop._addConstant("capability-not-supported", "capability-not-supported", 32768)
prop._addConstant("capability-unavailable", "capability-unavailable", 65536)
prop._addConstant("end-point-failed", "end-point-failed", 32)
prop._addConstant("end-point-protocol-error", "end-point-protocol-error", 64)
prop._addConstant("end-point-unavailable", "end-point-unavailable", 16)
prop._addConstant("extend-timeout", "extend-timeout", 134217728)
prop._addConstant("failure", "failure", 1)
prop._addConstant("fru-identity-indeterminate", "fru-identity-indeterminate", 4194304)
prop._addConstant("fru-info-malformed", "fru-info-malformed", 8388608)
prop._addConstant("fru-not-ready", "fru-not-ready", 67108864)
prop._addConstant("fru-not-supported", "fru-not-supported", 536870912)
prop._addConstant("fru-state-indeterminate", "fru-state-indeterminate", 33554432)
prop._addConstant("fw-defect", "fw-defect", 256)
prop._addConstant("hw-defect", "hw-defect", 512)
prop._addConstant("illegal-fru", "illegal-fru", 16777216)
prop._addConstant("intermittent-error", "intermittent-error", 1073741824)
prop._addConstant("internal-error", "internal-error", 4)
prop._addConstant("not-applicable", "not-applicable", 0)
prop._addConstant("resource-capacity-exceeded", "resource-capacity-exceeded", 2048)
prop._addConstant("resource-dependency", "resource-dependency", 4096)
prop._addConstant("resource-unavailable", "resource-unavailable", 1024)
prop._addConstant("service-not-implemented-fail", "service-not-implemented-fail", 262144)
prop._addConstant("service-not-implemented-ignore", "service-not-implemented-ignore", 131072)
prop._addConstant("service-not-supported", "service-not-supported", 524288)
prop._addConstant("service-protocol-error", "service-protocol-error", 2097152)
prop._addConstant("service-unavailable", "service-unavailable", 1048576)
prop._addConstant("sw-defect", "sw-defect", 128)
prop._addConstant("task-reset", "task-reset", 268435456)
prop._addConstant("timeout", "timeout", 8)
prop._addConstant("unidentified-fail", "unidentified-fail", 2)
meta.props.add("invRslt", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "oDn", "oDn", 51, PropCategory.REGULAR)
prop.label = "Subject DN"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("oDn", prop)
prop = PropMeta("str", "operSt", "operSt", 15674, PropCategory.REGULAR)
prop.label = "Completion"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "scheduled"
prop._addConstant("cancelled", "cancelled", 3)
prop._addConstant("completed", "completed", 2)
prop._addConstant("crashsuspect", "crash-suspect", 7)
prop._addConstant("failed", "failed", 4)
prop._addConstant("indeterminate", "indeterminate", 5)
prop._addConstant("processing", "processing", 1)
prop._addConstant("ready", "ready", 8)
prop._addConstant("scheduled", "scheduled", 0)
prop._addConstant("suspended", "suspended", 6)
meta.props.add("operSt", prop)
prop = PropMeta("str", "originMinority", "originMinority", 54, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("originMinority", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "runId", "runId", 45, PropCategory.REGULAR)
prop.label = "ID"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("runId", prop)
prop = PropMeta("str", "startTs", "startTs", 36, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("startTs", prop)
prop = PropMeta("str", "startTx", "startTx", 36895, PropCategory.REGULAR)
prop.label = "startTxId"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "none"
prop._addConstant("none", "none", 0)
meta.props.add("startTx", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "try", "try", 15574, PropCategory.REGULAR)
prop.label = "Try"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("try", prop)
prop = PropMeta("str", "ts", "ts", 47, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("ts", prop)
meta.namingProps.append(getattr(meta.props, "id"))
def __init__(self, parentMoOrDn, id, markDirty=True, **creationProps):
namingVals = [id]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
3db2907e0ec1a60da6727317afaec49ef2217e4c | 96ad67554b01832b873fc0bdab0c33aa2178a2fd | /3_visualExploratory/3_violationDistrict.py | ac3f3009cd46c72bad29b203a4a88e5a3d37b070 | [] | no_license | RobertNguyen125/Datacamp---Project-PoliceActivities | 09447ee1290c40b3c038ccd387e80c7e703cb053 | af14e4d7c4ff864f68cfa3aaecdfee9883c24659 | refs/heads/master | 2021-01-02T02:00:15.928445 | 2020-02-10T06:48:13 | 2020-02-10T06:48:13 | 239,445,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 812 | py | # .crosstab(), short for cross_tabulation
import pandas as pd
import matplotlib.pyplot as plt
ri2 = pd.read_csv('/Users/apple/desktop/policeActivities/dataset/ri2.csv')
table = pd.crosstab(ri2['driver_race'], ri2['driver_gender']) # NOTE: frequency table in form of dataframe
print(table)
# check the result of frequency table
asian_female = ri2[(ri2['driver_gender']=='F') & (ri2['driver_race']=='Asian')]
print(asian_female.shape)
table = table.loc['Asian':'Hispanic']
print(table)
# create stacked bar plot
# table.plot(kind='bar', stacked=True)
# plt.show()
# district violation
# create frequency table with distric and violation
all_zones = pd.crosstab(ri2['district'],ri2['violation'])
print(all_zones)
# slice the dataframe to get k1-k3:
k_zones = all_zones.loc['Zone K1': 'Zone K3']
print(k_zones)
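# One possible follow-up (sketch, reusing the objects defined above): plot the
# district-by-violation counts as a stacked bar chart, mirroring the
# commented-out race/gender plot earlier in this script.
k_zones.plot(kind='bar', stacked=True)
plt.show()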
| [
"[email protected]"
] | |
4fa0a7eb80583b752126f933c7de41b6086d7e94 | f9e3a0fb511470561d3d94bc984dafaee06000cb | /9780596009250/PP3E-Examples-1.2/Examples/PP3E/System/App/Bases/app.py | 9e971584749335e8bfed0687516b1d673471aca7 | [
"LicenseRef-scancode-oreilly-notice"
] | permissive | Sorath93/Programming-Python-book | 359b6fff4e17b44b9842662f484bbafb490cfd3d | ebe4c93e265edd4ae135491bd2f96904d08a911c | refs/heads/master | 2022-12-03T01:49:07.815439 | 2020-08-16T22:19:38 | 2020-08-16T22:19:38 | 287,775,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,823 | py | ################################################################################
# an application class hierarchy, for handling top-level components;
# App is the root class of the App hierarchy, extended in other files;
################################################################################
import sys, os, traceback
class AppError(Exception): pass # errors raised here
class App: # the root class
def __init__(self, name=None):
self.name = name or self.__class__.__name__ # the lowest class
self.args = sys.argv[1:]
self.env = os.environ
self.verbose = self.getopt('-v') or self.getenv('VERBOSE')
self.input = sys.stdin
self.output = sys.stdout
self.error = sys.stderr # stdout may be piped
def closeApp(self): # not __del__: ref's?
pass # nothing at this level
def help(self):
print self.name, 'command-line arguments:' # extend in subclass
print '-v (verbose)'
##############################
# script environment services
##############################
def getopt(self, tag):
try: # test "-x" command arg
self.args.remove(tag) # not real argv: > 1 App?
return 1
except:
return 0
def getarg(self, tag, default=None):
try: # get "-x val" command arg
pos = self.args.index(tag)
val = self.args[pos+1]
self.args[pos:pos+2] = []
return val
except:
return default # None: missing, no default
def getenv(self, name, default=''):
try: # get "$x" environment var
return self.env[name]
except KeyError:
return default
def endargs(self):
if self.args:
self.message('extra arguments ignored: ' + repr(self.args))
self.args = []
def restargs(self):
res, self.args = self.args, [] # no more args/options
return res
def message(self, text):
self.error.write(text + '\n') # stdout may be redirected
def exception(self):
return tuple(sys.exc_info()[:2]) # the last exception type,data
def exit(self, message='', status=1):
if message:
self.message(message)
sys.exit(status)
def shell(self, command, fork=0, inp=''):
if self.verbose:
self.message(command) # how about ipc?
if not fork:
os.system(command) # run a shell cmd
elif fork == 1:
return os.popen(command, 'r').read() # get its output
else: # readlines too?
pipe = os.popen(command, 'w')
pipe.write(inp) # send it input
pipe.close()
#################################################
# input/output-stream methods for the app itself;
# redefine in subclasses if not using files, or
# set self.input/output to file-like objects;
#################################################
def read(self, *size):
return self.input.read(*size)
def readline(self):
return self.input.readline()
def readlines(self):
return self.input.readlines()
def write(self, text):
self.output.write(text)
def writelines(self, text):
self.output.writelines(text)
###################################################
# to run the app
# main() is the start/run/stop execution protocol;
###################################################
def main(self):
res = None
try:
self.start()
self.run()
res = self.stop() # optional return val
except SystemExit: # ignore if from exit()
pass
except:
self.message('uncaught: ' + str(self.exception()))
traceback.print_exc()
self.closeApp()
return res
def start(self):
if self.verbose: self.message(self.name + ' start.')
def stop(self):
if self.verbose: self.message(self.name + ' done.')
def run(self):
raise AppError, 'run must be redefined!'
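# Example (illustrative sketch): a minimal subclass that echoes its input
# stream using the start/run/stop protocol defined above.
#
#     class EchoApp(App):
#         def run(self):
#             for line in self.readlines():
#                 self.write(line)
#
#     if __name__ == '__main__':
#         EchoApp().main()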
| [
"[email protected]"
] | |
1376fbee52bacc27bd80efd4d16b435c5e946b03 | 549270020f6c8724e2ef1b12e38d11b025579f8d | /recipes/libnetfilter_queue/all/test_package/conanfile.py | 1097433829a7c2a75801555fd3e085e9063cd7b5 | [
"MIT"
] | permissive | conan-io/conan-center-index | 1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43 | 3b17e69bb4e5601a850b6e006e44775e690bac33 | refs/heads/master | 2023-08-31T11:34:45.403978 | 2023-08-31T11:13:23 | 2023-08-31T11:13:23 | 204,671,232 | 844 | 1,820 | MIT | 2023-09-14T21:22:42 | 2019-08-27T09:43:58 | Python | UTF-8 | Python | false | false | 469 | py | import os
from conans import ConanFile, CMake, tools
class Libnetfilter_queueTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self):
bin_path = os.path.join("bin", "example")
self.run("{} {}".format(bin_path, 0), run_environment=True)
| [
"[email protected]"
] | |
8263c0e2c597868a62777d0b2bf18d2d862238d2 | 632d7759536ed0726499c2d52c8eb13b5ab213ab | /Data/Packages/Default/swap_line.py | 5c098bc61d83d61eb12c2cf637e2417ebeab613c | [] | no_license | Void2403/sublime_text_3_costomize | e660ad803eb12b20e9fa7f8eb7c6aad0f2b4d9bc | c19977e498bd948fd6d8f55bd48c8d82cbc317c3 | refs/heads/master | 2023-08-31T21:32:32.791574 | 2019-05-31T11:46:19 | 2019-05-31T11:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,886 | py | import sublime
import sublime_plugin
def expand_to_line(view, region):
"""
As view.full_line, but doesn't expand to the next line if a full line is
already selected
"""
if not (region.a == region.b) and view.substr(region.end() - 1) == '\n':
return sublime.Region(view.line(region).begin(), region.end())
else:
return view.full_line(region)
def extract_line_blocks(view):
blocks = [expand_to_line(view, s) for s in view.sel()]
if len(blocks) == 0:
return blocks
# merge any adjacent blocks
merged_blocks = [blocks[0]]
for block in blocks[1:]:
last_block = merged_blocks[-1]
if block.begin() <= last_block.end():
merged_blocks[-1] = sublime.Region(last_block.begin(), block.end())
else:
merged_blocks.append(block)
return merged_blocks
class SwapLineUpCommand(sublime_plugin.TextCommand):
def run(self, edit):
blocks = extract_line_blocks(self.view)
# No selection
if len(blocks) == 0:
return
# Already at BOF
if blocks[0].begin() == 0:
return
# Add a trailing newline if required, the logic is simpler if every line
# ends with a newline
add_trailing_newline = (self.view.substr(self.view.size() - 1) != '\n') and blocks[-1].b == self.view.size()
if add_trailing_newline:
# The insert can cause the selection to move. This isn't wanted, so
# reset the selection if it has moved to EOF
sel = [r for r in self.view.sel()]
self.view.insert(edit, self.view.size(), '\n')
if self.view.sel()[-1].end() == self.view.size():
# Selection has moved, restore the previous selection
self.view.sel().clear()
for r in sel:
self.view.sel().add(r)
# Fix up any block that should now include this newline
blocks[-1] = sublime.Region(blocks[-1].a, blocks[-1].b + 1)
# Process in reverse order
blocks.reverse()
for b in blocks:
prev_line = self.view.full_line(b.begin() - 1)
self.view.insert(edit, b.end(), self.view.substr(prev_line))
self.view.erase(edit, prev_line)
if add_trailing_newline:
# Remove the added newline
self.view.erase(edit, sublime.Region(self.view.size() - 1, self.view.size()))
# Ensure the selection is visible
self.view.show(self.view.sel(), False)
class SwapLineDownCommand(sublime_plugin.TextCommand):
def run(self, edit):
blocks = extract_line_blocks(self.view)
# No selection
if len(blocks) == 0:
return
# Already at EOF
if blocks[-1].end() == self.view.size():
return
# Add a trailing newline if required, the logic is simpler if every line
# ends with a newline
add_trailing_newline = (self.view.substr(self.view.size() - 1) != '\n')
if add_trailing_newline:
# No block can be at EOF (checked above), so no need to fix up the
# blocks
self.view.insert(edit, self.view.size(), '\n')
# Process in reverse order
blocks.reverse()
for b in blocks:
next_line = self.view.full_line(b.end())
contents = self.view.substr(next_line)
self.view.erase(edit, next_line)
self.view.insert(edit, b.begin(), contents)
if add_trailing_newline:
# Remove the added newline
self.view.erase(edit, sublime.Region(self.view.size() - 1, self.view.size()))
# Ensure the selection is visible
self.view.show(self.view.sel(), False)
| [
"[email protected]"
] | |
e62ab15957a3c82e8578924508c3baeabde046be | b550eda62179ffd8e49a59df7f8a30163140204f | /backend/openshift-old/services/job/worker/src/nodes/requests/openshift.py | 169b62b8c283420c6106a524f7d57862ca40833b | [
"Apache-2.0"
] | permissive | bgoesswe/openeo-repeatability | 6222fb235b70fda9da998b63fec92c0e5ac07169 | 087b9965e710d16cd6f29cb25e2cb94e443c2b30 | refs/heads/master | 2022-12-11T03:43:35.365574 | 2018-08-07T20:02:02 | 2018-08-07T20:02:02 | 139,158,921 | 0 | 1 | null | 2022-12-08T02:15:15 | 2018-06-29T14:27:34 | Python | UTF-8 | Python | false | false | 801 | py | from os import environ
from utils import send_post
# OPENSHIFT_URL = environ.get("OPENSHIFT_API")
# OPENSHIFT_AUTH = auth = {"Authorization": "Bearer " + environ.get("SERVICEACCOUNT_TOKEN")}
# OPENSHIFT_NAMESPACE = environ.get("EXECUTION_NAMESPACE")
# OPENSHIFT_STORAGE_CLASS = environ.get("STORAGE_CLASS")
# OPENSHIFT_VERIFY = True if environ.get("VERIFY") == "true" else False
# def execute_template(path, template):
# url = "{0}/{1}".format(OPENSHIFT_URL, path)
# send_post(url, template, OPENSHIFT_AUTH, OPENSHIFT_VERIFY)
# url = environ.get("OPENSHIFT_API") + self.path
# response = post(url, data=self.get_json(), headers=auth, verify=verify)
# verify =
# # Execute template
# if response.ok == False:
# self.raise_error(response.text)
# self.status = "Created" | [
"[email protected]"
] | |
72455241a618db9120f1ce31fffb5ed5a14566bd | fbfb724f8d0c3a6b64b2d6773c6f723bedb9f7f5 | /Python/Django_full/courses/apps/course_app/views.py | 49c2de66092e1c0453f40735e9ff07ab1f17f2ca | [] | no_license | eddieverity/DojoAssignments | 32ae4a1de768069d6636d1f109845e86bb20dec5 | 8860b4ca87633e722fa5aa93952ea719e9e95413 | refs/heads/master | 2020-04-06T03:59:56.185985 | 2017-04-26T18:04:41 | 2017-04-26T18:04:41 | 83,149,714 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | from django.shortcuts import render, redirect, HttpResponse
from .models import Course, Description, Comment
# Create your views here.
def index(request):
course=Course.objects.all()
desc= Description.objects.all()
context = {
"courses": course,
"desc": desc
}
return render(request, "course_app/index.html", context)
def go_back(request):
return redirect('/')
def add(request):
course = Course.objects.create(name=request.POST['name']) # course automatically getting assigned course_id, then referenced in description.create below
Description.objects.create(desc=request.POST['desc'], course=course)
return redirect('/')
def delete(request, id):
instance = Course.objects.filter(id = id).delete()
return redirect('/')
| [
"[email protected]"
] | |
a5146ae5de1b53ffccabf6a5318027797a5bb10a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_psoriasis.py | 8b4a1ae76769598d8296034103fda0e42994b41d | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py |
# class header
class _PSORIASIS():
def __init__(self,):
self.name = "PSORIASIS"
self.definitions = [u'a disease in which areas of skin turn red and are covered with small dry pieces of skin']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
ac87a80c7946de405af73d9d842c2e7763946960 | fee6d256bb4430569f9c055735d5f52a04afac45 | /admin/town_get.py | 3c317a73eecfa09a13f9b9e9edfd840faf725e1f | [] | no_license | microprediction/pandemic | 633367e3a11af1418e255a595b4c01a9c1f4c1bb | 4ca339b8c6e1925d7d70e9659b34e7cf8d7b534b | refs/heads/master | 2021-05-23T15:27:12.726299 | 2020-11-12T13:52:56 | 2020-11-12T13:52:56 | 253,360,903 | 9 | 8 | null | 2020-05-18T14:00:25 | 2020-04-06T00:34:55 | Python | UTF-8 | Python | false | false | 260 | py |
from pandemic.config_private import REDIS_CONFIG
from pprint import pprint
import json
if __name__=="__main__":
import redis
r = redis.Redis(**REDIS_CONFIG)
key = '00021250616501801290085'
data = r.hgetall(name='town::hash')
pprint(data)
| [
"[email protected]"
] | |
7aba0b9e83fa79101172ddd4c5618b3be76aada9 | d17a8870ff8ac77b82d0d37e20c85b23aa29ca74 | /lite/tests/unittest_py/op/common/test_unsqueeze_op_base.py | 2501e10cdaad936fb10b222f6afd2e47286d2faa | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle-Lite | 4ab49144073451d38da6f085a8c56822caecd5b2 | e241420f813bd91f5164f0d9ee0bc44166c0a172 | refs/heads/develop | 2023-09-02T05:28:14.017104 | 2023-09-01T10:32:39 | 2023-09-01T10:32:39 | 104,208,128 | 2,545 | 1,041 | Apache-2.0 | 2023-09-12T06:46:10 | 2017-09-20T11:41:42 | C++ | UTF-8 | Python | false | false | 2,275 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('..')
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import numpy as np
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import assume
import hypothesis.strategies as st
def sample_program_configs(draw):
in_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=5), min_size=4, max_size=4))
axes_data = draw(
st.lists(
st.integers(
min_value=0, max_value=3), min_size=1, max_size=2))
def generate_AxesTensor_data():
return np.random.choice([0, 1, 2, 3], axes_data, replace=True)
def generate_AxesTensorList_data():
return np.random.choice([0, 1, 2, 3], [], replace=True)
unsqueeze_op = OpConfig(
type="unsqueeze",
inputs={
"X": ["X_data"],
"AxesTensor": ["AxesTensor_data"],
"AxesTensorList": ["AxesTensorList_data"]
},
outputs={"Out": ["Out_data"]},
attrs={"axes": axes_data, })
program_config = ProgramConfig(
ops=[unsqueeze_op],
weights={},
inputs={
"X_data": TensorConfig(shape=in_shape),
"AxesTensor_data":
TensorConfig(data_gen=partial(generate_AxesTensor_data)),
            # TensorList is not supported, so comment them out
"AxesTensorList_data":
TensorConfig(data_gen=partial(generate_AxesTensorList_data))
},
outputs=["Out_data"])
return program_config
| [
"[email protected]"
] | |
d86b2af56d25376ca533a9b8f5974a461cddc95f | 41e22cef6ded081632f21cd3877884f76c69bef3 | /flaskmob/api.py | 2835e31958cc88e0b8e048455be2281aea280abb | [
"MIT"
] | permissive | brotherjack/Flask-Mob | 737cac3623c8a062653e2eefa981de30526b4510 | f0f4f5fe79f2fe7e63c2f882dc4b5d61276dbf45 | refs/heads/master | 2021-01-20T09:37:26.091977 | 2017-03-04T22:09:56 | 2017-03-04T22:09:56 | 83,924,618 | 0 | 0 | null | 2017-03-04T21:03:59 | 2017-03-04T21:03:59 | null | UTF-8 | Python | false | false | 1,012 | py | from flask import jsonify
from flaskmob import app, db
from flask_restful import Resource, Api
api = Api(app)
class Pokeymon(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, unique=True)
color = db.Column(db.String)
def __init__(self, name, color=None):
if not color:
color = "Not Specified"
self.name = name
self.color = color
def __repr__(self):
return str(self.name)
class PokeymonNapTime(Resource):
def get(self, name):
result = Pokeymon.query.filter_by(name=name).first()
del result.__dict__['_sa_instance_state']
return jsonify(result.__dict__)
def post(self, name, color=None):
new_pokeymon = Pokeymon(name, color)
db.session.add(new_pokeymon)
try:
db.session.commit()
except:
db.session.rollback()
raise
return "Success"
api.add_resource(PokeymonNapTime, "/api/1.0/pokeyman/<string:name>")
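# Illustrative calls against the resource registered above, assuming the Flask app is served
# on localhost:5000 (the host and port are not configured in this module):
#
#   import requests
#   requests.post("http://localhost:5000/api/1.0/pokeyman/pikachu")          # create
#   requests.get("http://localhost:5000/api/1.0/pokeyman/pikachu").json()    # read back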
| [
"[email protected]"
] | |
d7ce57bb2a34be6eaabcd84fa54ce1e9684ed2ad | e60a342f322273d3db5f4ab66f0e1ffffe39de29 | /parts/zodiac/pyramid/config/__init__.py | 353c270d3b02ea3c082abb98347a3ef4673e089e | [] | no_license | Xoting/GAExotZodiac | 6b1b1f5356a4a4732da4c122db0f60b3f08ff6c1 | f60b2b77b47f6181752a98399f6724b1cb47ddaf | refs/heads/master | 2021-01-15T21:45:20.494358 | 2014-01-13T15:29:22 | 2014-01-13T15:29:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | /home/alex/myenv/zodiac/eggs/pyramid-1.4-py2.7.egg/pyramid/config/__init__.py | [
"[email protected]"
] | |
d84f0b803d8be1aa81bc7e7291137ca415656a52 | 9870d2c6880fd3fa558c46e3bf160aae20c74157 | /removeNthFromEnd.py | 5719f47e75ed040bbcce08e05727590f9c52fbbc | [] | no_license | Yigang0622/LeetCode | e7f7f115c6e730c486296ef2f1a3dd1a3fdca526 | c873cd1ee70a2bdb54571bdd50733db9f6475e9e | refs/heads/master | 2023-03-03T14:32:25.498633 | 2021-02-15T13:59:00 | 2021-02-15T13:59:00 | 281,423,565 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,322 | py | # LeetCode
# removeNthFromEnd
# Created by Yigang Zhou on 2020/7/22.
# Copyright © 2020 Yigang Zhou. All rights reserved.
# Given a linked list, remove the n-th node from the end of the list and return its head.
#
# Example:
#
# Given linked list: 1->2->3->4->5, and n = 2.
#
# After removing the second node from the end, the linked list becomes 1->2->3->5.
# Note:
#
# The given n is guaranteed to be valid.
#
# Follow up:
#
# Could you try to do this in one pass?
#Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def printLinkedList(head: ListNode):
while head is not None:
print(head.val)
head = head.next
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
arr = []
while head is not None:
arr.append(head)
head = head.next
if len(arr) == 1:
return []
i = len(arr) - n - 1
if i == -1:
printLinkedList(arr[1])
return arr[1]
else:
arr[i].next = arr[i].next.next
printLinkedList(arr[0])
return arr[0]
n = ListNode(1)
n2 = ListNode(2)
n3 = ListNode(3)
n4 = ListNode(4)
n.next = n2
n2.next = n3
n3.next = n4
s = Solution().removeNthFromEnd(n,4)
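# Sketch of the one-pass variant asked about in the follow-up above: keep two pointers
# n + 1 links apart behind a dummy head, then advance both until the fast pointer runs off
# the end, so the slow pointer stops just before the node to delete.
def removeNthFromEndOnePass(head: ListNode, n: int) -> ListNode:
    dummy = ListNode(0)
    dummy.next = head
    fast = slow = dummy
    for _ in range(n + 1):          # open a gap of n nodes between fast and slow
        fast = fast.next
    while fast is not None:         # move both pointers until fast passes the tail
        fast = fast.next
        slow = slow.next
    slow.next = slow.next.next      # unlink the n-th node from the end
    return dummy.next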
| [
"[email protected]"
] | |
65ea9516ef90c51096c29190ac5a836c0bd9ae28 | 71d9245d5264c25e56a6bb36512049da2c608875 | /docs/rtd/bin/pip | ab30e4a91c21a624e2ee5a7fdf76a1543ed863c5 | [
"BSD-3-Clause"
] | permissive | kuacuia/CrazyEyeDoc | 1ef0d05e5de51b22f8126b92344348b41b4b2ae7 | f6614e4c9811356942213c7c4d8744d27b90bf57 | refs/heads/master | 2021-05-01T20:20:24.015721 | 2015-10-19T05:36:48 | 2015-10-19T05:36:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/Users/jieli/PycharmProjects/CrazyEye/docs/rtd/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
13549ec011843c3269631dae4df79481e9adcee9 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03759/s647507996.py | 7c66b69c12d94e46dcce65b1e8b12fc11d1775b6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | i = list(map(int, input().split()))
a=i[0]
b=i[1]
c=i[2]
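# The answer below is YES exactly when the two gaps are equal, i.e. b - a == c - b,
# meaning a, b, c form an arithmetic progression.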
j_1 = b-a
j_2=c-b
if j_1==j_2:
print('YES')
else :
print('NO')
| [
"[email protected]"
] | |
54dc0172f201f8adc5440482208dbc2e4a20f88b | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /Fe6wvtjcNFwuANuLu_1.py | 19b5746b7b1b7d4c659dd13e0c67a617790fb718 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | """
A game of table tennis almost always sounds like _Ping!_ followed by _Pong!_
Therefore, you know that Player 2 has won if you hear _Pong!_ as the last
sound (since Player 1 didn't return the ball back).
Given a list of _Ping!_ , create a function that inserts _Pong!_ in between
each element. Also:
* If `win` equals `True`, end the list with _Pong!_.
* If `win` equals `False`, end with _Ping!_ instead.
### Examples
ping_pong(["Ping!"], True) ➞ ["Ping!", "Pong!"]
ping_pong(["Ping!", "Ping!"], False) ➞ ["Ping!", "Pong!", "Ping!"]
ping_pong(["Ping!", "Ping!", "Ping!"], True) ➞ ["Ping!", "Pong!", "Ping!", "Pong!", "Ping!", "Pong!"]
### Notes
* You will always return the ball (i.e. the Pongs are yours).
* Player 1 serves the ball and makes _Ping!_.
* Return a list of strings.
"""
def ping_pong(lst, win):
res = ['Ping!','Pong!'] * len(lst)
return res if win else res[:-1]
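# Quick checks mirroring the examples in the docstring above:
if __name__ == "__main__":
    assert ping_pong(["Ping!"], True) == ["Ping!", "Pong!"]
    assert ping_pong(["Ping!", "Ping!"], False) == ["Ping!", "Pong!", "Ping!"]
    assert ping_pong(["Ping!", "Ping!", "Ping!"], True) == ["Ping!", "Pong!"] * 3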
| [
"[email protected]"
] | |
d8656572c733b1f9a10bc318e47dbba7721dca6b | beea74a2a1f2445b107af411197e8b6300e715e6 | /supervised_learning/0x07-cnn/0-conv_forward.py | ce94808fac80cec28daaffce4ba0d4471128adfc | [] | no_license | 95ktsmith/holbertonschool-machine_learning | 0240d8fa8523b06d3353c2bffa74205b84253be8 | 2757c8526290197d45a4de33cda71e686ddcbf1c | refs/heads/master | 2023-07-26T16:02:26.399758 | 2021-09-09T15:57:57 | 2021-09-09T15:57:57 | 310,087,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,360 | py | #!/usr/bin/env python3
""" Convolution forward propagation """
import numpy as np
def conv_forward(A_prev, W, b, activation, padding="same", stride=(1, 1)):
"""
Performs forward propagation over a convolutional layer of a neural network
A_prev is a numpy.ndarray of shape (m, h_prev, w_prev, c_prev) containing
the output of the previous layer
m is the number of examples
h_prev is the height of the previous layer
w_prev is the width of the previous layer
c_prev is the number of channels in the previous layer
W is a numpy.ndarray of shape (kh, kw, c_prev, c_new) containing the
kernels for the convolution
kh is the filter height
kw is the filter width
c_prev is the number of channels in the previous layer
c_new is the number of channels in the output
b is a numpy.ndarray of shape (1, 1, 1, c_new) containing the biases
applied to the convolution
activation is an activation function applied to the convolution
padding is a string that is either same or valid, indicating the type of
padding used
stride is a tuple of (sh, sw) containing the strides for the convolution
sh is the stride for the height
sw is the stride for the width
Returns: the output of the convolutional layer
"""
m, h_prev, w_prev, c_prev = A_prev.shape
kh, kw, c_prev, c_new = W.shape
sh = stride[0]
sw = stride[1]
if padding == "valid":
ph = 0
pw = 0
ch = int((h_prev - kh) / sh + 1)
cw = int((w_prev - kw) / sw + 1)
else: # padding == "same"
ch = h_prev
cw = w_prev
ph = int((ch * sh - h_prev + kh - 1) / 2)
pw = int((cw * sw - w_prev + kw - 1) / 2)
padded = np.pad(A_prev,
((0, 0), (ph, ph), (pw, pw), (0, 0)),
'constant',
constant_values=0)
convolved = np.zeros((m, ch, cw, c_new))
for channel in range(c_new):
for row in range(ch):
for col in range(cw):
mask = padded[:, row*sh:row*sh + kh, col*sw:col*sw + kw, :] *\
W[None, :, :, :, channel]
out = np.sum(mask, axis=(1, 2, 3)) + b[:, :, :, channel]
convolved[:, row, col, channel] = activation(out)
return convolved
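# Minimal usage sketch; the sizes are made up for illustration and follow the shapes
# described in the docstring above.
if __name__ == "__main__":
    np.random.seed(0)
    A_prev = np.random.randn(2, 28, 28, 3)    # (m, h_prev, w_prev, c_prev)
    W = np.random.randn(3, 3, 3, 8)           # (kh, kw, c_prev, c_new)
    b = np.random.randn(1, 1, 1, 8)
    relu = lambda x: np.maximum(0, x)
    out = conv_forward(A_prev, W, b, relu, padding="same", stride=(1, 1))
    print(out.shape)                          # (2, 28, 28, 8) with "same" padding, stride 1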
| [
"[email protected]"
] | |
3c516ada6af314021aa4340dc715126b4d3b5c3d | 2e94ded940d9a8015f5cf877bfbef71a77b5ddaf | /bigml/api_handlers/clusterhandler.py | 133a66bc205f295795f8d4e768542ab1a9575aa3 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | mmerce/python | 9ac63efacec3e54285a969b6c6279eeba6bceb78 | 696ddc2a10c985cfe266ec2807c24b98f0c9a317 | refs/heads/master | 2023-08-04T09:10:17.016748 | 2020-11-10T23:43:34 | 2020-11-10T23:43:34 | 5,256,921 | 0 | 0 | null | 2017-10-03T22:54:20 | 2012-08-01T08:38:09 | Python | UTF-8 | Python | false | false | 3,898 | py | # -*- coding: utf-8 -*-
#
# Copyright 2014-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for clusters' REST calls
https://bigml.com/api/clusters
"""
try:
import simplejson as json
except ImportError:
import json
from bigml.api_handlers.resourcehandler import ResourceHandlerMixin
from bigml.api_handlers.resourcehandler import check_resource_type, \
resource_is_ready, get_cluster_id
from bigml.constants import CLUSTER_PATH
class ClusterHandlerMixin(ResourceHandlerMixin):
"""This class is used by the BigML class as
a mixin that provides the REST calls models. It should not
be instantiated independently.
"""
def __init__(self):
"""Initializes the ClusterHandler. This class is intended to be
used as a mixin on ResourceHandler, that inherits its
attributes and basic method from BigMLConnection, and must not be
instantiated independently.
"""
self.cluster_url = self.url + CLUSTER_PATH
def create_cluster(self, datasets, args=None, wait_time=3, retries=10):
"""Creates a cluster from a `dataset` or a list o `datasets`.
"""
create_args = self._set_create_from_datasets_args(
datasets, args=args, wait_time=wait_time, retries=retries)
body = json.dumps(create_args)
return self._create(self.cluster_url, body)
def get_cluster(self, cluster, query_string='',
shared_username=None, shared_api_key=None):
"""Retrieves a cluster.
The model parameter should be a string containing the
cluster id or the dict returned by create_cluster.
As cluster is an evolving object that is processed
until it reaches the FINISHED or FAULTY state, the function will
return a dict that encloses the cluster values and state info
available at the time it is called.
If this is a shared cluster, the username and sharing api key must
also be provided.
"""
check_resource_type(cluster, CLUSTER_PATH,
message="A cluster id is needed.")
return self.get_resource(cluster,
query_string=query_string,
shared_username=shared_username,
shared_api_key=shared_api_key)
def cluster_is_ready(self, cluster, **kwargs):
"""Checks whether a cluster's status is FINISHED.
"""
check_resource_type(cluster, CLUSTER_PATH,
message="A cluster id is needed.")
resource = self.get_cluster(cluster, **kwargs)
return resource_is_ready(resource)
def list_clusters(self, query_string=''):
"""Lists all your clusters.
"""
return self._list(self.cluster_url, query_string)
def update_cluster(self, cluster, changes):
"""Updates a cluster.
"""
check_resource_type(cluster, CLUSTER_PATH,
message="A cluster id is needed.")
return self.update_resource(cluster, changes)
def delete_cluster(self, cluster):
"""Deletes a cluster.
"""
check_resource_type(cluster, CLUSTER_PATH,
message="A cluster id is needed.")
return self.delete_resource(cluster)
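# Illustrative use through the public BigML API object, which mixes this handler in
# (the dataset id and the "k" value below are placeholders):
#
#   from bigml.api import BigML
#   api = BigML()                                        # credentials from the environment
#   cluster = api.create_cluster("dataset/5f0e6...", {"k": 5})
#   api.ok(cluster)                                      # wait until FINISHED
#   cluster = api.get_cluster(cluster)
#   api.delete_cluster(cluster)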
| [
"[email protected]"
] | |
6eab9a88af0ceee39b0d08197e81ce32a0290429 | 88ae8695987ada722184307301e221e1ba3cc2fa | /third_party/grpc/src/src/python/grpcio_csds/setup.py | 6523648516b6ebe0624f0243eb91978bdf3a3b93 | [
"Apache-2.0",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"BSD-3-Clause",
"MPL-2.0"
] | permissive | iridium-browser/iridium-browser | 71d9c5ff76e014e6900b825f67389ab0ccd01329 | 5ee297f53dc7f8e70183031cff62f37b0f19d25f | refs/heads/master | 2023-08-03T16:44:16.844552 | 2023-07-20T15:17:00 | 2023-07-23T16:09:30 | 220,016,632 | 341 | 40 | BSD-3-Clause | 2021-08-13T13:54:45 | 2019-11-06T14:32:31 | null | UTF-8 | Python | false | false | 2,120 | py | # Copyright 2021 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup module for CSDS in gRPC Python."""
import os
import sys
import setuptools
_PACKAGE_PATH = os.path.realpath(os.path.dirname(__file__))
_README_PATH = os.path.join(_PACKAGE_PATH, 'README.rst')
# Ensure we're in the proper directory whether or not we're being used by pip.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# Break import-style to ensure we can actually find our local modules.
import grpc_version
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
]
PACKAGE_DIRECTORIES = {
'': '.',
}
INSTALL_REQUIRES = (
'protobuf>=4.21.6',
'xds-protos>=0.0.7',
'grpcio>={version}'.format(version=grpc_version.VERSION),
)
SETUP_REQUIRES = INSTALL_REQUIRES
setuptools.setup(name='grpcio-csds',
version=grpc_version.VERSION,
license='Apache License 2.0',
description='xDS configuration dump library',
long_description=open(_README_PATH, 'r').read(),
author='The gRPC Authors',
author_email='[email protected]',
classifiers=CLASSIFIERS,
url='https://grpc.io',
package_dir=PACKAGE_DIRECTORIES,
packages=setuptools.find_packages('.'),
python_requires='>=3.6',
install_requires=INSTALL_REQUIRES,
setup_requires=SETUP_REQUIRES)
| [
"[email protected]"
] | |
20119dd4bf027bc85b6d0743586dd8843d61e207 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-sa/huaweicloudsdksa/v2/model/update_playbook_action_request.py | 65f38548f9f7ce143f3cb61570505abafe3b769a | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 6,398 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdatePlaybookActionRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'project_id': 'str',
'workspace_id': 'str',
'version_id': 'str',
'action_id': 'str',
'body': 'ModifyActionInfo'
}
attribute_map = {
'project_id': 'project_id',
'workspace_id': 'workspace_id',
'version_id': 'version_id',
'action_id': 'action_id',
'body': 'body'
}
def __init__(self, project_id=None, workspace_id=None, version_id=None, action_id=None, body=None):
"""UpdatePlaybookActionRequest
The model defined in huaweicloud sdk
:param project_id: ID of project
:type project_id: str
:param workspace_id: ID of workspace
:type workspace_id: str
:param version_id: version Id value
:type version_id: str
:param action_id: ID of action
:type action_id: str
:param body: Body of the UpdatePlaybookActionRequest
:type body: :class:`huaweicloudsdksa.v2.ModifyActionInfo`
"""
self._project_id = None
self._workspace_id = None
self._version_id = None
self._action_id = None
self._body = None
self.discriminator = None
self.project_id = project_id
self.workspace_id = workspace_id
self.version_id = version_id
self.action_id = action_id
if body is not None:
self.body = body
@property
def project_id(self):
"""Gets the project_id of this UpdatePlaybookActionRequest.
ID of project
:return: The project_id of this UpdatePlaybookActionRequest.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this UpdatePlaybookActionRequest.
ID of project
:param project_id: The project_id of this UpdatePlaybookActionRequest.
:type project_id: str
"""
self._project_id = project_id
@property
def workspace_id(self):
"""Gets the workspace_id of this UpdatePlaybookActionRequest.
ID of workspace
:return: The workspace_id of this UpdatePlaybookActionRequest.
:rtype: str
"""
return self._workspace_id
@workspace_id.setter
def workspace_id(self, workspace_id):
"""Sets the workspace_id of this UpdatePlaybookActionRequest.
ID of workspace
:param workspace_id: The workspace_id of this UpdatePlaybookActionRequest.
:type workspace_id: str
"""
self._workspace_id = workspace_id
@property
def version_id(self):
"""Gets the version_id of this UpdatePlaybookActionRequest.
version Id value
:return: The version_id of this UpdatePlaybookActionRequest.
:rtype: str
"""
return self._version_id
@version_id.setter
def version_id(self, version_id):
"""Sets the version_id of this UpdatePlaybookActionRequest.
version Id value
:param version_id: The version_id of this UpdatePlaybookActionRequest.
:type version_id: str
"""
self._version_id = version_id
@property
def action_id(self):
"""Gets the action_id of this UpdatePlaybookActionRequest.
ID of action
:return: The action_id of this UpdatePlaybookActionRequest.
:rtype: str
"""
return self._action_id
@action_id.setter
def action_id(self, action_id):
"""Sets the action_id of this UpdatePlaybookActionRequest.
ID of action
:param action_id: The action_id of this UpdatePlaybookActionRequest.
:type action_id: str
"""
self._action_id = action_id
@property
def body(self):
"""Gets the body of this UpdatePlaybookActionRequest.
:return: The body of this UpdatePlaybookActionRequest.
:rtype: :class:`huaweicloudsdksa.v2.ModifyActionInfo`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this UpdatePlaybookActionRequest.
:param body: The body of this UpdatePlaybookActionRequest.
:type body: :class:`huaweicloudsdksa.v2.ModifyActionInfo`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdatePlaybookActionRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
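# Illustrative construction of this request; every id below is a placeholder and the body
# fields depend on the ModifyActionInfo model:
#
#   request = UpdatePlaybookActionRequest(
#       project_id="<project id>",
#       workspace_id="<workspace id>",
#       version_id="<version id>",
#       action_id="<action id>",
#       body=ModifyActionInfo(...))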
| [
"[email protected]"
] | |
17733708ab790b089dff9a1d04c596afc74c55a1 | d1df4725a5354915d01f17fa92ccffa77189f852 | /bot.py | 0fe01e48f515b51e4ede7f3e93c7539d5996f518 | [] | no_license | goldaqua/aqua | 0b5baf48e87061ffe64a639c88e30cc38f5c8db4 | 74b42eb4360e3e736bd8aa554b0d961020d159f1 | refs/heads/main | 2023-01-29T19:58:37.009722 | 2020-12-16T12:23:51 | 2020-12-16T12:23:51 | 321,970,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,711 | py | import configure
import telebot
from telebot import types  # keyboard buttons
from string import Template
bot = telebot.TeleBot(configure.config['token'])
user_dict = {}
tovary = {
"Вода в баллоне 36 грн": 36,
'Баллон с водой 170 грн': 170,
'Трубочка для помпы (1 часть) 15 грн': 15,
'Носик для помпы 25 грн': 25,
'Помпа економ 80 грн': 80,
'Помпа улучшенная 100 грн': 100,
'Баллон + помпа економ + вода 230 грн': 230,
'Баллон + помпа улучшенная + вода 250 грн': 250
}
class User:
def __init__(self, city):
self.city = city
self.adres = None
self.tovar = {}
self.phone = None
self.prim = None
@property
def summa(self):
output = 0
for k, v in self.tovar.items():
output += tovary[k] * v
return output
def kbs(buttons, one_time_keyboard=True, row_width=None):
kb = types.ReplyKeyboardMarkup(
resize_keyboard=True,
one_time_keyboard=one_time_keyboard,
row_width=row_width or len(buttons)//2
)
kb.add(*[types.KeyboardButton(i) for i in buttons])
return kb
@bot.message_handler(commands=['help', 'start'])
def send_welcome(message):
markup_menu = kbs(['О нас 🏢', 'Заказать 📝', 'Как стать клиентом Аквасвит 🙋♂️', 'Обратная связь 📞', 'График работы ⏰'])
bot.send_message(message.chat.id, "Вас приветствует компания \"Аквасвит\"" + " 👋" + " "
+ message.from_user.first_name
+ ", выберите интересующий раздел.", reply_markup=markup_menu)
@bot.message_handler(content_types=["text"])
def user_reg(message):
if message.text == 'Заказать 📝':
markup = kbs(['Бахмут', 'Часов ЯР', 'Константиновка', 'Торецк (Дзержинск)'])
msg = bot.send_message(message.chat.id, 'Выбирете город:', reply_markup=markup)
bot.register_next_step_handler(msg, process_city_step)
elif message.text == 'О нас 🏢':
bot.send_message(message.chat.id, "Наша компания \"Аквасвит\" уже 15 лет работает для своих клиентов. С каждым годом мы усовершенствоваемся и делаем все возможное для того,"
+ " чтобы каждому клиенту было удобно и приятно с нами работать.")
bot.send_message(message.chat.id," Наша вода, прежде чем попоасть к вам в дом/офис проходит сложные этапы отчистки:"
+"\n1. ✅ Механическая очистка пятью видами фильтров."
+"\n2. ✅Мультимедийная угольная колонна."
+"\n3. ✅Мультимедийная колонна."
+"\n4. ✅Фильтрация через мембраны с отверстиями в одну десятитысячную микрона."
+"\n5. ✅Минерализация с помощью электронных дозаторов."
+"\n6. ✅Постфильтры с углями из скорлупы кокосового ореха и цеолитого-шунгитовый с углем и серебром."
+"\n7. ✅Обработка ультрафиолетом.")
bot.send_message(message.chat.id,"❗️ Поставщик воды по просьбе клиента обязан предоставить обязательный ежемесячный бактерицидный анализ воды.")
elif message.text == 'Обратная связь 📞':
bot.send_message(message.chat.id, "Мы находимся по адресу: Донецкая обл, г. Бахмут, ул. Юбилейная, 50 магазин \"Аквасвит\""
+ "\nНомера телефона Диспетчера: \n050-537-82-49 \n066-420-94-50 \n050-041-28-29 \n066-226-91-00 \n067-745-27-28 \n093-165-02-06")
elif message.text == 'Как стать клиентом Аквасвит 🙋♂️':
markup = kbs(['Заказать 📝', 'Вернутся в главное меню'])
msg = bot.send_message(message.chat.id, 'Для того, чтобы стать клиентом компании Аквасвит, нужно приобрести комплект:'
+ '\nБаллон + помпа економ + вода 230 грн \nили \nБаллон + помпа улучшенная + вода 250 грн', reply_markup=markup)
elif message.text == 'Вернутся в главное меню':
send_welcome(message)
elif message.text == 'Заказать 📝':
user_reg(message)
elif message.text == 'График работы ⏰':
bot.send_message(message.chat.id, "График работы доставки: 🚚"
+"\n ПН-ПТ: 8:00-17:00"
+"\n СБ: 8:00-16:00"
+"\n ВС: Выходной")
def process_city_step(message):
try:
chat_id = message.chat.id
user_dict[chat_id] = User(message.text)
        # remove the previous custom keyboard
markup = types.ReplyKeyboardRemove(selective=False)
msg = bot.send_message(chat_id, 'Введите адрес:', reply_markup=markup)
bot.register_next_step_handler(msg, process_adres)
except Exception as e:
bot.reply_to(message, 'ooops!!')
def process_adres(message):
try:
chat_id = message.chat.id
user = user_dict[chat_id]
user.adres = message.text
return process_tovar(message)
except Exception as e:
bot.reply_to(message, 'ooops!!')
def process_tovar(message):
user = user_dict[message.chat.id]
available_items = list(tovary) + ['Оформить заказ ✅', 'Очистить корзину 🗑']
chosen_items = []
def inner(message):
nonlocal chosen_items, available_items
try:
if message.text == 'Оформить заказ ✅':
bot.send_message(message.chat.id, "Ваши вы выбрали:\n" + get_items_string(user.tovar, '\n'))
msg = bot.send_message(message.chat.id, 'Введите ваш номер телефона в формате: 0ХХYYYYYYY')
return bot.register_next_step_handler(msg, process_phone)
elif message.text == 'Очистить корзину 🗑':
bot.send_message(message.chat.id, 'Корзина очищена')
return process_tovar(message)
elif message.text in available_items:
available_items.remove(message.text)
chosen_items.append(message.text)
bot.send_message(message.chat.id, 'Выберите кол-во товара', reply_markup=kbs(['1', '2', '3', '4']))
return bot.register_next_step_handler(message, ask_number, message.text)
else:
raise ValueError
except ValueError: # if item not in available items
bot.send_message(message.chat.id, "Вводите только доступные товары")
return bot.register_next_step_handler(message, inner)
else:
bot.register_next_step_handler(message, inner)
def ask_number(message, item):
nonlocal user
try:
amount = int(message.text)
except ValueError:
            bot.send_message(message.chat.id, 'Вводите только целые числа')
return bot.register_next_step_handler(message, ask_number, item)
else:
user.tovar[item] = amount
bot.send_message(message.chat.id, 'Выберите ещё товары', reply_markup=kbs(available_items, row_width=1))
return bot.register_next_step_handler(message, inner)
bot.send_message(
message.chat.id,
"Выберите покупки среди предложеных",
reply_markup=kbs(available_items, row_width=1)
)
bot.register_next_step_handler(message, inner)
def process_phone(message):
try:
int(message.text)
chat_id = message.chat.id
user = user_dict[chat_id]
user.phone = message.text
msg = bot.send_message(chat_id, 'Ведите примечание, если нет примечание напишите "нет"')
bot.register_next_step_handler(msg, process_prim)
except Exception as e:
msg = bot.reply_to(message, 'Вы ввели что то другое. Пожалуйста введите номер телефона.')
bot.register_next_step_handler(msg, process_phone)
def process_prim(message):
try:
chat_id = message.chat.id
user = user_dict[chat_id]
user.prim = message.text
if user.city == "Торецк (Дзержинск)" or user.city == "Константиновка":
bot.send_message(chat_id, getRegData(user, 'Ваша заявка', message.from_user.first_name), parse_mode="Markdown")
bot.send_message(message.chat.id, "Ваш заказ принят, ожидайте пожалуйста заказ будет выполнен в тичении 2-3 часов." + "\n По г. Часов ЯР доставка осуществляется только по средам, во второй половине дня."
+ "\n В случии отмены или редактирование заказа просим вас обратится к диспетчеру тел. 050-537-82-49 \nC Ув. Аквасвит")
send_welcome(message)
            # send the order to the group chat
bot.send_message(735422335, getRegData(user, 'Заявка от бота', bot.get_me().username), parse_mode="Markdown")
else:
            # the user's own copy of the order ("Ваша заявка" + the user's first name)
bot.send_message(chat_id, getRegData(user, 'Ваша заявка', message.from_user.first_name), parse_mode="Markdown")
bot.send_message(message.chat.id, "Ваш заказ принят, ожидайте пожалуйста заказ будет выполнен в тичении 2-3 часов." + "\n По г. Часов ЯР доставка осуществляется только по средам, во второй половине дня."
+"\nПо г. Дзержинск (Торецк) доставка осуществляется по средам и субботам."
+"\n В случии отмены или редактирование заказа просим вас обратится к диспетчеру тел. 050-537-82-49 \nC Ув. Аквасвит")
send_welcome(message)
            # send the order to the group chat
bot.send_message(1413116688, getRegData(user, 'Заявка от бота', bot.get_me().username), parse_mode="Markdown")
except Exception as e:
bot.reply_to(message, 'Что то пошло не так!')
def get_items_string(dct:dict, sep:str=', '):
return sep.join([f"{k} ({v} шт.)" for k, v in dct.items()])
# builds the text of the order summary
# the Template string must not contain literal line breaks
# the matching send_message call has to use parse_mode="Markdown"
def getRegData(user, title, name):
t = Template('$title *$name* \n Город: *$userCity* \n Адресс: *$adres* \n Товар: *$tovar* \n Телефон: *$phone* \n Примечание: *$prim* \n К оплате курьеру: *$summa* грн.')
return t.substitute({
'title': title,
'name': name,
'userCity': user.city,
'adres': user.adres,
'tovar': get_items_string(user.tovar),
'phone': user.phone,
'prim': user.prim,
'summa': user.summa,
})
@bot.message_handler(content_types=["text"])
def mine1(message):
if message.text == 'Главное меню':
        send_welcome(message)
@bot.message_handler(content_types=["text"])
def send_help(message):
bot.send_message(message.chat.id, '/start')
# any photo sent by the user
@bot.message_handler(content_types=["photo"])
def send_help_text(message):
bot.send_message(message.chat.id, '/start')
bot.polling(none_stop=True) | [
"[email protected]"
] | |
7cbb792e2cb7f0c7d51684f1e7fad31c4ff22284 | 23c944ff03ea82cb1b557780bbe9810a4f5e001c | /mymath/tests/features/increment-steps.py | 61521a4c6f342460fe2a1e3af70507f51d283d1f | [] | no_license | akshar-raaj/hack | 711e13659530c0202879b815bf295efed661bb7d | 4cab4d8ededd7adf8877b56741db2df7dabd0828 | refs/heads/master | 2020-04-04T00:33:29.900091 | 2015-09-05T12:05:13 | 2015-09-05T12:05:13 | 41,952,605 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | from lettuce import *
from fact import num, increment
@step('I have access to increment')
def access(step):
pass
@step('I use increment')
def use_increment(step):
increment()
@step('num is (\d+)')
def num_is(step, number):
number = int(number)
assert num == number, "Expected %d, found %d" % (number, num) | [
"[email protected]"
] | |
607dfdbb73a76a1d24d155ff47e6ddc1db2483d1 | c8d13f0efb453f8119aa55303c7bb70b506d51a1 | /73.py | d9a6063d4db904a480c30d6a781f6229a887ee42 | [] | no_license | rubivenkatesan/rubi21 | ab5f248e4cba664330fb8d7b4632d20c527a221e | f91d55852c1ab64dcec75a13ca835e2bf6c3dcb8 | refs/heads/master | 2020-04-15T04:58:04.682460 | 2019-06-05T13:18:13 | 2019-06-05T13:18:13 | 164,403,882 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #rubi
n=int(input())
m,k=map(int,input().split())
print("yes" if ((n<k) & (n>=m)) else "no")
| [
"[email protected]"
] | |
ae7c82de852c37f2276fa60c5a266cb353d7610c | afc8d5a9b1c2dd476ea59a7211b455732806fdfd | /Configurations/ggH/Full2016_nanoAOD/aliases.py | 7e9a914cfb01546c67ef05e6dd9d77d585774d16 | [] | no_license | latinos/PlotsConfigurations | 6d88a5ad828dde4a7f45c68765081ed182fcda21 | 02417839021e2112e740607b0fb78e09b58c930f | refs/heads/master | 2023-08-18T20:39:31.954943 | 2023-08-18T09:23:34 | 2023-08-18T09:23:34 | 39,819,875 | 10 | 63 | null | 2023-08-10T14:08:04 | 2015-07-28T07:36:50 | Python | UTF-8 | Python | false | false | 7,480 | py | import os
import copy
import inspect
configurations = os.path.realpath(inspect.getfile(inspect.currentframe())) # this file
configurations = os.path.dirname(configurations) # ggH2016
configurations = os.path.dirname(configurations) # Differential
configurations = os.path.dirname(configurations) # Configurations
#aliases = {}
# imported from samples.py:
# samples, signals
mc = [skey for skey in samples if skey not in ('Fake', 'DATA')]
eleWP = 'mva_90p_Iso2016'
muWP = 'cut_Tight80x'
aliases['LepWPCut'] = {
'expr': 'LepCut2l__ele_'+eleWP+'__mu_'+muWP,
'samples': mc + ['DATA']
}
aliases['gstarLow'] = {
'expr': 'Gen_ZGstar_mass >0 && Gen_ZGstar_mass < 4',
'samples': 'VgS'
}
aliases['gstarHigh'] = {
'expr': 'Gen_ZGstar_mass <0 || Gen_ZGstar_mass > 4',
'samples': 'VgS'
}
# Fake leptons transfer factor
aliases['fakeW'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP,
'samples': ['Fake']
}
# And variations - already divided by central values in formulas !
aliases['fakeWEleUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_EleUp',
'samples': ['Fake']
}
aliases['fakeWEleDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_EleDown',
'samples': ['Fake']
}
aliases['fakeWMuUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_MuUp',
'samples': ['Fake']
}
aliases['fakeWMuDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_MuDown',
'samples': ['Fake']
}
aliases['fakeWStatEleUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statEleUp',
'samples': ['Fake']
}
aliases['fakeWStatEleDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statEleDown',
'samples': ['Fake']
}
aliases['fakeWStatMuUp'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statMuUp',
'samples': ['Fake']
}
aliases['fakeWStatMuDown'] = {
'expr': 'fakeW2l_ele_'+eleWP+'_mu_'+muWP+'_statMuDown',
'samples': ['Fake']
}
# gen-matching to prompt only (GenLepMatch2l matches to *any* gen lepton)
aliases['PromptGenLepMatch2l'] = {
'expr': 'Alt$(Lepton_promptgenmatched[0]*Lepton_promptgenmatched[1], 0)',
'samples': mc
}
aliases['Top_pTrw'] = {
'expr': '(topGenPt * antitopGenPt > 0.) * (TMath::Sqrt(TMath::Exp(0.0615 - 0.0005 * topGenPt) * TMath::Exp(0.0615 - 0.0005 * antitopGenPt))) + (topGenPt * antitopGenPt <= 0.)',
'samples': ['top']
}
# Jet bins
# using Alt$(CleanJet_pt[n], 0) instead of Sum$(CleanJet_pt >= 30) because jet pt ordering is not strictly followed in JES-varied samples
# No jet with pt > 30 GeV
aliases['zeroJet'] = {
'expr': 'Alt$(CleanJet_pt[0], 0) < 30.'
}
aliases['oneJet'] = {
'expr': 'Alt$(CleanJet_pt[0], 0) > 30.'
}
aliases['multiJet'] = {
'expr': 'Alt$(CleanJet_pt[1], 0) > 30.'
}
# B tagging
aliases['bVeto'] = {
'expr': 'Sum$(CleanJet_pt > 20. && abs(CleanJet_eta) < 2.5 && Jet_btagDeepB[CleanJet_jetIdx] > 0.2217) == 0'
}
aliases['bReq'] = {
'expr': 'Sum$(CleanJet_pt > 30. && abs(CleanJet_eta) < 2.5 && Jet_btagDeepB[CleanJet_jetIdx] > 0.2217) >= 1'
}
# CR definitions
aliases['topcr'] = {
'expr': 'mtw2>30 && mll>50 && ((zeroJet && !bVeto) || bReq)'
}
aliases['dycr'] = {
'expr': 'mth<60 && mll>40 && mll<80 && bVeto'
}
aliases['wwcr'] = {
'expr': 'mth>60 && mtw2>30 && mll>100 && bVeto'
}
# SR definition
aliases['sr'] = {
'expr': 'mth>60 && mtw2>30 && bVeto'
}
# B tag scale factors
btagSFSource = '%s/src/PhysicsTools/NanoAODTools/data/btagSF/DeepCSV_2016LegacySF_V1.csv' % os.getenv('CMSSW_BASE')
aliases['Jet_btagSF_shapeFix'] = {
'linesToAdd': [
'gSystem->Load("libCondFormatsBTauObjects.so");',
'gSystem->Load("libCondToolsBTau.so");',
'gSystem->AddIncludePath("-I%s/src");' % os.getenv('CMSSW_RELEASE_BASE'),
'.L %s/patches/btagsfpatch.cc+' % configurations
],
'class': 'BtagSF',
'args': (btagSFSource,),
'samples': mc
}
aliases['bVetoSF'] = {
'expr': 'TMath::Exp(Sum$(TMath::Log((CleanJet_pt>20 && abs(CleanJet_eta)<2.5)*Jet_btagSF_shapeFix[CleanJet_jetIdx]+1*(CleanJet_pt<20 || abs(CleanJet_eta)>2.5))))',
'samples': mc
}
aliases['bReqSF'] = {
'expr': 'TMath::Exp(Sum$(TMath::Log((CleanJet_pt>30 && abs(CleanJet_eta)<2.5)*Jet_btagSF_shapeFix[CleanJet_jetIdx]+1*(CleanJet_pt<30 || abs(CleanJet_eta)>2.5))))',
'samples': mc
}
aliases['btagSF'] = {
'expr': '(bVeto || (topcr && zeroJet))*bVetoSF + (topcr && !zeroJet)*bReqSF',
'samples': mc
}
for shift in ['jes','lf','hf','lfstats1','lfstats2','hfstats1','hfstats2','cferr1','cferr2']:
aliases['Jet_btagSF_shapeFix_up_%s' % shift] = {
'class': 'BtagSF',
'args': (btagSFSource, 'up_' + shift),
'samples': mc
}
aliases['Jet_btagSF_shapeFix_down_%s' % shift] = {
'class': 'BtagSF',
'args': (btagSFSource, 'down_' + shift),
'samples': mc
}
for targ in ['bVeto', 'bReq']:
alias = aliases['%sSF%sup' % (targ, shift)] = copy.deepcopy(aliases['%sSF' % targ])
alias['expr'] = alias['expr'].replace('btagSF_shapeFix', 'btagSF_shapeFix_up_%s' % shift)
alias = aliases['%sSF%sdown' % (targ, shift)] = copy.deepcopy(aliases['%sSF' % targ])
alias['expr'] = alias['expr'].replace('btagSF_shapeFix', 'btagSF_shapeFix_down_%s' % shift)
aliases['btagSF%sup' % shift] = {
'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'up'),
'samples': mc
}
aliases['btagSF%sdown' % shift] = {
'expr': aliases['btagSF']['expr'].replace('SF', 'SF' + shift + 'down'),
'samples': mc
}
# data/MC scale factors
aliases['SFweight'] = {
'expr': ' * '.join(['SFweight2l', 'LepSF2l__ele_' + eleWP + '__mu_' + muWP, 'LepWPCut', 'btagSF', 'PrefireWeight']),
'samples': mc
}
# variations
aliases['SFweightEleUp'] = {
'expr': 'LepSF2l__ele_'+eleWP+'__Up',
'samples': mc
}
aliases['SFweightEleDown'] = {
'expr': 'LepSF2l__ele_'+eleWP+'__Do',
'samples': mc
}
aliases['SFweightMuUp'] = {
'expr': 'LepSF2l__mu_'+muWP+'__Up',
'samples': mc
}
aliases['SFweightMuDown'] = {
'expr': 'LepSF2l__mu_'+muWP+'__Do',
'samples': mc
}
aliases['nllWOTF'] = {
'linesToAdd': ['.L %s/Differential/nllW.cc+' % configurations],
'class': 'WWNLLW',
'args': ('central',),
'samples': ['WW']
}
# In WpWmJJ_EWK events, partons [0] and [1] are always the decay products of the first W
aliases['lhe_mW1'] = {
'expr': 'TMath::Sqrt(2. * LHEPart_pt[0] * LHEPart_pt[1] * (TMath::CosH(LHEPart_eta[0] - LHEPart_eta[1]) - TMath::Cos(LHEPart_phi[0] - LHEPart_phi[1])))',
'samples': ['WWewk']
}
# and [2] [3] are the second W
aliases['lhe_mW2'] = {
'expr': 'TMath::Sqrt(2. * LHEPart_pt[2] * LHEPart_pt[3] * (TMath::CosH(LHEPart_eta[2] - LHEPart_eta[3]) - TMath::Cos(LHEPart_phi[2] - LHEPart_phi[3])))',
'samples': ['WWewk']
}
# use HTXS_njets30 when moving to NanoAODv5 for all trees
aliases['nCleanGenJet'] = {
'linesToAdd': ['.L %s/Differential/ngenjet.cc+' % configurations],
'class': 'CountGenJet',
'samples': signals
}
# GGHUncertaintyProducer wasn't run for 2016 nAODv5 non-private
thus = [
'ggH_mu',
'ggH_res',
'ggH_mig01',
'ggH_mig12',
'ggH_VBF2j',
'ggH_VBF3j',
'ggH_pT60',
'ggH_pT120',
'ggH_qmtop'
]
for thu in thus:
aliases[thu] = {
'linesToAdd': ['.L %s/Differential/gghuncertainty.cc+' % configurations],
'class': 'GGHUncertainty',
'args': (thu,),
'samples': ['ggH_hww'],
'nominalOnly': True
}
| [
"[email protected]"
] | |
adff771b2088a82c77b2f650a290c0117b99034f | 5eb29ce7104e10a399d9afd7e253f029bf8bc0ff | /cu_image_search/memex_tools/image_dl.py | dd8e2583167f9141fbc46ae7257f0a4980fbc490 | [
"BSD-2-Clause"
] | permissive | svebk/DeepSentiBank_memex | 69789dc09316e97aad711edeb251837a60184e7e | 4e69ce66e3a177817ff360ddc263f55c6e0b63f7 | refs/heads/master | 2021-01-18T18:55:10.870052 | 2017-10-19T22:51:29 | 2017-10-19T22:51:29 | 36,091,024 | 22 | 1 | null | 2017-02-09T20:31:20 | 2015-05-22T19:20:54 | Python | UTF-8 | Python | false | false | 4,721 | py | import os
import requests
import shutil
import time
import warnings
import numpy as np
imagedltimeout = 3
session = requests.Session()
session.trust_env = False
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def mkpath(outpath):
pos_slash=[pos for pos,c in enumerate(outpath) if c=="/"]
for pos in pos_slash:
try:
os.mkdir(outpath[:pos])
except:
pass
def dlimage_basepath(url,basepath,logf=None):
start_time = time.time()
if not url:
return None
pos_slash=[pos for pos,c in enumerate(url) if c=="/"]
#pos_point=[pos for pos,c in enumerate(url) if c=="."]
if not pos_slash:
return None
file_img=url[pos_slash[-1]+1:]
# path with time and random to ensure unique names
outpath=os.path.join(basepath,str(time.time())+'_'+str(np.int32(np.random.random()*(10e6)))+'_'+file_img)
mkpath(outpath)
uptomkpath_time = time.time()
#print "Downloading image from {} to {}.".format(url,outpath)
try:
#r = requests.get(url, stream=True, timeout=imagedltimeout)
# still slow with session.trust_env
# verify=False induces a InsecureRequestWarning
r = session.get(url, stream=True, timeout=imagedltimeout, verify=False)
uptorequest_time = time.time()
if r.status_code == 200:
with open(outpath, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
uptowrite_time = time.time()
mkpath_time = uptomkpath_time - start_time
dl_time = uptorequest_time - uptomkpath_time
write_time = uptowrite_time - uptorequest_time
print("[dlimage_basepath] mkpath_time {}, dl_time {}, write_time {}".format(mkpath_time, dl_time, write_time))
return outpath
except Exception as inst:
if logf:
logf.write("Download failed for img that should be saved at {} from url {}.\n".format(outpath,url))
else:
print "Download failed for img that should be saved at {} from url {}.".format(outpath,url)
print inst
return None
def dlimage_basepath_integritycheck(url, basepath, logf=None):
import subprocess as sub
if not url:
return None
pos_slash = [pos for pos,c in enumerate(url) if c=="/"]
if not pos_slash:
return None
file_img = url[pos_slash[-1]+1:]
# path with time and random to ensure unique names
outpath = os.path.join(basepath,str(time.time())+'_'+str(np.int32(np.random.random()*(10e6)))+'_'+file_img)
mkpath(outpath)
#print "Downloading image from {} to {}.".format(url,outpath)
try:
#r = requests.get(url, stream=True, timeout=imagedltimeout)
# verify=False induces a InsecureRequestWarning
r = session.get(url, stream=True, timeout=imagedltimeout, verify=False)
if r.status_code == 200:
if int(r.headers['content-length']) == 0:
raise ValueError("Empty image.")
with open(outpath, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
# integrity check here
ok_tag = '[OK]'
error_tag = '[ERROR]'
command = 'jpeginfo -c '+ outpath
output, error = sub.Popen(command.split(' '), stdout=sub.PIPE, stderr=sub.PIPE).communicate()
if output.find(ok_tag)<0 or output.find(error_tag)>=0:
# some images are not JPEG, either PNG or even HTML...
raise ValueError("Integrity check failed, output was: {}".format(output.strip()))
return outpath
except Exception as inst:
if logf:
logf.write("[dlimage_basepath_integritycheck: error] Download failed for img that should be saved at {} from url {}. {}\n".format(outpath, url, inst))
else:
print "[dlimage_basepath_integritycheck: error] Download failed for img that should be saved at {} from url {}. {}".format(outpath, url, inst)
return None
def dlimage(url,logf=None):
return dlimage_basepath(url,'./',logf)
def dlimage_args(args):
if len(args)==2:
#print args[0],args[1]
return dlimage_basepath(args[0],args[1])
else:
print "[dl_image_args: warning] incorrect agruments: {}.".format(args)
return None
def dlimage_args_integritycheck(args):
if len(args)==2:
#print args[0],args[1]
return dlimage_basepath_integritycheck(args[0], args[1])
else:
print "[dl_image_args_integritycheck: warning] incorrect agruments: {}.".format(args)
return None
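# Usage sketch: dlimage_args and dlimage_args_integritycheck each take a single
# (url, basepath) tuple, which makes them convenient targets for a worker pool.
# The urls and basepath below are placeholders:
#
#   from multiprocessing import Pool
#   args = [(url, "/tmp/images/") for url in ["http://example.com/a.jpg",
#                                             "http://example.com/b.jpg"]]
#   saved_paths = Pool(4).map(dlimage_args_integritycheck, args)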
| [
"[email protected]"
] | |
5d7ac2ba25b18ff4484f8328d3f21f2d5fe93401 | 810ce1c1ac47743e253171ec7541c0e431d952c2 | /standard_library/Concurrency/Subprocess/subprocess_signal_parent_shell.py | f65410bbf08ac27c3089d736b913256dd8f8f41d | [] | no_license | hjlarry/practise-py | 91052c25dc7ab706c6234f6d657db76667a27124 | 871e06b9652d356f55e3888f1f7ea180ac2b1954 | refs/heads/master | 2022-09-11T17:47:48.557194 | 2022-08-10T02:07:24 | 2022-08-10T02:07:24 | 136,263,989 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | """
The pid used to send the signal differs from the id of the child process that runs the
shell script and waits for the signal, because three separate processes interact in this
example:
1. The main program, subprocess_signal_parent_shell.py
2. The shell process the main program creates to run the script.
3. The program signal_child.py
If a process created by Popen spawns child processes of its own, those children will not
receive any signal that is sent to their parent.
"""
import os
import signal
import subprocess
import tempfile
import time
import sys
print("由于父进程创建的子进程是shell,shell再创建的进程才是signal_child,signal_child无法收到信号")
script = """#!/bin/sh
echo "Shell script in process $$"
set -x
python3 signal_child.py
"""
script_file = tempfile.NamedTemporaryFile("wt")
script_file.write(script)
script_file.flush()
proc = subprocess.Popen(["sh", script_file.name])
print(f"Parent: Pausing before signal {proc.pid}")
sys.stdout.flush()
time.sleep(1)
print(f"Parent: Signaling child {proc.pid}")
sys.stdout.flush()
os.kill(proc.pid, signal.SIGUSR1)
time.sleep(3)
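# To deliver the signal to signal_child.py as well, a common variant is to start the shell
# in its own session (process group) and signal the whole group instead of a single pid:
#
#   proc = subprocess.Popen(["sh", script_file.name], start_new_session=True)
#   os.killpg(os.getpgid(proc.pid), signal.SIGUSR1)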
| [
"[email protected]"
] | |
e27f776e66186c3805e38f5fe1037c380b83a772 | 97f2f0d821ce8d12b6d03f200692721418458e4b | /ths/test/testsentimentensemble.py | 828bd399e2ea80bf67545e102de46b91a1a2fe46 | [] | no_license | manuelr417/DetectDiseaseTHS | 0851f3c2fe5caa460eacfe1fc57c790fcd43fd0a | 43ae6482a4e3009fcf0899d0a1047590c4c77f7f | refs/heads/master | 2021-04-15T08:23:43.430178 | 2020-04-13T11:46:34 | 2020-04-13T11:46:34 | 126,485,918 | 0 | 3 | null | 2018-10-17T13:32:44 | 2018-03-23T13:01:29 | Python | UTF-8 | Python | false | false | 488 | py | from ths.nn.sequences.processemsemble import ProcessTweetsWord2VecOnePassEnsemble
def main():
print("Working:")
#P = ProcessTweetsWord2VecOnePass2DCNNv2_1("data/cleantextlabels3.csv", "trained/embedding3.csv")
P = ProcessTweetsWord2VecOnePassEnsemble("data/cleantextlabels3.csv", "data/glove.6B.50d.txt")
    # model12cnnv2 worked well
P.process("trained/modelensemble6.json", "trained/modelensemble6.h5", plot=True, epochs=20)
    # damn
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
64e423abf7ebbca4e0426ebdce632030f0eb92f9 | ae87b11560c543cb678c52a28916ea2252d7aa52 | /tests/parsers/mac_appfirewall.py | af22d404ed067295db1745d695a435ad49dfadcc | [
"Apache-2.0"
] | permissive | CNR-ITTIG/plasodfaxp | 19ccf77d0be62cfa8a9b246eb6797cf64a480d80 | 923797fc00664fa9e3277781b0334d6eed5664fd | refs/heads/master | 2016-09-13T11:14:08.877399 | 2016-04-11T15:01:42 | 2016-04-11T15:01:42 | 55,975,921 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,765 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Mac AppFirewall log file parser."""
import unittest
# pylint: disable=unused-import
from plaso.formatters import mac_appfirewall as mac_appfirewall_formatter
from plaso.lib import timelib
from plaso.parsers import mac_appfirewall
from tests.parsers import test_lib
class MacAppFirewallUnitTest(test_lib.ParserTestCase):
"""Tests for Mac AppFirewall log file parser."""
def setUp(self):
"""Makes preparations before running an individual test."""
self._parser = mac_appfirewall.MacAppFirewallParser()
def testParseFile(self):
"""Test parsing of a Mac Wifi log file."""
knowledge_base_values = {u'year': 2013}
test_file = self._GetTestFilePath([u'appfirewall.log'])
event_queue_consumer = self._ParseFile(
self._parser, test_file, knowledge_base_values=knowledge_base_values)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 47)
event_object = event_objects[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-02 04:07:35')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'socketfilterfw[112]')
self.assertEqual(event_object.computer_name, u'DarkTemplar-2.local')
self.assertEqual(event_object.status, u'Error')
self.assertEqual(event_object.process_name, u'Logging')
self.assertEqual(event_object.action, u'creating /var/log/appfirewall.log')
expected_msg = (
u'Computer: DarkTemplar-2.local '
u'Agent: socketfilterfw[112] '
u'Status: Error '
u'Process name: Logging '
u'Log: creating /var/log/appfirewall.log')
expected_msg_short = (
u'Process name: Logging '
u'Status: Error')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
event_object = event_objects[9]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-11-03 13:25:15')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.agent, u'socketfilterfw[87]')
self.assertEqual(event_object.computer_name, u'DarkTemplar-2.local')
self.assertEqual(event_object.status, u'Info')
self.assertEqual(event_object.process_name, u'Dropbox')
self.assertEqual(event_object.action, u'Allow TCP LISTEN (in:0 out:1)')
expected_msg = (
u'Computer: DarkTemplar-2.local '
u'Agent: socketfilterfw[87] '
u'Status: Info '
u'Process name: Dropbox '
u'Log: Allow TCP LISTEN (in:0 out:1)')
expected_msg_short = (
u'Process name: Dropbox '
u'Status: Info')
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
# Check repeated lines.
event_object = event_objects[38]
repeated_event_object = event_objects[39]
self.assertEqual(event_object.agent, repeated_event_object.agent)
self.assertEqual(
event_object.computer_name, repeated_event_object.computer_name)
self.assertEqual(event_object.status, repeated_event_object.status)
self.assertEqual(
event_object.process_name, repeated_event_object.process_name)
self.assertEqual(event_object.action, repeated_event_object.action)
# Year changes.
event_object = event_objects[45]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2013-12-31 23:59:23')
self.assertEqual(event_object.timestamp, expected_timestamp)
event_object = event_objects[46]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-01 01:13:23')
self.assertEqual(event_object.timestamp, expected_timestamp)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
f0148135b7890c0e3aa022d70d08522b3a367bec | c49a6e67a63a541f8d420e725af155505d1e7f84 | /Tree/unique-binary-search-trees-ii.py | edd7063cc4f05e9ecfc78755a5d57aa38199fcdf | [] | no_license | wttttt-wang/leetcode_withTopics | b41ed0f8a036fd00f3b457e5b56efe32f872ca13 | e2837f3d6c23f012148a2d1f9d0ef6d34d4e6912 | refs/heads/master | 2021-09-05T05:03:47.519344 | 2018-01-24T08:28:58 | 2018-01-24T08:28:58 | 112,893,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | """
Unique Binary Search Trees II
@ Tree + Recursion
"""
class Solution(object):
def generateTrees(self, n):
"""
:type n: int
:rtype: List[TreeNode]
"""
if n < 1:
return []
return self.helper(1, n)
def helper(self, start, end):
if start > end:
return [None]
results = []
for i in range(start, end + 1):
ls, rs = self.helper(start, i - 1), self.helper(i + 1, end)
for l in ls:
for r in rs:
root = TreeNode(i)
root.left, root.right = l, r
results.append(root)
return results
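# Minimal usage sketch (assumes the TreeNode definition above): for n = 3 there are
# 5 structurally unique BSTs, i.e. the Catalan number C(3).
if __name__ == '__main__':
    trees = Solution().generateTrees(3)
    print(len(trees))                      # -> 5
    print(sorted(t.val for t in trees))    # root values of the generated trees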
| [
"[email protected]"
] | |
7cd3bda3b5b650d556ae4214a4aabe90dc98c7c0 | 1c2111220259c76520f59be5e4aa67f32e638127 | /google/cloud/securitycenter_v1p1beta1/services/security_center/transports/base.py | 784492f2da9fc68bd8c2523cbcc1b178531e77ea | [
"Apache-2.0"
] | permissive | renovate-bot/python-securitycenter | 81ca5e96340bcf3151faa43d08e0bc74fd11a4d8 | 729ee2f7bffad25f93777d5fa44ed22ac9dd51af | refs/heads/master | 2023-06-09T19:25:22.974179 | 2021-08-27T18:44:46 | 2021-08-27T18:44:46 | 239,139,881 | 0 | 0 | Apache-2.0 | 2020-02-08T13:51:46 | 2020-02-08T13:51:46 | null | UTF-8 | Python | false | false | 23,098 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.securitycenter_v1p1beta1.types import finding
from google.cloud.securitycenter_v1p1beta1.types import finding as gcs_finding
from google.cloud.securitycenter_v1p1beta1.types import notification_config
from google.cloud.securitycenter_v1p1beta1.types import (
notification_config as gcs_notification_config,
)
from google.cloud.securitycenter_v1p1beta1.types import organization_settings
from google.cloud.securitycenter_v1p1beta1.types import (
organization_settings as gcs_organization_settings,
)
from google.cloud.securitycenter_v1p1beta1.types import (
security_marks as gcs_security_marks,
)
from google.cloud.securitycenter_v1p1beta1.types import securitycenter_service
from google.cloud.securitycenter_v1p1beta1.types import source
from google.cloud.securitycenter_v1p1beta1.types import source as gcs_source
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-securitycenter",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class SecurityCenterTransport(abc.ABC):
"""Abstract transport class for SecurityCenter."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "securitycenter.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials is service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(
cls, host: str, scopes: Optional[Sequence[str]]
) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_source: gapic_v1.method.wrap_method(
self.create_source, default_timeout=60.0, client_info=client_info,
),
self.create_finding: gapic_v1.method.wrap_method(
self.create_finding, default_timeout=60.0, client_info=client_info,
),
self.create_notification_config: gapic_v1.method.wrap_method(
self.create_notification_config,
default_timeout=60.0,
client_info=client_info,
),
self.delete_notification_config: gapic_v1.method.wrap_method(
self.delete_notification_config,
default_timeout=60.0,
client_info=client_info,
),
self.get_iam_policy: gapic_v1.method.wrap_method(
self.get_iam_policy,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_notification_config: gapic_v1.method.wrap_method(
self.get_notification_config,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_organization_settings: gapic_v1.method.wrap_method(
self.get_organization_settings,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.get_source: gapic_v1.method.wrap_method(
self.get_source,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.group_assets: gapic_v1.method.wrap_method(
self.group_assets,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=480.0,
),
default_timeout=480.0,
client_info=client_info,
),
self.group_findings: gapic_v1.method.wrap_method(
self.group_findings,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=480.0,
),
default_timeout=480.0,
client_info=client_info,
),
self.list_assets: gapic_v1.method.wrap_method(
self.list_assets,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=480.0,
),
default_timeout=480.0,
client_info=client_info,
),
self.list_findings: gapic_v1.method.wrap_method(
self.list_findings,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=480.0,
),
default_timeout=480.0,
client_info=client_info,
),
self.list_notification_configs: gapic_v1.method.wrap_method(
self.list_notification_configs,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.list_sources: gapic_v1.method.wrap_method(
self.list_sources,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.run_asset_discovery: gapic_v1.method.wrap_method(
self.run_asset_discovery, default_timeout=60.0, client_info=client_info,
),
self.set_finding_state: gapic_v1.method.wrap_method(
self.set_finding_state, default_timeout=60.0, client_info=client_info,
),
self.set_iam_policy: gapic_v1.method.wrap_method(
self.set_iam_policy, default_timeout=60.0, client_info=client_info,
),
self.test_iam_permissions: gapic_v1.method.wrap_method(
self.test_iam_permissions,
default_retry=retries.Retry(
initial=0.1,
maximum=60.0,
multiplier=1.3,
predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=60.0,
),
default_timeout=60.0,
client_info=client_info,
),
self.update_finding: gapic_v1.method.wrap_method(
self.update_finding, default_timeout=60.0, client_info=client_info,
),
self.update_notification_config: gapic_v1.method.wrap_method(
self.update_notification_config,
default_timeout=60.0,
client_info=client_info,
),
self.update_organization_settings: gapic_v1.method.wrap_method(
self.update_organization_settings,
default_timeout=60.0,
client_info=client_info,
),
self.update_source: gapic_v1.method.wrap_method(
self.update_source, default_timeout=60.0, client_info=client_info,
),
self.update_security_marks: gapic_v1.method.wrap_method(
self.update_security_marks,
default_timeout=480.0,
client_info=client_info,
),
}
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_source(
self,
) -> Callable[
[securitycenter_service.CreateSourceRequest],
Union[gcs_source.Source, Awaitable[gcs_source.Source]],
]:
raise NotImplementedError()
@property
def create_finding(
self,
) -> Callable[
[securitycenter_service.CreateFindingRequest],
Union[gcs_finding.Finding, Awaitable[gcs_finding.Finding]],
]:
raise NotImplementedError()
@property
def create_notification_config(
self,
) -> Callable[
[securitycenter_service.CreateNotificationConfigRequest],
Union[
gcs_notification_config.NotificationConfig,
Awaitable[gcs_notification_config.NotificationConfig],
],
]:
raise NotImplementedError()
@property
def delete_notification_config(
self,
) -> Callable[
[securitycenter_service.DeleteNotificationConfigRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def get_iam_policy(
self,
) -> Callable[
[iam_policy_pb2.GetIamPolicyRequest],
Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
]:
raise NotImplementedError()
@property
def get_notification_config(
self,
) -> Callable[
[securitycenter_service.GetNotificationConfigRequest],
Union[
notification_config.NotificationConfig,
Awaitable[notification_config.NotificationConfig],
],
]:
raise NotImplementedError()
@property
def get_organization_settings(
self,
) -> Callable[
[securitycenter_service.GetOrganizationSettingsRequest],
Union[
organization_settings.OrganizationSettings,
Awaitable[organization_settings.OrganizationSettings],
],
]:
raise NotImplementedError()
@property
def get_source(
self,
) -> Callable[
[securitycenter_service.GetSourceRequest],
Union[source.Source, Awaitable[source.Source]],
]:
raise NotImplementedError()
@property
def group_assets(
self,
) -> Callable[
[securitycenter_service.GroupAssetsRequest],
Union[
securitycenter_service.GroupAssetsResponse,
Awaitable[securitycenter_service.GroupAssetsResponse],
],
]:
raise NotImplementedError()
@property
def group_findings(
self,
) -> Callable[
[securitycenter_service.GroupFindingsRequest],
Union[
securitycenter_service.GroupFindingsResponse,
Awaitable[securitycenter_service.GroupFindingsResponse],
],
]:
raise NotImplementedError()
@property
def list_assets(
self,
) -> Callable[
[securitycenter_service.ListAssetsRequest],
Union[
securitycenter_service.ListAssetsResponse,
Awaitable[securitycenter_service.ListAssetsResponse],
],
]:
raise NotImplementedError()
@property
def list_findings(
self,
) -> Callable[
[securitycenter_service.ListFindingsRequest],
Union[
securitycenter_service.ListFindingsResponse,
Awaitable[securitycenter_service.ListFindingsResponse],
],
]:
raise NotImplementedError()
@property
def list_notification_configs(
self,
) -> Callable[
[securitycenter_service.ListNotificationConfigsRequest],
Union[
securitycenter_service.ListNotificationConfigsResponse,
Awaitable[securitycenter_service.ListNotificationConfigsResponse],
],
]:
raise NotImplementedError()
@property
def list_sources(
self,
) -> Callable[
[securitycenter_service.ListSourcesRequest],
Union[
securitycenter_service.ListSourcesResponse,
Awaitable[securitycenter_service.ListSourcesResponse],
],
]:
raise NotImplementedError()
@property
def run_asset_discovery(
self,
) -> Callable[
[securitycenter_service.RunAssetDiscoveryRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def set_finding_state(
self,
) -> Callable[
[securitycenter_service.SetFindingStateRequest],
Union[finding.Finding, Awaitable[finding.Finding]],
]:
raise NotImplementedError()
@property
def set_iam_policy(
self,
) -> Callable[
[iam_policy_pb2.SetIamPolicyRequest],
Union[policy_pb2.Policy, Awaitable[policy_pb2.Policy]],
]:
raise NotImplementedError()
@property
def test_iam_permissions(
self,
) -> Callable[
[iam_policy_pb2.TestIamPermissionsRequest],
Union[
iam_policy_pb2.TestIamPermissionsResponse,
Awaitable[iam_policy_pb2.TestIamPermissionsResponse],
],
]:
raise NotImplementedError()
@property
def update_finding(
self,
) -> Callable[
[securitycenter_service.UpdateFindingRequest],
Union[gcs_finding.Finding, Awaitable[gcs_finding.Finding]],
]:
raise NotImplementedError()
@property
def update_notification_config(
self,
) -> Callable[
[securitycenter_service.UpdateNotificationConfigRequest],
Union[
gcs_notification_config.NotificationConfig,
Awaitable[gcs_notification_config.NotificationConfig],
],
]:
raise NotImplementedError()
@property
def update_organization_settings(
self,
) -> Callable[
[securitycenter_service.UpdateOrganizationSettingsRequest],
Union[
gcs_organization_settings.OrganizationSettings,
Awaitable[gcs_organization_settings.OrganizationSettings],
],
]:
raise NotImplementedError()
@property
def update_source(
self,
) -> Callable[
[securitycenter_service.UpdateSourceRequest],
Union[gcs_source.Source, Awaitable[gcs_source.Source]],
]:
raise NotImplementedError()
@property
def update_security_marks(
self,
) -> Callable[
[securitycenter_service.UpdateSecurityMarksRequest],
Union[
gcs_security_marks.SecurityMarks,
Awaitable[gcs_security_marks.SecurityMarks],
],
]:
raise NotImplementedError()
__all__ = ("SecurityCenterTransport",)
| [
"[email protected]"
] | |
1b45af41c9bb4a3ddf55e8aac6b235e7d8843cac | f5d2a1459c81eb23a745bd63f41ef980c41ea0a4 | /ZG-PhaseFour/code/controller/diffcontroller.py | 22365e714fd1d2fa625911ff9f88462b4fcaa379 | [] | no_license | ErBingBing/django-tonado-crawler | 6800bb0269e99e2454fb0a9079175ffe9d4d0a0b | db31b4cdf7ecc509f1a87aa325621943df825e98 | refs/heads/master | 2021-08-22T11:30:08.419583 | 2017-11-30T04:04:40 | 2017-11-30T04:04:40 | 112,562,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,508 | py | # -*- coding: utf-8 -*-
###################################################################################################
# @file: diffcontroller.py
# @author: Sun Xinghua
# @date: 2016/11/21 0:15
# @version: Ver0.0.0.100
# @note:
###################################################################################################
from configuration import constant
from configuration.environment.configure import SpiderConfigure
from dao.spiderdao import SpiderDao
from log.spiderlog import Logger
from utility import const
from utility.fileutil import FileUtility
from utility.timeutility import TimeUtility
# NOTE: URLStorage, StatisticsManager and URLCommentInfo are referenced below but are not imported
# in this file; they are assumed to be provided elsewhere in the project.
################################################################################################################
# @class:DiffController
# @author:Sun Xinghua
# @date:2016/11/21 9:44
# @note:
################################################################################################################
class DiffController:
DIFF_FILE_NAME_FORMAT = '{suffix}_{ts}_diff.txt'
###################################################################################################
# @functions:__init__
# @param: none
# @return:none
    # @note: Initialize internal variables
###################################################################################################
def __init__(self):
self.database = SpiderDao()
suffix = SpiderConfigure.getconfig(const.SPIDER_STORAGE_DOMAIN,
const.SPIDER_OUTPUT_FILENAME_SUFFIX)
ts = TimeUtility.getcurrentdate(TimeUtility.TIMESTAMP_FORMAT)
self.difffile = '{path}/{dt}/{file}'.format(
path=SpiderConfigure.getinstance().getconfig(const.SPIDER_STORAGE_DOMAIN, const.SPIDER_OUTPUT_PATH),
dt=TimeUtility.getcurrentdate(),
file=DiffController.DIFF_FILE_NAME_FORMAT.format(suffix=suffix, ts=ts))
###################################################################################################
    # @functions: show
    # @param: none
    # @return: none
    # @note: Print the diff information to the log
###################################################################################################
def show(self):
diffinfolist = {}
predict = self.database.getall()
instances = URLStorage.getinstances()
Logger.getlogging().info(
'##############################################################################################')
Logger.getlogging().info('%8s|%8s|%8s|%8s|%8s|%8s|%8s|%20s|%16s' %
('key',
'flag',
'cmtnum',
'clicknum',
'votenum',
'fansnum',
'realnum',
'pubtime',
'timestamp'))
for ins in instances.keys():
diffinfolist[ins] = DiffInfomation()
if ins != constant.SPIDER_CHANNEL_S1:
diffinfolist[ins].channel = constant.SPIDER_CHANNEL_S2
diffinfolist[ins].query = ins
for key in instances[ins].urlinfodict:
if instances[ins].urlinfodict[key].realnum > 0:
StatisticsManager.updategotcomments(1)
elif instances[ins].urlinfodict[key].cmtnum > 0:
StatisticsManager.updatefailgotcomment(1)
if predict and key in predict:
info = URLCommentInfo.fromstring(predict[key])
if not instances[ins].urlinfodict[key].isequal(info):
self.printinfo(ins, info, '-')
self.printinfo(ins, instances[ins].urlinfodict[key], '+')
if instances[ins].urlinfodict[key].cmtnum > 0:
diffinfolist[ins].deltacmt += self.diff(instances[ins].urlinfodict[key].cmtnum, info.cmtnum)
else:
diffinfolist[ins].deltacmt += self.diff(instances[ins].urlinfodict[key].realnum,
info.realnum)
diffinfolist[ins].deltaclick += self.diff(instances[ins].urlinfodict[key].clicknum,
info.clicknum)
diffinfolist[ins].deltavote += self.diff(instances[ins].urlinfodict[key].votenum, info.votenum)
diffinfolist[ins].deltafans += self.diff(instances[ins].urlinfodict[key].fansnum, info.fansnum)
else:
self.printinfo(ins, instances[ins].urlinfodict[key], '+')
if instances[ins].urlinfodict[key].cmtnum > 0:
diffinfolist[ins].deltacmt += instances[ins].urlinfodict[key].cmtnum
else:
diffinfolist[ins].deltacmt += max(0, instances[ins].urlinfodict[key].realnum)
diffinfolist[ins].deltaclick += max(0, instances[ins].urlinfodict[key].clicknum)
diffinfolist[ins].deltavote += max(0, instances[ins].urlinfodict[key].votenum)
diffinfolist[ins].deltafans += max(0, instances[ins].urlinfodict[key].fansnum)
Logger.getlogging().info(
'##############################################################################################')
if FileUtility.exists(self.difffile):
FileUtility.remove(self.difffile)
for key in diffinfolist.keys():
Logger.getlogging().info(diffinfolist[key].tostring())
FileUtility.writeline(self.difffile, diffinfolist[key].tostring())
###################################################################################################
# @functions:printinfo
    # @param: info  the comment info record to print
    # @param: flag  '+' for an added record, '-' for a removed record
    # @return: none
    # @note: Write one diff record to the log
###################################################################################################
def printinfo(self, key, info, flag):
Logger.getlogging().info('%8s|%8s|%8s|%8s|%8s|%8s|%8s|%20s|%16s' %
(key,
flag,
str(info.cmtnum if info.cmtnum > 0 else info.realnum),
str(info.clicknum),
str(info.votenum),
str(info.fansnum),
str(info.realnum),
str(info.pubtime),
str(info.timestamp)))
def diff(self, x, y):
delta = max(0, x) - max(0, y)
return max(0, delta)
class DiffInfomation:
STRING_FORMAT = '{channel}\t{query}\t{cmtnum}\t{clicknum}\t{votenum}\t{fansnum}'
def __init__(self):
self.channel = constant.SPIDER_CHANNEL_S1
self.query = ''
self.deltacmt = 0
self.deltaclick = 0
self.deltavote = 0
self.deltafans = 0
def tostring(self):
return DiffInfomation.STRING_FORMAT.format(channel=self.channel, query=self.query, cmtnum=self.deltacmt,
clicknum=self.deltaclick, votenum=self.deltavote,
fansnum=self.deltafans)
| [
"[email protected]"
] | |
81b0af19642bc53232aa4eb85eae5f78ac7a6495 | 4e8ac215b672b333f19da87787c0d8768fee439e | /MIDI Remote Scripts/ableton/v2/control_surface/components/drum_group.py | 3a23650ce45ebcd2d6372554cd2f9e072ac3a329 | [
"MIT"
] | permissive | aarkwright/ableton_devices | 593f47293c673aa56f6e0347ca6444b7fce2812a | fe5df3bbd64ccbc136bba722ba1e131a02969798 | refs/heads/master | 2020-07-02T08:11:21.137438 | 2019-08-09T13:48:06 | 2019-08-09T13:48:06 | 201,467,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,824 | py | # uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\ableton\v2\control_surface\components\drum_group.py
# Compiled at: 2018-11-30 15:48:12
from __future__ import absolute_import, print_function, unicode_literals
from itertools import imap
from ...base import depends, find_if, first, clamp, listenable_property, listens_group, listens, liveobj_changed, liveobj_valid
from ..control import ButtonControl
from .slide import SlideComponent, Slideable
from .playable import PlayableComponent
BASE_DRUM_RACK_NOTE = 36
class DrumGroupComponent(PlayableComponent, SlideComponent, Slideable):
mute_button = ButtonControl()
solo_button = ButtonControl()
delete_button = ButtonControl()
quantize_button = ButtonControl()
@depends(set_pad_translations=None)
def __init__(self, translation_channel=None, set_pad_translations=None, *a, **k):
self._drum_group_device = None
self._selected_drum_pad = None
self._all_drum_pads = []
self._assigned_drum_pads = []
self._translation_channel = translation_channel
super(DrumGroupComponent, self).__init__(*a, **k)
self._set_pad_translations = set_pad_translations
return
position_count = 32
page_length = 4
page_offset = 1
def contents_range(self, pmin, pmax):
pos_count = self.position_count
first_pos = max(int(pmin - 0.05), 0)
last_pos = min(int(pmax + 0.2), pos_count)
return xrange(first_pos, last_pos)
def contents(self, index):
drum = self._drum_group_device
if liveobj_valid(drum):
return any(imap(lambda pad: pad.chains, drum.drum_pads[index * 4:index * 4 + 4]))
return False
@property
def position(self):
if liveobj_valid(self._drum_group_device):
return self._drum_group_device.view.drum_pads_scroll_position
return 0
@position.setter
def position(self, index):
assert 0 <= index <= 28
if liveobj_valid(self._drum_group_device):
self._drum_group_device.view.drum_pads_scroll_position = index
@property
def assigned_drum_pads(self):
return self._assigned_drum_pads
@property
def min_pitch(self):
if self.assigned_drum_pads:
return self.assigned_drum_pads[0].note
return BASE_DRUM_RACK_NOTE
@property
def max_pitch(self):
if self.assigned_drum_pads:
return self.assigned_drum_pads[(-1)].note
return BASE_DRUM_RACK_NOTE
def _update_assigned_drum_pads(self):
assigned_drum_pads = []
visible_drum_pads = self._drum_group_device.visible_drum_pads if liveobj_valid(self._drum_group_device) else None
if visible_drum_pads and self._all_drum_pads:
first_pad = first(visible_drum_pads)
if first_pad:
size = self.width * self.height
first_note = first_pad.note
if first_note > 128 - size:
size = 128 - first_note
offset = clamp(first_note, 0, 128 - len(visible_drum_pads))
assigned_drum_pads = self._all_drum_pads[offset:offset + size]
self._assigned_drum_pads = assigned_drum_pads
return
def set_matrix(self, matrix):
super(DrumGroupComponent, self).set_matrix(matrix)
self._update_assigned_drum_pads()
self._create_and_set_pad_translations()
def set_drum_group_device(self, drum_group_device):
if drum_group_device and not drum_group_device.can_have_drum_pads:
drum_group_device = None
if drum_group_device != self._drum_group_device:
self.__on_visible_drum_pads_changed.subject = drum_group_device
drum_group_view = drum_group_device.view if drum_group_device else None
self.__on_selected_drum_pad_changed.subject = drum_group_view
self.__on_drum_pads_scroll_position_changed.subject = drum_group_view
self._drum_group_device = drum_group_device
self._update_drum_pad_listeners()
self._update_selected_drum_pad()
self._update_note_translations()
super(DrumGroupComponent, self).update()
return
def update(self):
super(DrumGroupComponent, self).update()
if self.is_enabled():
self.notify_position()
def _update_drum_pad_listeners(self):
"""
add and remove listeners for visible drum pads, including
mute and solo state
"""
if liveobj_valid(self._drum_group_device):
self._all_drum_pads = self._drum_group_device.drum_pads
visible_drum_pads = self._drum_group_device.visible_drum_pads
self.__on_solo_changed.replace_subjects(visible_drum_pads)
self.__on_mute_changed.replace_subjects(visible_drum_pads)
self._update_assigned_drum_pads()
self._update_note_translations()
@listens_group(b'solo')
def __on_solo_changed(self, pad):
self._update_led_feedback()
@listens_group(b'mute')
def __on_mute_changed(self, pad):
self._update_led_feedback()
def _update_led_feedback(self):
if liveobj_valid(self._drum_group_device):
super(DrumGroupComponent, self)._update_led_feedback()
def _update_button_color(self, button):
pad = self._pad_for_button(button)
button.color = self._color_for_pad(pad) if pad else b'DefaultButton.On'
def _color_for_pad(self, pad):
has_soloed_pads = bool(find_if(lambda pad: pad.solo, self._all_drum_pads))
button_color = b'DrumGroup.PadEmpty'
if pad == self._selected_drum_pad:
button_color = b'DrumGroup.PadSelected'
if has_soloed_pads and not pad.solo and not pad.mute:
button_color = b'DrumGroup.PadSelectedNotSoloed'
elif pad.mute and not pad.solo:
button_color = b'DrumGroup.PadMutedSelected'
elif has_soloed_pads and pad.solo:
button_color = b'DrumGroup.PadSoloedSelected'
elif pad.chains:
button_color = b'DrumGroup.PadFilled'
if has_soloed_pads and not pad.solo:
button_color = b'DrumGroup.PadFilled' if not pad.mute else b'DrumGroup.PadMuted'
elif not has_soloed_pads and pad.mute:
button_color = b'DrumGroup.PadMuted'
elif has_soloed_pads and pad.solo:
button_color = b'DrumGroup.PadSoloed'
return button_color
def _button_coordinates_to_pad_index(self, first_note, coordinates):
y, x = coordinates
y = self.height - y - 1
if x < 4 and y >= 4:
first_note += 16
elif x >= 4 and y < 4:
first_note += 4 * self.width
elif x >= 4 and y >= 4:
first_note += 4 * self.width + 16
index = x % 4 + y % 4 * 4 + first_note
return index
def _on_matrix_pressed(self, button):
selected_drum_pad = self._pad_for_button(button)
if self.mute_button.is_pressed:
selected_drum_pad.mute = not selected_drum_pad.mute
if self.solo_button.is_pressed:
selected_drum_pad.solo = not selected_drum_pad.solo
if self.quantize_button.is_pressed:
button.color = b'DrumGroup.PadAction'
self.quantize_pitch(selected_drum_pad.note)
if self.delete_button.is_pressed:
button.color = b'DrumGroup.PadAction'
self.delete_pitch(selected_drum_pad)
if self.select_button.is_pressed:
self._drum_group_device.view.selected_drum_pad = selected_drum_pad
self.select_drum_pad(selected_drum_pad)
super(DrumGroupComponent, self)._on_matrix_pressed(button)
if self.mute_button.is_pressed or self.solo_button.is_pressed:
self._update_led_feedback()
@listens(b'visible_drum_pads')
def __on_visible_drum_pads_changed(self):
self._update_drum_pad_listeners()
self._update_led_feedback()
@listens(b'drum_pads_scroll_position')
def __on_drum_pads_scroll_position_changed(self):
self._update_note_translations()
self._update_led_feedback()
self.notify_position()
@listens(b'selected_drum_pad')
def __on_selected_drum_pad_changed(self):
self._update_selected_drum_pad()
def _update_selected_drum_pad(self):
selected_drum_pad = self._drum_group_device.view.selected_drum_pad if liveobj_valid(self._drum_group_device) else None
if liveobj_changed(self._selected_drum_pad, selected_drum_pad):
self._selected_drum_pad = selected_drum_pad
self._update_led_feedback()
self._on_selected_drum_pad_changed()
return
def _on_selected_drum_pad_changed(self):
pass
@mute_button.value
def mute_button(self, value, button):
self._set_control_pads_from_script(bool(value))
@solo_button.value
def solo_button(self, value, button):
self._set_control_pads_from_script(bool(value))
@delete_button.value
def delete_button(self, value, button):
self._set_control_pads_from_script(bool(value))
@quantize_button.value
def quantize_button(self, value, button):
self._set_control_pads_from_script(bool(value))
@property
def has_assigned_pads(self):
return self._assigned_drum_pads and liveobj_valid(first(self._assigned_drum_pads))
def _pad_for_button(self, button):
if self.has_assigned_pads:
index = self._button_coordinates_to_pad_index(first(self._assigned_drum_pads).note, button.coordinate)
if index < 128:
return self._all_drum_pads[index]
return
return
def _note_translation_for_button(self, button):
identifier = None
channel = None
if self.has_assigned_pads:
identifier = self._button_coordinates_to_pad_index(first(self._assigned_drum_pads).note, button.coordinate)
channel = self._translation_channel
return (
identifier, channel)
def _update_note_translations(self):
if self._assigned_drum_pads:
if not self._can_set_pad_translations():
super(DrumGroupComponent, self)._update_note_translations()
def _can_set_pad_translations(self):
return self.width <= 4 and self.height <= 4
def _create_and_set_pad_translations(self):
def create_translation_entry(button):
row, col = button.coordinate
return (
col, row, button.identifier, button.channel)
if self._can_set_pad_translations():
translations = []
for button in self.matrix:
button.channel = self._translation_channel
button.identifier = self._button_coordinates_to_pad_index(BASE_DRUM_RACK_NOTE, button.coordinate)
button.enabled = True
translations.append(create_translation_entry(button))
self._set_pad_translations(tuple(translations))
else:
self._update_note_translations()
self._set_pad_translations(None)
return
def select_drum_pad(self, drum_pad):
""" Override when you give it a select button """
raise NotImplementedError
def quantize_pitch(self, note):
""" Override when you give it a quantize button """
raise NotImplementedError
def delete_pitch(self, drum_pad):
""" Override when you give it a delete button """
raise NotImplementedError | [
"[email protected]"
] | |
8a1cc7180086b8e03033515e70b945d413b517ef | 7fe5f16fe49e71926c1dfc3a3b41e28741176f06 | /example.py | 4608fef3c68d41da1e74e4d68aeba516f6aac7ee | [] | no_license | codesharedot/augur-price | 5b7b315fed28a042bb32e0bf5059e96a263bf6f5 | 2fb9e29ba3eab108e09b5d95c5f390bedfd89530 | refs/heads/master | 2020-07-27T04:09:20.915412 | 2020-03-05T17:50:04 | 2020-03-05T17:50:04 | 208,862,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 601 | py | import requests
import json
from forex_python.converter import CurrencyRates
import os
c = CurrencyRates()
rate = c.get_rate('USD', 'EUR')
print(rate)
augur_api_url = 'https://api.coinmarketcap.com/v1/ticker/augur/'
response = requests.get(augur_api_url)
response_json = response.json()
print(response_json)
for coin in response.json():
price = coin.get("price_usd", "U$S Price not provided")
coin_price = float(("{0:.2f}").format(float(price)))
print("$ " + str(coin_price))
coin_price_eur = float(("{0:.2f}").format(float(price)*rate))
print("€ " + str(coin_price_eur))
| [
"[email protected]"
] | |
bc290340823ce97833d91f4123951f04075608e3 | a84e1a1aac96612b32ba5adcc49a4005c0c5129e | /tensorflow_probability/python/experimental/mcmc/__init__.py | 9bebbe5296b9f126968b664c8cafa86a5e6c0a37 | [
"Apache-2.0"
] | permissive | jedisom/probability | 4fc31473d691d242a3e88c179ae3a9c555a29bb6 | 6791e7ce1c2b0a9057a19a8ea697aeaf796d4da7 | refs/heads/master | 2022-04-23T00:21:46.097126 | 2020-04-22T20:03:04 | 2020-04-22T20:04:59 | 258,031,151 | 1 | 0 | Apache-2.0 | 2020-04-22T22:08:57 | 2020-04-22T22:08:56 | null | UTF-8 | Python | false | false | 3,073 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow Probability experimental NUTS package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.experimental.mcmc.elliptical_slice_sampler import EllipticalSliceSampler
from tensorflow_probability.python.experimental.mcmc.nuts import NoUTurnSampler
from tensorflow_probability.python.experimental.mcmc.particle_filter import ess_below_threshold
from tensorflow_probability.python.experimental.mcmc.particle_filter import infer_trajectories
from tensorflow_probability.python.experimental.mcmc.particle_filter import particle_filter
from tensorflow_probability.python.experimental.mcmc.particle_filter import reconstruct_trajectories
from tensorflow_probability.python.experimental.mcmc.particle_filter import resample_deterministic_minimum_error
from tensorflow_probability.python.experimental.mcmc.particle_filter import resample_independent
from tensorflow_probability.python.experimental.mcmc.particle_filter import resample_minimum_variance
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import default_make_hmc_kernel_fn
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import gen_make_hmc_kernel_fn
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import gen_make_transform_hmc_kernel_fn
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import make_rwmh_kernel_fn
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import sample_sequential_monte_carlo
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import simple_heuristic_tuning
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
_allowed_symbols = [
'EllipticalSliceSampler',
'NoUTurnSampler',
'ess_below_threshold',
'infer_trajectories',
'default_make_hmc_kernel_fn',
'gen_make_hmc_kernel_fn',
'gen_make_transform_hmc_kernel_fn',
'make_rwmh_kernel_fn',
'particle_filter',
'sample_sequential_monte_carlo',
'simple_heuristic_tuning',
'reconstruct_trajectories',
'resample_independent',
'resample_minimum_variance',
'resample_deterministic_minimum_error',
]
remove_undocumented(__name__, _allowed_symbols)
| [
"[email protected]"
] | |
492e20fa5f9a33cc62fcd94e23aae05134077702 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/mixedreality/list_object_anchors_account_keys.py | 477a8c32f1615fa9c02298fcbe21bb5a88e16df1 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 2,818 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ListObjectAnchorsAccountKeysResult',
'AwaitableListObjectAnchorsAccountKeysResult',
'list_object_anchors_account_keys',
]
@pulumi.output_type
class ListObjectAnchorsAccountKeysResult:
"""
Developer Keys of account
"""
def __init__(__self__, primary_key=None, secondary_key=None):
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
value of primary key.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
value of secondary key.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListObjectAnchorsAccountKeysResult(ListObjectAnchorsAccountKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListObjectAnchorsAccountKeysResult(
primary_key=self.primary_key,
secondary_key=self.secondary_key)
def list_object_anchors_account_keys(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListObjectAnchorsAccountKeysResult:
"""
Developer Keys of account
API Version: 2021-03-01-preview.
    :param str account_name: Name of a Mixed Reality Account.
:param str resource_group_name: Name of an Azure resource group.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:mixedreality:listObjectAnchorsAccountKeys', __args__, opts=opts, typ=ListObjectAnchorsAccountKeysResult).value
return AwaitableListObjectAnchorsAccountKeysResult(
primary_key=__ret__.primary_key,
secondary_key=__ret__.secondary_key)
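# Minimal usage sketch (illustrative only; the account and resource-group names below are
# assumptions, not values taken from this module):
#
#     import pulumi
#     keys = list_object_anchors_account_keys(
#         account_name="example-object-anchors-account",
#         resource_group_name="example-resource-group")
#     pulumi.export("primaryKey", keys.primary_key)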
| [
"[email protected]"
] | |
55579935208423de76144450d6a979bb0a66cb9c | f856c993a34fa2fbb228369dd267909445fa69b5 | /vel/augmentations/to_tensor.py | d97ac0fb52984932b782019a024aec8100f5995f | [
"MIT"
] | permissive | cclauss/vel | 06fabeb75925ac2509162f12ac82fff3b8291720 | 78a6a20af80ff613898d2983c83fdb223634aaad | refs/heads/master | 2020-04-01T03:46:50.339279 | 2018-10-09T05:36:21 | 2018-10-09T05:36:21 | 152,836,186 | 0 | 0 | MIT | 2018-10-13T04:48:44 | 2018-10-13T04:48:44 | null | UTF-8 | Python | false | false | 436 | py | import numpy as np
import torchvision.transforms.functional as F
import vel.api.data as data
class ToTensor(data.Augmentation):
def __init__(self, mode='x', tags=None):
super().__init__(mode, tags)
def __call__(self, datum):
return F.to_tensor(datum)
def denormalize(self, datum):
return np.transpose(datum.numpy(), (1, 2, 0))
def create(mode='x', tags=None):
return ToTensor(mode, tags)
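# Minimal usage sketch (assumes `image` is a PIL image or an HxWxC uint8 numpy array):
#
#     aug = create(mode='x')
#     tensor = aug(image)                  # CxHxW float tensor scaled to [0, 1]
#     restored = aug.denormalize(tensor)   # back to an HxWxC numpy array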
| [
"[email protected]"
] | |
41cc274eb12a46f98a11e97f115641445f2a7322 | d0bdf444c71b724ecfd59b5bc6850962c56494cb | /labs/03-apply_vis/tests/q1_3.py | f28f5962b8a4bbc4dfe837cb9f86d0772094554c | [] | no_license | ucsd-ets/dsc10-su20-public | 10e3d0ff452b337f222baee330fe60d1465b0071 | 38787e6cc3e6210b4cc8a46350e5120845971c9f | refs/heads/master | 2022-12-13T23:28:20.512649 | 2020-09-03T19:28:06 | 2020-09-03T19:28:06 | 275,905,339 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 957 | py | test = {
'hidden': False,
'name': '1.3',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> # Your answer should be a number
>>> type(mark_hurd_pay) != str
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Don't forget to give your answer in dollars, not millions of
>>> # Dollars!
>>> mark_hurd_pay != 5325
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Don't forget to give your answer in dollars, not millions of
>>> # Dollars!
>>> mark_hurd_pay == 53250000
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
| [
"[email protected]"
] | |
5f1652113a026b33730dd0979a69e1a786d7a16f | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba2842.pngMap.py | 2abe96fbd929a7d98d47428252e3966ff592519d | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba2842.pngMap = [
'11111111111111111111111111111111111111111111111111110000000000000000000001111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000101111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000000000011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000000000000001111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111110000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000000011111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111110000011111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111011111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111101111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100011111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111000001000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111100000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000011111011111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000000000000000010111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000000000000000011111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110100000000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111010000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110000000000000000000000000000000010111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110000000000000000000000000000010010111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110000000000000000000000011000001011111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110000000000000000000000001101111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110010000000000000000000011111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110100000000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000010011001111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111110000000000001010000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000000000111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000110111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111110000000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111100000000000000000111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100000000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111000000000000000000000000111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111010000000000000000000000111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111110000000000000000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111000000000011000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111100000100000000001111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111000100000000000000001011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111110000000000000000000000000000001111111111111111111111111111111111111111111111111111',
]
| [
"[email protected]"
] | |
0f66442a96c3559f8d552b7803a53026e3a5c9f9 | a9cac1a83b74a42b908b1913bd087d14de2f4a11 | /test/git/test_invocations.py | ed17c44d455670a377cf5538adcaa108a32ebd67 | [
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] | permissive | justusschock/python-remote-versioneer | 0ff76cf416a3fc79f9f89592a112d5b4152bddba | 69cda6a931a71772047404705822784dd2f59fcd | refs/heads/master | 2020-06-29T06:36:18.730253 | 2019-08-04T08:14:51 | 2019-08-04T08:14:51 | 200,464,396 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,972 | py | from __future__ import print_function
import os, sys, shutil, unittest, tempfile, tarfile, virtualenv, warnings
sys.path.insert(0, "src")
from from_file import versions_from_file
import common
pyver_major = "py%d" % sys.version_info[0]
pyver = "py%d.%d" % sys.version_info[:2]
if not hasattr(unittest, "skip"): # py26
def _skip(reason):
def _skip_decorator(f):
def _skipped_test(*args, **kwargs):
print("skipping: %s" % reason)
return _skipped_test
return _skip_decorator
unittest.skip = _skip
if not hasattr(unittest, "expectedFailure"):
def _expectedFailure(reason="expected to fail"):
def _ef_decorator(f):
def _ef(*args, **kwargs):
print("skipping: %s" % reason)
return _ef
return _ef_decorator
unittest.expectedFailure = _expectedFailure
class _Invocations(common.Common):
def setUp(self):
if False:
# when debugging, put the generated files in a predictable place
self.testdir = os.path.abspath("t")
if os.path.exists(self.testdir):
return
os.mkdir(self.testdir)
else:
self.testdir = tempfile.mkdtemp()
os.mkdir(self.subpath("cache"))
os.mkdir(self.subpath("cache", "distutils"))
os.mkdir(self.subpath("cache", "setuptools"))
self.gitdir = None
self.projdir = None
def make_venv(self, mode):
if not os.path.exists(self.subpath("venvs")):
os.mkdir(self.subpath("venvs"))
venv_dir = self.subpath("venvs/%s" % mode)
# python3 on OS-X uses a funky two-part executable and an environment
# variable to communicate between them. If this variable is still set
# by the time a virtualenv's 'pip' or 'python' is run, and if that
# command spawns another sys.executable underneath it, that second
# child may use the wrong python, and can install things into the
# real system library instead of the virtualenv. Invoking
# virtualenv.create_environment() clears this as a side-effect, but
# to make things safe I'll just clear this now. See
# https://github.com/pypa/virtualenv/issues/322 and
# https://bugs.python.org/issue22490 for some hints. I tried
# switching to 'venv' on py3, but only py3.4 includes pip, and even
# then it's an ancient version.
os.environ.pop("__PYVENV_LAUNCHER__", None)
virtualenv.logger = virtualenv.Logger([]) # hush
# virtualenv causes DeprecationWarning/ResourceWarning on py3
with warnings.catch_warnings():
warnings.simplefilter("ignore")
virtualenv.create_environment(venv_dir)
return venv_dir
def run_in_venv(self, venv, workdir, command, *args):
bins = {"python": os.path.join(venv, "bin", "python"),
"pip": os.path.join(venv, "bin", "pip"),
"rundemo": os.path.join(venv, "bin", "rundemo"),
"easy_install": os.path.join(venv, "bin", "easy_install"),
}
if command == "pip":
args = ["--no-cache-dir"] + list(args)
maj, min = sys.version_info[0:2]
if ((maj == 2 and min >= 7) or
(maj == 3 and min >= 4) or
maj > 3):
# We prefer pip --isolated, but py2.6/py3.2/py3.3 (at least
# on travis) can't handle the --no-user-cfg that it uses
args = ["--isolated"] + list(args)
return self.command(bins[command], *args, workdir=workdir)
def check_in_venv(self, venv):
out = self.run_in_venv(venv, venv, "rundemo")
v = dict([line.split(":", 1) for line in out.splitlines()])
self.assertEqual(v["version"], "2.0")
return v
def check_in_venv_withlib(self, venv):
v = self.check_in_venv(venv)
self.assertEqual(v["demolib"], "1.0")
# "demolib" has a version of 1.0 and is built with distutils
# "demoapp2-distutils" is v2.0, uses distutils, and has no deps
# "demoapp2-setuptools" is v2.0, uses setuptools, and depends on demolib
# repos and unpacked git-archive tarballs come in two flavors: normal (in
# which the setup.py/setup.cfg/versioneer.py files live in the root of
# the source tree), and "subproject" (where they live in a subdirectory).
# sdists are always "normal" (although they might have come from either
# normal or subproject -style source trees), and wheels/eggs don't have
# these files at all.
# TODO: git-archive subproject-flavor
def make_demolib_sdist(self):
# create an sdist of demolib-1.0 . for the *lib*, we only use the
# tarball, never the repo.
demolib_sdist = self.subpath("cache", "demolib-1.0.tar")
if os.path.exists(demolib_sdist):
return demolib_sdist
libdir = self.subpath("build-demolib")
shutil.copytree("test/demolib", libdir)
shutil.copy("versioneer.py", libdir)
self.git("init", workdir=libdir)
self.python("versioneer.py", "setup", workdir=libdir)
self.git("add", "--all", workdir=libdir)
self.git("commit", "-m", "comment", workdir=libdir)
self.git("tag", "demolib-1.0", workdir=libdir)
self.python("setup.py", "sdist", "--format=tar", workdir=libdir)
created = os.path.join(libdir, "dist", "demolib-1.0.tar")
self.assertTrue(os.path.exists(created))
shutil.copyfile(created, demolib_sdist)
return demolib_sdist
def make_linkdir(self):
# create/populate a fake pypi directory for use with --find-links
linkdir = self.subpath("linkdir")
if os.path.exists(linkdir):
return linkdir
os.mkdir(linkdir)
demolib_sdist = self.make_demolib_sdist()
shutil.copy(demolib_sdist, linkdir)
return linkdir
def make_empty_indexdir(self):
indexdir = self.subpath("indexdir")
if os.path.exists(indexdir):
return indexdir
os.mkdir(indexdir)
return indexdir
def make_distutils_repo(self):
# create a clean repo of demoapp2-distutils at 2.0
repodir = self.subpath("demoapp2-distutils-repo")
if os.path.exists(repodir):
shutil.rmtree(repodir)
shutil.copytree("test/demoapp2-distutils", repodir)
shutil.copy("versioneer.py", repodir)
self.git("init", workdir=repodir)
self.python("versioneer.py", "setup", workdir=repodir)
self.git("add", "--all", workdir=repodir)
self.git("commit", "-m", "comment", workdir=repodir)
self.git("tag", "demoapp2-2.0", workdir=repodir)
return repodir
def make_distutils_repo_subproject(self):
# create a clean repo of demoapp2-distutils at 2.0
repodir = self.subpath("demoapp2-distutils-repo-subproject")
if os.path.exists(repodir):
shutil.rmtree(repodir)
shutil.copytree("test/demoapp2-distutils-subproject", repodir)
projectdir = os.path.join(repodir, "subproject")
shutil.copy("versioneer.py", projectdir)
self.git("init", workdir=repodir)
self.python("versioneer.py", "setup", workdir=projectdir)
self.git("add", "--all", workdir=repodir)
self.git("commit", "-m", "comment", workdir=repodir)
self.git("tag", "demoapp2-2.0", workdir=repodir)
return projectdir
def make_distutils_wheel_with_pip(self):
# create an wheel of demoapp2-distutils at 2.0
wheelname = "demoapp2-2.0-%s-none-any.whl" % pyver_major
demoapp2_distutils_wheel = self.subpath("cache", "distutils", wheelname)
if os.path.exists(demoapp2_distutils_wheel):
return demoapp2_distutils_wheel
repodir = self.make_distutils_repo()
venv = self.make_venv("make-distutils-wheel-with-pip")
self.run_in_venv(venv, repodir,
"pip", "wheel", "--wheel-dir", "wheelhouse",
"--no-index",# "--find-links", linkdir,
".")
created = os.path.join(repodir, "wheelhouse", wheelname)
self.assertTrue(os.path.exists(created), created)
shutil.copyfile(created, demoapp2_distutils_wheel)
return demoapp2_distutils_wheel
def make_distutils_sdist(self):
# create an sdist tarball of demoapp2-distutils at 2.0
demoapp2_distutils_sdist = self.subpath("cache", "distutils",
"demoapp2-2.0.tar")
if os.path.exists(demoapp2_distutils_sdist):
return demoapp2_distutils_sdist
repodir = self.make_distutils_repo()
self.python("setup.py", "sdist", "--format=tar", workdir=repodir)
created = os.path.join(repodir, "dist", "demoapp2-2.0.tar")
self.assertTrue(os.path.exists(created), created)
shutil.copyfile(created, demoapp2_distutils_sdist)
return demoapp2_distutils_sdist
def make_distutils_sdist_subproject(self):
demoapp2_distutils_sdist = self.subpath("cache", "distutils",
"demoapp2-subproject-2.0.tar")
if os.path.exists(demoapp2_distutils_sdist):
return demoapp2_distutils_sdist
projectdir = self.make_distutils_repo_subproject()
self.python("setup.py", "sdist", "--format=tar", workdir=projectdir)
created = os.path.join(projectdir, "dist", "demoapp2-2.0.tar")
# if that gets the version wrong, it will make the wrong tarball, and
# this check will fail
self.assertTrue(os.path.exists(created), created)
shutil.copyfile(created, demoapp2_distutils_sdist)
return demoapp2_distutils_sdist
def make_distutils_unpacked(self):
sdist = self.make_distutils_sdist()
unpack_into = self.subpath("demoapp2-distutils-unpacked")
if os.path.exists(unpack_into):
shutil.rmtree(unpack_into)
os.mkdir(unpack_into)
t = tarfile.TarFile(sdist)
t.extractall(path=unpack_into)
t.close()
unpacked = os.path.join(unpack_into, "demoapp2-2.0")
self.assertTrue(os.path.exists(unpacked))
return unpacked
def make_distutils_subproject_unpacked(self):
sdist = self.make_distutils_sdist_subproject()
unpack_into = self.subpath("demoapp2-distutils-unpacked-subproject")
if os.path.exists(unpack_into):
shutil.rmtree(unpack_into)
os.mkdir(unpack_into)
t = tarfile.TarFile(sdist)
t.extractall(path=unpack_into)
t.close()
unpacked = os.path.join(unpack_into, "demoapp2-2.0")
self.assertTrue(os.path.exists(unpacked))
return unpacked
def make_setuptools_repo(self):
# create a clean repo of demoapp2-setuptools at 2.0
repodir = self.subpath("demoapp2-setuptools-repo")
if os.path.exists(repodir):
shutil.rmtree(repodir)
shutil.copytree("test/demoapp2-setuptools", repodir)
shutil.copy("versioneer.py", repodir)
self.git("init", workdir=repodir)
self.python("versioneer.py", "setup", workdir=repodir)
self.git("add", "--all", workdir=repodir)
self.git("commit", "-m", "comment", workdir=repodir)
self.git("tag", "demoapp2-2.0", workdir=repodir)
return repodir
def make_setuptools_repo_subproject(self):
# create a clean repo of demoapp2-setuptools at 2.0
repodir = self.subpath("demoapp2-setuptools-repo-subproject")
if os.path.exists(repodir):
shutil.rmtree(repodir)
shutil.copytree("test/demoapp2-setuptools-subproject", repodir)
projectdir = os.path.join(repodir, "subproject")
shutil.copy("versioneer.py", projectdir)
self.git("init", workdir=repodir)
self.python("versioneer.py", "setup", workdir=projectdir)
self.git("add", "--all", workdir=repodir)
self.git("commit", "-m", "comment", workdir=repodir)
self.git("tag", "demoapp2-2.0", workdir=repodir)
return projectdir
def make_setuptools_sdist(self):
# create an sdist tarball of demoapp2-setuptools at 2.0
demoapp2_setuptools_sdist = self.subpath("cache", "setuptools",
"demoapp2-2.0.tar")
if os.path.exists(demoapp2_setuptools_sdist):
return demoapp2_setuptools_sdist
repodir = self.make_setuptools_repo()
self.python("setup.py", "sdist", "--format=tar", workdir=repodir)
created = os.path.join(repodir, "dist", "demoapp2-2.0.tar")
self.assertTrue(os.path.exists(created), created)
shutil.copyfile(created, demoapp2_setuptools_sdist)
return demoapp2_setuptools_sdist
def make_setuptools_sdist_subproject(self):
demoapp2_setuptools_sdist = self.subpath("cache", "setuptools",
"demoapp2-subproject-2.0.tar")
if os.path.exists(demoapp2_setuptools_sdist):
return demoapp2_setuptools_sdist
projectdir = self.make_setuptools_repo_subproject()
self.python("setup.py", "sdist", "--format=tar", workdir=projectdir)
created = os.path.join(projectdir, "dist", "demoapp2-2.0.tar")
self.assertTrue(os.path.exists(created), created)
shutil.copyfile(created, demoapp2_setuptools_sdist)
return demoapp2_setuptools_sdist
def make_setuptools_unpacked(self):
sdist = self.make_setuptools_sdist()
unpack_into = self.subpath("demoapp2-setuptools-unpacked")
if os.path.exists(unpack_into):
shutil.rmtree(unpack_into)
os.mkdir(unpack_into)
t = tarfile.TarFile(sdist)
t.extractall(path=unpack_into)
t.close()
unpacked = os.path.join(unpack_into, "demoapp2-2.0")
self.assertTrue(os.path.exists(unpacked))
return unpacked
def make_setuptools_subproject_unpacked(self):
sdist = self.make_setuptools_sdist_subproject()
unpack_into = self.subpath("demoapp2-setuptools-unpacked-subproject")
if os.path.exists(unpack_into):
shutil.rmtree(unpack_into)
os.mkdir(unpack_into)
t = tarfile.TarFile(sdist)
t.extractall(path=unpack_into)
t.close()
unpacked = os.path.join(unpack_into, "demoapp2-2.0")
self.assertTrue(os.path.exists(unpacked))
return unpacked
def make_setuptools_egg(self):
# create an egg of demoapp2-setuptools at 2.0
demoapp2_setuptools_egg = self.subpath("cache", "setuptools",
"demoapp2-2.0-%s.egg" % pyver)
if os.path.exists(demoapp2_setuptools_egg):
return demoapp2_setuptools_egg
repodir = self.make_setuptools_repo()
self.python("setup.py", "bdist_egg", workdir=repodir)
created = os.path.join(repodir, "dist", "demoapp2-2.0-%s.egg" % pyver)
self.assertTrue(os.path.exists(created), created)
shutil.copyfile(created, demoapp2_setuptools_egg)
return demoapp2_setuptools_egg
def make_setuptools_wheel_with_setup_py(self):
# create an wheel of demoapp2-setuptools at 2.0
wheelname = "demoapp2-2.0-%s-none-any.whl" % pyver_major
demoapp2_setuptools_wheel = self.subpath("cache", "setuptools",
wheelname)
if os.path.exists(demoapp2_setuptools_wheel):
# there are two ways to make this .whl, and we need to exercise
# both, so don't actually cache the results
os.unlink(demoapp2_setuptools_wheel)
repodir = self.make_setuptools_repo()
self.python("setup.py", "bdist_wheel", workdir=repodir)
created = os.path.join(repodir, "dist", wheelname)
self.assertTrue(os.path.exists(created), created)
shutil.copyfile(created, demoapp2_setuptools_wheel)
return demoapp2_setuptools_wheel
def make_setuptools_wheel_with_pip(self):
# create an wheel of demoapp2-setuptools at 2.0
wheelname = "demoapp2-2.0-%s-none-any.whl" % pyver_major
demoapp2_setuptools_wheel = self.subpath("cache", "setuptools",
wheelname)
if os.path.exists(demoapp2_setuptools_wheel):
# there are two ways to make this .whl, and we need to exercise
# both, so don't actually cache the results
os.unlink(demoapp2_setuptools_wheel)
linkdir = self.make_linkdir()
repodir = self.make_setuptools_repo()
venv = self.make_venv("make-setuptools-wheel-with-pip")
self.run_in_venv(venv, repodir,
"pip", "wheel", "--wheel-dir", "wheelhouse",
"--no-index", "--find-links", linkdir,
".")
created = os.path.join(repodir, "wheelhouse", wheelname)
self.assertTrue(os.path.exists(created), created)
shutil.copyfile(created, demoapp2_setuptools_wheel)
return demoapp2_setuptools_wheel
class DistutilsRepo(_Invocations, unittest.TestCase):
def test_build(self):
repodir = self.make_distutils_repo()
self.python("setup.py", "build", workdir=repodir)
# test that the built _version.py is correct. Ideally we'd actually
# run PYTHONPATH=.../build/lib build/scripts-PYVER/rundemo and check
# the output, but that's more fragile than I want to deal with today
fn = os.path.join(repodir, "build", "lib", "demo", "_version.py")
data = versions_from_file(fn)
self.assertEqual(data["version"], "2.0")
def test_install(self):
repodir = self.make_distutils_repo()
venv = self.make_venv("distutils-repo-install")
self.run_in_venv(venv, repodir, "python", "setup.py", "install")
self.check_in_venv(venv)
def test_install_subproject(self):
projectdir = self.make_distutils_repo_subproject()
venv = self.make_venv("distutils-repo-install-subproject")
self.run_in_venv(venv, projectdir, "python", "setup.py", "install")
self.check_in_venv(venv)
def test_pip_wheel(self):
self.make_distutils_wheel_with_pip()
# asserts version as a side-effect
def test_sdist(self):
sdist = self.make_distutils_sdist() # asserts version as a side-effect
t = tarfile.TarFile(sdist)
# make sure we used distutils/sdist, not setuptools/sdist
self.assertFalse("demoapp2-2.0/src/demoapp2.egg-info/PKG-INFO" in
t.getnames())
t.close()
def test_sdist_subproject(self):
sdist = self.make_distutils_sdist_subproject()
t = tarfile.TarFile(sdist)
# make sure we used distutils/sdist, not setuptools/sdist
self.assertFalse("demoapp2-2.0/src/demoapp2.egg-info/PKG-INFO" in
t.getnames())
t.close()
def test_pip_install(self):
repodir = self.make_distutils_repo()
venv = self.make_venv("distutils-repo-pip-install")
self.run_in_venv(venv, repodir, "pip", "install", ".")
self.check_in_venv(venv)
@unittest.expectedFailure
def test_pip_install_subproject(self):
projectdir = self.make_distutils_repo_subproject()
venv = self.make_venv("distutils-repo-pip-install-subproject")
self.run_in_venv(venv, projectdir, "pip", "install", ".")
self.check_in_venv(venv)
def test_pip_install_from_afar(self):
repodir = self.make_distutils_repo()
venv = self.make_venv("distutils-repo-pip-install-from-afar")
self.run_in_venv(venv, venv, "pip", "install", repodir)
self.check_in_venv(venv)
@unittest.expectedFailure
def test_pip_install_from_afar_subproject(self):
projectdir = self.make_distutils_repo_subproject()
venv = self.make_venv("distutils-repo-pip-install-from-afar-subproject")
self.run_in_venv(venv, venv, "pip", "install", projectdir)
self.check_in_venv(venv)
def test_pip_install_editable(self):
repodir = self.make_distutils_repo()
venv = self.make_venv("distutils-repo-pip-install-editable")
self.run_in_venv(venv, repodir, "pip", "install", "--editable", ".")
self.check_in_venv(venv)
def test_pip_install_editable_subproject(self):
projectdir = self.make_distutils_repo_subproject()
venv = self.make_venv("distutils-repo-pip-install-editable-subproject")
self.run_in_venv(venv, projectdir, "pip", "install", "--editable", ".")
self.check_in_venv(venv)
class SetuptoolsRepo(_Invocations, unittest.TestCase):
def test_install(self):
repodir = self.make_setuptools_repo()
demolib = self.make_demolib_sdist()
venv = self.make_venv("setuptools-repo-install")
# "setup.py install" doesn't take --no-index or --find-links, so we
# pre-install the dependency
self.run_in_venv(venv, venv, "pip", "install", demolib)
self.run_in_venv(venv, repodir, "python", "setup.py", "install")
self.check_in_venv_withlib(venv)
def test_install_subproject(self):
projectdir = self.make_setuptools_repo_subproject()
demolib = self.make_demolib_sdist()
venv = self.make_venv("setuptools-repo-install-subproject")
# "setup.py install" doesn't take --no-index or --find-links, so we
# pre-install the dependency
self.run_in_venv(venv, venv, "pip", "install", demolib)
self.run_in_venv(venv, projectdir, "python", "setup.py", "install")
self.check_in_venv_withlib(venv)
@unittest.skip("setuptools 'easy_install .': known to be broken")
def test_easy_install(self):
# This case still fails: the 'easy_install' command modifies the
# repo's setup.cfg (copying our --index-url and --find-links
# arguments into [easy_install]index_url= settings, so that any
# dependencies setup_requires= builds will use them), which means the
# repo is always "dirty", which creates an .egg with the wrong
# version. I have not yet found a clean way to hook the easy_install
# command to fix this: there is very little linkage between the
# parent command (which could calculate the version before setup.cfg
# is modified) and the command which builds the .egg. Leave it broken
# for now.
linkdir = self.make_linkdir()
indexdir = self.make_empty_indexdir()
repodir = self.make_setuptools_repo()
venv = self.make_venv("setuptools-repo-easy-install")
self.run_in_venv(venv, repodir,
"python", "setup.py", "easy_install",
"--index-url", indexdir, "--find-links", linkdir,
"."
)
self.check_in_venv_withlib(venv)
def test_develop(self):
linkdir = self.make_linkdir()
indexdir = self.make_empty_indexdir()
repodir = self.make_setuptools_repo()
venv = self.make_venv("setuptools-repo-develop")
# "setup.py develop" takes --find-links and --index-url but not
# --no-index
self.run_in_venv(venv, repodir,
"python", "setup.py", "develop",
"--index-url", indexdir, "--find-links", linkdir,
)
self.check_in_venv_withlib(venv)
def test_develop_subproject(self):
linkdir = self.make_linkdir()
indexdir = self.make_empty_indexdir()
projectdir = self.make_setuptools_repo_subproject()
venv = self.make_venv("setuptools-repo-develop-subproject")
# "setup.py develop" takes --find-links and --index-url but not
# --no-index
self.run_in_venv(venv, projectdir,
"python", "setup.py", "develop",
"--index-url", indexdir, "--find-links", linkdir,
)
self.check_in_venv_withlib(venv)
def test_egg(self):
self.make_setuptools_egg() # asserts version as a side-effect
def test_pip_wheel(self):
self.make_setuptools_wheel_with_pip()
# asserts version as a side-effect
def test_bdist_wheel(self):
self.make_setuptools_wheel_with_setup_py()
# asserts version as a side-effect
def test_sdist(self):
sdist = self.make_setuptools_sdist() # asserts version as a side-effect
t = tarfile.TarFile(sdist)
# make sure we used setuptools/sdist, not distutils/sdist
self.assertTrue("demoapp2-2.0/src/demoapp2.egg-info/PKG-INFO" in
t.getnames())
t.close()
def test_sdist_subproject(self):
sdist = self.make_setuptools_sdist_subproject()
t = tarfile.TarFile(sdist)
# make sure we used setuptools/sdist, not distutils/sdist
self.assertTrue("demoapp2-2.0/src/demoapp2.egg-info/PKG-INFO" in
t.getnames())
t.close()
def test_pip_install(self):
linkdir = self.make_linkdir()
repodir = self.make_setuptools_repo()
venv = self.make_venv("setuptools-repo-pip-install")
self.run_in_venv(venv, repodir, "pip", "install", ".",
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
@unittest.expectedFailure
def test_pip_install_subproject(self):
linkdir = self.make_linkdir()
projectdir = self.make_setuptools_repo_subproject()
venv = self.make_venv("setuptools-repo-pip-install-subproject")
self.run_in_venv(venv, projectdir, "pip", "install", ".",
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
def test_pip_install_from_afar(self):
linkdir = self.make_linkdir()
repodir = self.make_setuptools_repo()
venv = self.make_venv("setuptools-repo-pip-install-from-afar")
self.run_in_venv(venv, venv, "pip", "install", repodir,
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
@unittest.expectedFailure
def test_pip_install_from_afar_subproject(self):
linkdir = self.make_linkdir()
projectdir = self.make_setuptools_repo_subproject()
venv = self.make_venv("setuptools-repo-pip-install-from-afar-subproject")
self.run_in_venv(venv, venv, "pip", "install", projectdir,
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
def test_pip_install_editable(self):
linkdir = self.make_linkdir()
repodir = self.make_setuptools_repo()
venv = self.make_venv("setuptools-repo-pip-install-editable")
self.run_in_venv(venv, repodir, "pip", "install", "--editable", ".",
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
def test_pip_install_editable_subproject(self):
linkdir = self.make_linkdir()
projectdir = self.make_setuptools_repo_subproject()
venv = self.make_venv("setuptools-repo-pip-install-editable-subproject")
self.run_in_venv(venv, projectdir, "pip", "install", "--editable", ".",
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
class DistutilsSdist(_Invocations, unittest.TestCase):
def test_pip_install(self):
sdist = self.make_distutils_sdist()
venv = self.make_venv("distutils-sdist-pip-install")
self.run_in_venv(venv, venv,
"pip", "install", sdist)
self.check_in_venv(venv)
def test_pip_install_subproject(self):
sdist = self.make_distutils_sdist_subproject()
venv = self.make_venv("distutils-sdist-pip-install-subproject")
self.run_in_venv(venv, venv,
"pip", "install", sdist)
self.check_in_venv(venv)
def test_easy_install(self):
linkdir = self.make_linkdir()
indexdir = self.make_empty_indexdir()
sdist = self.make_distutils_sdist()
venv = self.make_venv("distutils-sdist-easy-install")
self.run_in_venv(venv, venv,
"easy_install",
"--index-url", indexdir, "--find-links", linkdir,
sdist)
self.check_in_venv(venv)
class SetuptoolsSdist(_Invocations, unittest.TestCase):
def test_pip_install(self):
linkdir = self.make_linkdir()
sdist = self.make_setuptools_sdist()
venv = self.make_venv("setuptools-sdist-pip-install")
self.run_in_venv(venv, venv,
"pip", "install",
"--no-index", "--find-links", linkdir,
sdist)
self.check_in_venv_withlib(venv)
def test_pip_install_subproject(self):
linkdir = self.make_linkdir()
sdist = self.make_setuptools_sdist_subproject()
venv = self.make_venv("setuptools-sdist-pip-install-subproject")
self.run_in_venv(venv, venv,
"pip", "install",
"--no-index", "--find-links", linkdir,
sdist)
self.check_in_venv_withlib(venv)
def test_easy_install(self):
linkdir = self.make_linkdir()
indexdir = self.make_empty_indexdir()
sdist = self.make_setuptools_sdist()
venv = self.make_venv("setuptools-sdist-easy-install")
self.run_in_venv(venv, venv,
"easy_install",
"--index-url", indexdir, "--find-links", linkdir,
sdist)
self.check_in_venv_withlib(venv)
class SetuptoolsWheel(_Invocations, unittest.TestCase):
def test_pip_install(self):
linkdir = self.make_linkdir()
wheel = self.make_setuptools_wheel_with_setup_py()
venv = self.make_venv("setuptools-wheel-pip-install")
self.run_in_venv(venv, venv,
"pip", "install",
"--no-index", "--find-links", linkdir,
wheel)
self.check_in_venv_withlib(venv)
class Egg(_Invocations, unittest.TestCase):
def test_easy_install(self):
linkdir = self.make_linkdir()
indexdir = self.make_empty_indexdir()
egg = self.make_setuptools_egg()
venv = self.make_venv("setuptools-egg-easy-install")
self.run_in_venv(venv, venv,
"easy_install",
"--index-url", indexdir, "--find-links", linkdir,
egg)
self.check_in_venv_withlib(venv)
class DistutilsUnpacked(_Invocations, unittest.TestCase):
def test_build(self):
unpacked = self.make_distutils_unpacked()
self.python("setup.py", "build", workdir=unpacked)
# test that the built _version.py is correct. Ideally we'd actually
# run PYTHONPATH=.../build/lib build/scripts-PYVER/rundemo and check
# the output, but that's more fragile than I want to deal with today
fn = os.path.join(unpacked, "build", "lib", "demo", "_version.py")
data = versions_from_file(fn)
self.assertEqual(data["version"], "2.0")
def test_install(self):
unpacked = self.make_distutils_unpacked()
venv = self.make_venv("distutils-unpacked-install")
self.run_in_venv(venv, unpacked, "python", "setup.py", "install")
self.check_in_venv(venv)
def test_install_subproject(self):
unpacked = self.make_distutils_subproject_unpacked()
venv = self.make_venv("distutils-subproject-unpacked-install")
self.run_in_venv(venv, unpacked, "python", "setup.py", "install")
self.check_in_venv(venv)
def test_pip_wheel(self):
unpacked = self.make_distutils_unpacked()
wheelname = "demoapp2-2.0-%s-none-any.whl" % pyver_major
venv = self.make_venv("distutils-unpacked-pip-wheel")
self.run_in_venv(venv, unpacked,
"pip", "wheel", "--wheel-dir", "wheelhouse",
"--no-index",# "--find-links", linkdir,
".")
created = os.path.join(unpacked, "wheelhouse", wheelname)
self.assertTrue(os.path.exists(created), created)
def test_pip_install(self):
repodir = self.make_distutils_unpacked()
venv = self.make_venv("distutils-unpacked-pip-install")
self.run_in_venv(venv, repodir, "pip", "install", ".")
self.check_in_venv(venv)
def test_pip_install_subproject(self):
unpacked = self.make_distutils_subproject_unpacked()
venv = self.make_venv("distutils-subproject-unpacked-pip-install")
self.run_in_venv(venv, unpacked, "pip", "install", ".")
self.check_in_venv(venv)
def test_pip_install_from_afar(self):
repodir = self.make_distutils_unpacked()
venv = self.make_venv("distutils-unpacked-pip-install-from-afar")
self.run_in_venv(venv, venv, "pip", "install", repodir)
self.check_in_venv(venv)
class SetuptoolsUnpacked(_Invocations, unittest.TestCase):
def test_install(self):
unpacked = self.make_setuptools_unpacked()
demolib = self.make_demolib_sdist()
venv = self.make_venv("setuptools-unpacked-install")
# "setup.py install" doesn't take --no-index or --find-links, so we
# pre-install the dependency
self.run_in_venv(venv, venv, "pip", "install", demolib)
self.run_in_venv(venv, unpacked,
"python", "setup.py", "install")
self.check_in_venv_withlib(venv)
def test_install_subproject(self):
unpacked = self.make_setuptools_subproject_unpacked()
demolib = self.make_demolib_sdist()
venv = self.make_venv("setuptools-subproject-unpacked-install")
# "setup.py install" doesn't take --no-index or --find-links, so we
# pre-install the dependency
self.run_in_venv(venv, venv, "pip", "install", demolib)
self.run_in_venv(venv, unpacked,
"python", "setup.py", "install")
self.check_in_venv_withlib(venv)
def test_easy_install(self):
linkdir = self.make_linkdir()
indexdir = self.make_empty_indexdir()
unpacked = self.make_setuptools_unpacked()
venv = self.make_venv("setuptools-unpacked-easy-install")
self.run_in_venv(venv, unpacked,
"python", "setup.py", "easy_install",
"--index-url", indexdir, "--find-links", linkdir,
"."
)
self.check_in_venv_withlib(venv)
def test_wheel(self):
unpacked = self.make_setuptools_unpacked()
self.python("setup.py", "bdist_wheel", workdir=unpacked)
wheelname = "demoapp2-2.0-%s-none-any.whl" % pyver_major
wheel = os.path.join(unpacked, "dist", wheelname)
self.assertTrue(os.path.exists(wheel))
def test_pip_wheel(self):
unpacked = self.make_setuptools_unpacked()
linkdir = self.make_linkdir()
wheelname = "demoapp2-2.0-%s-none-any.whl" % pyver_major
venv = self.make_venv("setuptools-unpacked-pip-wheel")
self.run_in_venv(venv, unpacked,
"pip", "wheel", "--wheel-dir", "wheelhouse",
"--no-index", "--find-links", linkdir,
".")
created = os.path.join(unpacked, "wheelhouse", wheelname)
self.assertTrue(os.path.exists(created), created)
def test_pip_install(self):
linkdir = self.make_linkdir()
repodir = self.make_setuptools_unpacked()
venv = self.make_venv("setuptools-unpacked-pip-install")
self.run_in_venv(venv, repodir, "pip", "install", ".",
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
def test_pip_install_subproject(self):
linkdir = self.make_linkdir()
unpacked = self.make_setuptools_subproject_unpacked()
venv = self.make_venv("setuptools-subproject-unpacked-pip-install")
self.run_in_venv(venv, unpacked, "pip", "install", ".",
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
def test_pip_install_from_afar(self):
linkdir = self.make_linkdir()
repodir = self.make_setuptools_unpacked()
venv = self.make_venv("setuptools-unpacked-pip-install-from-afar")
self.run_in_venv(venv, venv, "pip", "install", repodir,
"--no-index", "--find-links", linkdir)
self.check_in_venv_withlib(venv)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
c12deb8dc47fab1d4779dededfad990fb6c4aaec | 85c82274a3888fa61795bb0600ab96eaf7665b6a | /UTS/D_letterTArray.py | 16e3541a8e3a1482da2c3cb3821ac21e8b71dafd | [] | no_license | refeed/StrukturDataA | 8e5a214569f41b19c05842d003ede5941800482a | 4d3b77bbd28158f1f1e64a49b8e90da731859407 | refs/heads/master | 2023-06-03T08:22:12.442536 | 2021-07-01T03:24:29 | 2021-07-01T03:24:29 | 360,478,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,324 | py | '''
Letter T Array
Run-time limit: 1 second / test case
Memory limit: 32 MB
PROBLEM DESCRIPTION
Imagine a tube like the one pictured above: a tube with three branches, one of
which faces upward. This time we play with an array shaped like that tube.
"Pushing" a new value is like dropping a ball in through the upward-facing
branch. When the number of balls already lying in the bottom section is even,
the new ball falls exactly in the middle; when that number is odd, the new ball
lands immediately to the left of the middle ball. (Examples are given at the
end of the problem.)
INPUT
The first line is an integer N, the number of values to be "Push"-ed. The next
N values are the integers to be "Push"-ed onto the array, in that order.
OUTPUT
Output, from left to right, the values lying in the bottom section of the array
after all values have been pushed.
SAMPLE INPUT 1
5
1 2 3 4 5
SAMPLE OUTPUT 1
2 4 5 3 1
SAMPLE INPUT 2
4
4 1 3 2
SAMPLE OUTPUT 2
1 2 3 4
NOTES
'''
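# Worked trace of sample 1 (push order 1 2 3 4 5) under the insert-at-middle
# rule implemented below via list.insert(len(list) // 2, value):
#   push 1 -> [1]
#   push 2 -> [2, 1]
#   push 3 -> [2, 3, 1]
#   push 4 -> [2, 4, 3, 1]
#   push 5 -> [2, 4, 5, 3, 1]   -> printed as "2 4 5 3 1"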
num_of_data = int(input())
data_list = list(map(int, input().split()))
data_in_letter_t = []
for data in data_list:
data_in_letter_t.insert((len(data_in_letter_t) // 2), data)
print(' '.join(list(map(str, data_in_letter_t))) + ' ')
| [
"[email protected]"
] | |
39870bafb24d8c96b9d084eed585673395b338de | e61717bebf8f7d3790b0e98d868ea4ce33f9cc59 | /TSIS10_upd/inserting many data.py | 56017b448f12644780c3f8749161a0b6f3557868 | [] | no_license | KanagatS/PP2 | 81672264b9720af8b15408c9d8228eb6da25378e | b53f5164d6fb753392870607d0506c5a3daaef88 | refs/heads/master | 2023-04-20T10:29:53.298342 | 2021-05-21T18:24:55 | 2021-05-21T18:24:55 | 334,276,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | import psycopg2
con = psycopg2.connect(
host='localhost',
database='tsis',
user='postgres',
port=6666,
password=''
)
cur = con.cursor()
# ===============================================
sql = """INSERT INTO student(name) VALUES(%s);"""
cur.executemany(sql, [('is',), ('KBTU',), ('student',)])
con.commit()
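# Optional sanity check (sketch): confirm the three parameterized rows are
# visible after the commit, using the same cursor before it is closed below.
cur.execute("SELECT count(*) FROM student;")
print("rows in student:", cur.fetchone()[0])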
# ===============================================
cur.close()
con.close()
| [
"[email protected]"
] | |
08de08127f62aa59ec24287edeb7a29787f3ee2f | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/mesh3d/colorbar/title/_side.py | 95426ff6b73e9ae7aeac6fa6b0ff209b476d779f | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 493 | py | import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="side", parent_name="mesh3d.colorbar.title", **kwargs
):
super(SideValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["right", "top", "bottom"]),
**kwargs
)
| [
"[email protected]"
] | |
a057006ec8e593fa858cdfaccd187d99b327366a | 2be63b91334873f3044a0306344cc907828837b3 | /deluxhotel/blog/admin.py | d61c286a38247c7ff27eb520836214b22d8382fb | [] | no_license | DmitriiGrekov/delux_hotel | ffcb34c99d5740e8591f5eb7a15ea5e72cd0f5be | 0ac14d018166752827f486ba9d3e9553f0b52b67 | refs/heads/master | 2023-07-03T02:46:41.355875 | 2021-08-05T16:21:22 | 2021-08-05T16:21:22 | 393,068,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | from django.contrib import admin
from .models import TagsModel, BlogPostModel, CommentModel
@admin.register(TagsModel)
class TagsAdmin(admin.ModelAdmin):
list_display = ('name', 'slug')
prepopulated_fields = {'slug': ('name',)}
@admin.register(BlogPostModel)
class BlogAdmin(admin.ModelAdmin):
list_display = ('title',
'author',
'publish_date',
'active')
list_display_links = ('title', 'author')
search_fields = ('title', 'author', 'text',)
list_filter = ('author', 'publish_date', 'tags')
prepopulated_fields = {'slug': ('title', 'author',)}
@admin.register(CommentModel)
class CommentAdmin(admin.ModelAdmin):
list_display = ('name',
'email',
'date_publish')
list_display_links = ('name',
'email')
search_fields = ('name',
'email')
list_filter = ('date_publish',
'post')
| [
"[email protected]"
] | |
e67ff95685ab64f98a147c59594b3b7a7c4791ce | f59c06566e729380b032f050f852621f425553ac | /plugins/maze.py | 83aeeebf2601c7cabe7ca38404b7a26d1aa3638a | [] | no_license | JonnoFTW/TonsleyLEDManager | c23e27cf7e9f61f97d2c42e3331bceae3fe66231 | 681771584f2b105a2b190641be2d2d1d9d785be1 | refs/heads/master | 2021-06-07T18:24:54.113308 | 2021-05-02T09:43:19 | 2021-05-02T09:43:19 | 55,032,673 | 4 | 7 | null | 2017-09-04T04:13:26 | 2016-03-30T04:33:25 | Python | UTF-8 | Python | false | false | 6,100 | py | class Runner:
blue = [0, 0, 255]
white = [255, 255, 255]
black = [0, 0, 0]
green = [0, 255, 0]
red = [255, 0, 0]
def __init__(self, board_dimensions):
self.dims = board_dimensions
import numpy as np
self.np = np
np.set_printoptions(threshold=np.nan)
self.width = board_dimensions[1]
self.height = board_dimensions[0]
self.reset()
# blue for the runner position
# white for path
# red for frontier
# black for walls
def reset(self):
self.maze = self.np.zeros((self.height, self.width), dtype=self.np.uint8)
for x in range(self.maze.shape[0]):
if x % 2 == 0:
self.maze[x].fill(1)
for y in range(self.maze.shape[1]):
if y % 2 == 0:
self.maze[:, y].fill(1)
self.generated = False
# both need to be odd numbers
self.C = [(self.np.random.choice(range(3, self.height-3, 2)),
self.np.random.choice(range(3, self.width-3, 2)), 'W')]
t = self.C[0]
self.maze[t[0], t[1]] = 0
self.maze[t[0]-1, t[1]] = 0
self.maze[t[0]+1, t[1]] = 0
self.maze[t[0], t[1]+1] = 0
self.maze[t[0], t[1]-1] = 0
self.maze_generator = self.step()
self.maze[0].fill(1)
self.maze[-1].fill(1)
def render_maze(self):
out = self.np.empty((self.height, self.width, 3), dtype=self.np.uint8)
for x, row in enumerate(self.maze):
for y, cell in enumerate(row):
if cell <= 0 or cell == 4:
out[x, y] = self.white
elif cell == 1:
out[x, y] = self.black
elif cell == 2:
out[x, y] = self.red
elif cell == 3 or cell == -2:
out[x, y] = self.green
elif cell == 5:
out[x, y] = self.blue
return out
def step(self):
while self.C:
target = self.C[self.np.random.randint(0, len(self.C))]
n = self.neighbours(target[0], target[1])
self.np.random.shuffle(n)
if not n:
self.maze[target[0], target[1]] = 4
if target[2] == 'S':
self.maze[target[0], target[1]-1] = 4
elif target[2] == 'N':
self.maze[target[0], target[1]+1] = 4
elif target[2] == 'E':
self.maze[target[0]-1, target[1]] = 4
elif target[2] == 'W':
self.maze[target[0]+1, target[1]] = 4
self.C.remove(target)
else:
# mark visited cells as 2
new_cell = n.pop()
self.maze[new_cell[0], new_cell[1]] = 2
if new_cell[2] == 'S':
self.maze[new_cell[0], new_cell[1]-1] = 2
elif new_cell[2] == 'N':
self.maze[new_cell[0], new_cell[1]+1] = 2
elif new_cell[2] == 'E':
self.maze[new_cell[0]-1, new_cell[1]] = 2
elif new_cell[2] == 'W':
self.maze[new_cell[0]+1, new_cell[1]] = 2
self.C.append(new_cell)
yield self.render_maze()
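    # Return the up-to-four neighbouring cells of (x, y) that lie inside the
    # grid (row 0 excluded) and are still unvisited (value <= 0), each tagged
    # with the direction moved. The default v=2 jumps over wall cells during
    # generation; the solver calls it with v=1 to walk the carved corridors.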
def neighbours(self, x, y, v=2):
return [(nx, ny, d) for nx, ny, d in [(x, y+v, 'S'), (x, y-v, 'N'), (x+v, y, 'E'), (x-v, y, 'W')]
if 1 <= nx < self.maze.shape[0] and 0 <= ny < self.maze.shape[1] and self.maze[nx, ny] <= 0]
def solve(self):
#run the next step in maze
# update runner position
# get the random neighbours and move into one of them
while self.stack:
# get the neighbours of the current cell
x, y, d = self.runner
self.maze[x, y] = 5
n = self.neighbours(x, y, 1)
if x >= self.height - 2:
print "Solved"
break
if not n:
self.runner = self.stack.pop()
self.maze[self.runner[0], self.runner[1]] = 2
yield
else:
self.stack.extend(n)
new_cell = n[0]
self.runner = new_cell
self.maze[new_cell[0], new_cell[1]] = 0
yield
def run(self):
if not self.generated:
# do the next step in the maze generator
try:
return self.maze_generator.next()
except StopIteration:
self.generated = True
for x in range(self.maze.shape[0]):
for y in range(self.maze.shape[1]):
if self.maze[x, y] != 1:
self.maze[x, y] = 0
starts = list(self.np.where(self.maze[1] == 0)[0])# firsts white cell in the first column
self.runner = [0, starts.pop(), 'E']
self.maze_solver = self.solve()
self.stack = [self.runner]
return self.render_maze()
else:
try:
self.maze_solver.next()
except StopIteration:
# we hit the end of the maze or it's unsolvable!
self.reset()
return self.render_maze()
if __name__ == "__main__":
import pygame, sys
FPS = 60
fpsClock = pygame.time.Clock()
rows = 17
cols = 165
board_dimensions = (cols, rows)
disp_size = (cols * 8, rows * 8)
pygame.init()
size = width, height = board_dimensions
screen = pygame.display.set_mode(disp_size)
runner = Runner(board_dimensions)
while True:
for e in pygame.event.get():
if e.type == pygame.QUIT:
sys.exit()
screen.fill((0, 0, 0))
# draw the pixels
pixels = runner.run()
temp_surface = pygame.Surface(board_dimensions)
pygame.surfarray.blit_array(temp_surface, pixels)
pygame.transform.scale(temp_surface, disp_size, screen)
pygame.display.flip() | [
"[email protected]"
] | |
4280ff24cdcb735005428f197ee64f440e0f77ac | 3a09048cb841d91ee39ef054f35b8572f3c166fb | /OnlineJudge/ojproblem/apps.py | 1b720ddb973a9fee0b68995e95e12486f9580439 | [] | no_license | lyyyuna/LihuLabOJ | 91eddf27a16dca5488d5406e0224cf84544254b9 | e1e8e5ae9da629a201f734a33d264bcb6ae2f420 | refs/heads/master | 2022-12-14T02:53:24.786670 | 2019-08-29T03:07:22 | 2019-08-29T03:07:22 | 89,581,070 | 1 | 2 | null | 2022-12-08T08:32:24 | 2017-04-27T09:34:55 | Python | UTF-8 | Python | false | false | 158 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class OjproblemConfig(AppConfig):
name = 'ojproblem'
| [
"[email protected]"
] | |
bae085a67b4f224655e429058f60fbc44a5a185e | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/argparse/argparse_fromfile_prefix_chars.py | 0d40b273f431ba758c22fcbbd05759f0f70e9057 | [
"MIT"
] | permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 422 | py |
import argparse
parser = argparse.ArgumentParser(description='Short sample app',
fromfile_prefix_chars='@',
)
parser.add_argument('-a', action="store_true", default=False)
parser.add_argument('-b', action="store", dest="b")
parser.add_argument('-c', action="store", dest="c", type=int)
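# With fromfile_prefix_chars='@', any argument of the form @filename is replaced
# by the file's contents, read one argument per line. The contents of
# argparse_fromfile_prefix_chars.txt are not included here; a plausible example
# (assumption) would be:
#   -a
#   -b
#   2
#   -c
#   6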
print(parser.parse_args(['@argparse_fromfile_prefix_chars.txt']))
| [
"[email protected]"
] | |
fa07e854a21f6965ab962f6b3f56dc7d7a79a9ad | e5453b6a4b84a32ccca7281d438b7a7fa1853f58 | /src/ibmc/checks/huawei_ibmc_memory_check.py | e9a2b6fbe8b988c64e006753256d5d2b4991b3ab | [
"MIT"
] | permissive | Huawei/Server_Management_Plugin_Check_MK | 88445d9da581c347c5e82cf590453c4cb2c3d53c | 88398c7c8affe0b2064f418de931d69e36afde67 | refs/heads/master | 2021-05-11T11:40:55.302518 | 2021-01-27T09:53:17 | 2021-01-27T09:53:17 | 117,641,709 | 1 | 4 | null | 2018-01-31T05:38:01 | 2018-01-16T06:30:39 | null | UTF-8 | Python | false | false | 2,009 | py | #!/usr/bin/python
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
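# Map the raw memory status codes reported over SNMP onto the numeric check
# states returned below; _health_str supplies the matching human-readable
# labels used in the output message.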
_health_map = {"1": 0, "2": 1, "3": 1, "4": 2, "5": 3, "6": 3}
_health_str = {0: "OK", 1: "WARNING", 2: "CRITICAL", 3: "ABSENCE", 4: "UNKNOWN"}
def inventory_hw_memory_health(info):
return [('MEMORY status', None)]
def check_hw_memory_health(item, params, info):
_health_status = 3
_msg = ''
try:
for state in info[0][0]:
_health_status = _health_map.get(state)
for state, index in info[1]:
_each_status = _health_map.get(state)
if _each_status is not None:
if _each_status == 3:
continue
_health_msg = _health_str.get(_each_status)
_msg = _msg + " %s health status is %s;" % (str(index), _health_msg)
return _health_status, "healthy status is %s, %s" % (_health_str.get(_health_status), _msg)
    except IndexError:
        # Return a state code as well so check_mk can render the result.
        return 3, "health status could not be queried."
check_info["huawei_ibmc_memory_check"] = {
"inventory_function": inventory_hw_memory_health,
"check_function": check_hw_memory_health,
"service_description": "%s",
"includes": ["huawei_ibmc_util_.include"],
"snmp_info": [
(".1.3.6.1.4.1.2011.2.235.1.1.16", ["1.0", ]),
(".1.3.6.1.4.1.2011.2.235.1.1.16", ["50.1.6", "50.1.10"])
],
"snmp_scan_function": scan,
} | [
"[email protected]"
] | |
6f0d1ed0816ccbc48e4a42bfff7f7583a50f9a16 | 781f408fd9dc9fd111d5ac47009ab580636625e5 | /examples/test_get_pdf_text.py | 32573412e9a3e0199172d9ba0bd2f4394ab87c0d | [
"MIT"
] | permissive | doiteachday/SeleniumBase | fb003257b63e157b734d2b34a9c5794d74748322 | 8ded5fac84b85f1d4f43384d0836dbf4a1fc390e | refs/heads/master | 2023-04-10T10:13:50.372864 | 2021-05-04T02:51:43 | 2021-05-04T02:51:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from seleniumbase import BaseCase
class PdfTests(BaseCase):
def test_get_pdf_text(self):
pdf = ("https://nostarch.com/download/"
"Automate_the_Boring_Stuff_sample_ch17.pdf")
pdf_text = self.get_pdf_text(pdf, page=1)
print("\n" + pdf_text)
| [
"[email protected]"
] | |
2594bcbf34b79c8031b60bfcbb34bbb0796cf491 | 0175bdc4c896e8019b2c5f7442097cf6b9c1d14a | /pylibs/BasePage.py | 59ed9e3979940a13702c32503f2b2f7648643462 | [] | no_license | GGGYB/shiiia | 323ecee869dcd66510baf0ea7bc30b29c2bfb5ad | 9760f170cbbec37cc340c3b020f36cdd9855e7cd | refs/heads/master | 2023-05-02T07:58:37.023266 | 2021-05-31T09:41:07 | 2021-05-31T09:41:07 | 334,103,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,408 | py | # -*- coding: utf-8 -*-
# Author: sharon
# Datetime: 2021/1/29 14:32
# File: $ {NAME}
from pylibs.MyDriver import Driver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import time
class BasePage():
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
def get_element(self,locator):
WebDriverWait(driver=Driver.wd,timeout=10,poll_frequency=0.5).until(
EC.visibility_of_element_located(locator)
)
return Driver.wd.find_element(*locator)
def get_elements(self,locator):
WebDriverWait(driver=Driver.wd,timeout=10,poll_frequency=0.5).until(
EC.visibility_of_element_located(locator)
)
return Driver.wd.find_elements(*locator)
def get_element_text(self,locators):
eleText = []
for ele in self.get_elements(locators):
eleText.append(ele.text)
print(eleText)
return eleText
def scroll_to_window(self,step,scrollSize):
for i in range(step):
Driver.wd.execute_script(f'window.scrollBy(0,{scrollSize})')
def to_page(self,url):
Driver.wd.get(url)
    # Adjust scrollTop to position the vertical scrollbar: 0 is the very top, 10000 is the very bottom.
def scroll_to_extreme(self,num):
js = f"var q=document.documentElement.scrollTop={num}"
Driver.wd.execute_script(js) | [
"[email protected]"
] | |
bc860517d0de7a0508431b8414cb45c85ec7b3e7 | 979cf7d5e2136e7e701df27da29622f9196f219e | /Files/views.py | 6a8462440f919b5546e343a386846815375f1e1c | [] | no_license | RafayelGardishyan/DjangoTeamwork | e68c33844680c6a4e345fe8dfc2d3b4b49ccf2ef | 6b030b161b67976445b292f0d5f7366a5eb48560 | refs/heads/master | 2021-09-16T07:03:51.280141 | 2018-01-06T20:50:11 | 2018-01-06T20:50:11 | 114,727,815 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,901 | py | import random
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import loader
from Start.models import Admin
from .forms import FileForm
from .models import File
from webhooks import Webhook
# Create your views here.
values = {
'securitykey': "",
'whurl': "https://discordapp.com/api/webhooks/399280451258417162/ex_ix9eIhkltscgcS3AyiDt4iVqBpowzAg4LZIFsbuwcJ01jUMkM8Jp78B5YWX6zPoLM",
}
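# Deleting a file is a two-step confirmation: a GET without parameters generates
# a random 6-digit key, stores it in values['securitykey'] and emails it to the
# Admin record with id=1; a follow-up GET with ?ak=<key> must match that key
# before the file is removed and a notification is posted to the Discord
# webhook configured above.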
def index(request):
if request.session.get('logged_in'):
files = File.objects.order_by('added_on')
template = loader.get_template('files/index.html')
context = {
'files': files,
}
return HttpResponse(template.render(context, request))
else:
return redirect('/')
def delete(request, slug):
if request.session.get('logged_in'):
file = File.objects.get(slug=slug)
filename = file.name
user = Admin.objects.get(id=1)
if request.GET:
if request.GET['ak'] == values['securitykey']:
file.deletefile()
file.delete()
template = loader.get_template('error.html')
context = {
'message': 'Successfully deleted file ' + filename,
'link': {
'text': 'Return to Files home',
'url': '/files'
}
}
embed = Webhook(values['whurl'], color=123123)
embed.set_author(name='Codeniacs Website',
icon='https://codename-codeniacs.herokuapp.com/static/favicon.png')
embed.set_desc('Deleted File')
embed.add_field(name='Name', value=filename)
embed.set_thumbnail('https://codename-codeniacs.herokuapp.com/static/favicon.png')
embed.set_footer(text='This message was automatically sent form Codeniacs Website',
icon='https://codename-codeniacs.herokuapp.com/static/favicon.png', ts=True)
embed.post()
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('error.html')
context = {
'message': 'Wrong Admin Key',
'link': {
'text': 'Return to Files home',
'url': '/files'
}
}
return HttpResponse(template.render(context, request))
else:
securitykey = ""
for i in range(6):
securitykey += str(random.randint(0, 9))
print(securitykey)
user.sendemail('Delete File', 'Your Security Key is ' + str(securitykey))
values['securitykey'] = securitykey
template = loader.get_template('files/delete.html')
context = {}
return HttpResponse(template.render(context, request))
else:
return redirect('/')
def add(request):
# if this is a POST request we need to process the form data
if request.session.get('logged_in'):
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = FileForm(request.POST, files=request.FILES)
# check whether it's valid:
if form.is_bound:
if form.is_valid():
form.save()
template = loader.get_template('error.html')
context = {
'message': 'Added File',
'link': {
'text': 'Return to Files home',
'url': '/files',
},
'slink': {
'text': 'Add an other File',
'url': '/files/add'
},
}
embed = Webhook(values['whurl'], color=123123)
embed.set_author(name='Codeniacs Website',
icon='https://codename-codeniacs.herokuapp.com/static/favicon.png')
embed.set_desc('Added File')
embed.add_field(name='Name', value=form.cleaned_data['file'])
embed.set_thumbnail('https://codename-codeniacs.herokuapp.com/static/favicon.png')
embed.set_footer(text='This message was automatically sent form Codeniacs Website',
icon='https://codename-codeniacs.herokuapp.com/static/favicon.png', ts=True)
embed.post()
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('error.html')
context = {
'message': 'Form is not valid',
'link': {
'text': 'Return to Files home',
'url': '/files'
}
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('error.html')
context = {
'message': 'Form is not bound',
'link': {
'text': 'Return to Files home',
'url': '/files'
}
}
return HttpResponse(template.render(context, request))
# if a GET (or any other method) we'll create a blank form
else:
form = FileForm()
template = loader.get_template('files/add.html')
context = {'form': form}
return HttpResponse(template.render(context, request))
else:
return redirect('/')
| [
"[email protected]"
] | |
9531a59085c598825838be55b85bd85e79853aaa | 327e3c96db66c055d47be868ef5346ae3515b752 | /SpiralMatrix.py | 589762d3c9b41840dab60d26be27ea76aec14b69 | [] | no_license | dabay/LeetCodePython | 790a17893c46aa3a003ef95026471c21d869570d | fdac2086bc793584e05445f5d9afa74fee6fcb33 | refs/heads/master | 2021-03-12T23:34:04.496651 | 2017-08-24T15:55:02 | 2017-08-24T15:55:02 | 27,840,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,611 | py | # -*- coding: utf8 -*-
'''
Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order.
For example,
Given the following matrix:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
You should return [1,2,3,6,9,8,7,4,5].
'''
class Solution:
    # @param matrix, a list of lists of integers (m rows, n columns)
    # @return a list of integers, the matrix elements in spiral order
def spiralOrder(self, matrix):
def spiral_order(result, matrix, start_x, start_y, m, n):
if n==0 or m==0:
return
if n == 1:
for i in xrange(m):
result.append(matrix[start_x+i][start_y])
return
if m == 1:
for i in xrange(n):
result.append(matrix[start_x][start_y+i])
return
for i in xrange(start_y, start_y+n):
result.append(matrix[start_x][i])
for i in xrange(start_x+1, start_x+m):
result.append(matrix[i][start_y+n-1])
for i in xrange(start_y+n-1-1, start_y-1, -1):
result.append(matrix[start_x+m-1][i])
for i in xrange(start_x+m-1-1, start_x, -1):
result.append(matrix[i][start_y])
return spiral_order(result, matrix, start_x + 1, start_y + 1, m-2, n-2)
if len(matrix) == 0:
return []
result = []
spiral_order(result, matrix, 0, 0, len(matrix), len(matrix[0]))
return result
if __name__ == "__main__":
s = Solution()
input = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
print s.spiralOrder(input)
| [
"[email protected]"
] | |
4b47d4fae81b2e9fe90c6198f017118e6e06407e | 0b1c6a559c8f8f38ec0a9b62c5fdec786488c77e | /appspot/time_clock/migrations/0003_auto_20171005_1604.py | 7f9c3ef6d7a0795f1365aaad23df686301d777d4 | [] | no_license | smartworld1000/django_appspot | 9372b1edeb3e9d2507ca49463d34b0cf22e652ed | d801d910ff52b83a45f3bf68334bb06a91b81221 | refs/heads/master | 2021-05-14T03:39:07.613510 | 2017-11-05T07:42:59 | 2017-11-05T07:42:59 | 116,621,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-05 16:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_clock', '0002_auto_20171005_1551'),
]
operations = [
migrations.AlterField(
model_name='time_clock',
name='timein',
field=models.DateTimeField(help_text=''),
),
migrations.AlterField(
model_name='time_clock',
name='timeout',
field=models.DateTimeField(help_text=''),
),
migrations.AlterField(
model_name='time_clock',
name='workdate',
field=models.DateField(help_text=''),
),
]
| [
"[email protected]"
] | |
c96102cd66d61620bf0f87c991aa8d335ee87949 | cbd601867957c9abf19816c1b14bc455b54a6977 | /themed_collection/views.py | 6f4e9ee5c9b27cec49a080bd5da93739cf4f2178 | [] | no_license | tingletech/voro-djsite | 33640c8656af312650092594af8666c75c5d116b | d50da7c430d2e63436ad125be6ec62b3aa2174ac | refs/heads/master | 2021-01-01T19:43:14.241476 | 2011-03-09T22:51:21 | 2011-03-09T22:51:21 | 1,089,200 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,206 | py | import csv
import simplejson
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.decorators import permission_required, login_required
from django.template import RequestContext
from django.http import HttpResponse
from django.http import Http404, HttpResponseForbidden, HttpResponseBadRequest
from django import forms
from django.conf import settings
from geocoders_dsc import Google, GeoCodeException
from themed_collection.models import ThemedCollection
from xtf.models import ARKSet, ARKSetMember, ARKObject, GeoPoint, XTFNOTFOUND #should get urls from ARKSet!
from xtf.models import DublinCoreTerm
from xtf.ARK_validator import validate, extract
def ARKSetMember_map_data(member):
'''build the data object for an ARKSetMember'''
if member.object.titles:
#titles = ' '.join([ d['value'] for d in member.object.titles]) # Used when BeautifulSoup is parser, should probably wrap the elem...
titles = ' '.join([ d.content for d in member.object.titles if d.content]) #-- used whne ElementTree is parser
else:
titles = member.title
if member.object.dates:
#dates = ' '.join([ d['value'] for d in member.object.dates if d])
dates = ' '.join([ d.content for d in member.object.dates if d.content])
else:
dates = ''
return dict(title=titles,
date=dates,
note=member.annotation,
lat=member.lat,
lon=member.lon,
place=member.place,
thumbnail=member.object.thumbnail,
url_content=member.object.url_content,
image=member.object.image,
exact=member.location_exact,
)
def collection_members_map_data(member_list):
data = {}
for member in member_list:
data[member.object.ark]= ARKSetMember_map_data(member)
return data
def collection_members_map_json(member_list):
return simplejson.dumps(collection_members_map_data(member_list))
def view_json(request, pk=None, slug=None):
'''Return a simple almost static view
'''
if slug:
themed_collection = get_object_or_404(ThemedCollection, slug=slug)
elif pk:
themed_collection = get_object_or_404(ThemedCollection, pk=pk)
else:
raise Http404
collection_members = themed_collection.get_members()
#compile map data
json = collection_members_map_json(collection_members)
return HttpResponse(json, mimetype='application/json')
def view_themed_collection(request, pk=None, slug=None):
'''Return a simple almost static view
'''
if slug:
themed_collection = get_object_or_404(ThemedCollection, slug=slug)
elif pk:
themed_collection = get_object_or_404(ThemedCollection, pk=pk)
else:
raise Http404
collections = ThemedCollection.objects.all()
collection_members = themed_collection.get_members()
google_map_key = settings.GOOGLE_MAP_KEY
return render_to_response('themed_collection/view_collection.html',
locals()
)
def _parse_csv_row(csv_row):
'''Wants a csv row list. Parses into a dict suitable for creating an
arksetmember object
'''
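    # Column layout inferred from the indices used below: 0 region, 1 ARK,
    # 2 title, 4 city, 5 geocodable place, 6 dates, 7 theme type, 9 notes
    # (also read as geo_place_notes), 10 mosaic flag (non-empty means True),
    # 11 exactness flag (non-empty means the location is not exact);
    # columns 3 and 8 are ignored.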
if len(csv_row) < 2:
raise ValueError('Must have ARK, annotation in csv row')
ark = csv_row[1]
ark = ark[ark.index('ark:'):]
#validate ark
ark, NAAN, name, qual = extract(ark)
title = csv_row[2]
region = csv_row[0]
city = csv_row[4]
geo_place = csv_row[5]
geo_place_notes = csv_row[9]
dates = csv_row[6]
theme_type = csv_row[7]
notes = csv_row[9]
mosaic = False if csv_row[10] == '' else True
exact = True if csv_row[11] == '' else False
ret_dict = locals()
del ret_dict['csv_row']
return ret_dict
def _parse_csv(csv_reader, arkset, themed_collection):
errors = []
arks_added = []
members_added = []
num_add = 0
not_geocoded = []
numrows = 0
g = Google('ABQIAAAAPZhPbFDgyqqKaAJtfPgdhRQxAnOebRR8qqjlEjE1Y4ZOeQ67yxSVDP1Eq9oU2BZjw2PaheQ5prTXaw')
for row in csv_reader:
numrows+=1
try:
data = _parse_csv_row(row)
#print data
except ValueError, e:
errors.append((e, row))
continue
try:
arkobj, created = ARKObject.get_or_create(ark=data['ark'])
if created:
arks_added.append(arkobj)
except XTFNOTFOUND, e:
errors.append((e, row))
continue
member = ARKSetMember(object=arkobj, set=arkset, annotation=data['notes'])
try:
member.save()
members_added.append(member)
# is there a title? if so create a DCTerm for title
if (data['title']):
try:
dcterm = DublinCoreTerm(content_object=member, term='T', content=data['title'])
dcterm.save()
except:
pass
#attempt geocode
place = lat = lng = None
try:
if not data['geo_place']:
location = ' '.join((data['city'], 'CA'))
not_geocoded.append((member, "Only to city level", location))
else:
location = data['geo_place']
place, (lat, lng) = g.geocode(location)
gpt = GeoPoint(content_object=member, lat=lat, lon=lng, place=place, exact=data['exact'])
gpt.save()
except GeoCodeException, e:
not_geocoded.append((member,e.message, location))
#except IntegrityError, e:
#except _mysql_exceptions.IntegrityError, e:
#except _mysql_exceptions.MySQLError, e
#is in mosaic?
if data['mosaic']:
themed_collection.mosaic_members.add(member)
except:
import sys
e = sys.exc_info()
            # str.find() returns -1 when the substring is absent, so compare explicitly
            if e[1].message.find("IntegrityError(1062") != -1:
errors.append(("Duplicate Member entry for row, an ARK can only have one entry in a set.", row))
else:
errors.append((e[1], row))
return numrows, errors, members_added, arks_added, not_geocoded
#Decorator won't work, need to check object level perm...
#@permission_required('xtf.change_arkset')
@login_required
def input_csv(request, pk):
'''Input a csv of ARKSetMembers into a given arkset
'''
themed_collection = get_object_or_404(ThemedCollection, pk=pk)
# if not request.user.has_perm('xtf.change_themedcollection', themed_collection):
# return HttpResponseForbidden('<h1>Permission Denied</h1>')
arkset_choices = [(arkset.pk, arkset.title) for arkset in themed_collection.arksets.all()]
class UploadFileForm(forms.Form):
set = forms.ChoiceField(choices=arkset_choices)
file = forms.FileField()
if request.method == 'POST':
form = UploadFileForm(request.POST, request.FILES)
if form.is_valid():
if not request.FILES:
form.errors['file'] = ('NO FILE INPUT',)
else:
#THIS SUCKS, LOOKS LIKE THE DJANGO UPLOAD FILE doesn't use universal newlines, can i fix by reading into one that does?
#TODO: fix this problem with newlines here, how?
# the UploadFile object bombs right away...
# may need to read into file, then do a universal newline read
csv_reader = csv.reader(form.cleaned_data['file'])
arkset = ARKSet.objects.get(id=form.cleaned_data['set'])
numrows, errs, set_members_added, arks_added, not_geocoded = _parse_csv(csv_reader, arkset, themed_collection)
num = len(set_members_added)
return render_to_response('themed_collection/input_csv_result.html',
locals(),
)
else:
form = UploadFileForm()
return render_to_response('themed_collection/input_csv.html',
locals(),
)
| [
"none@none"
] | none@none |
0b3008da0bf7f113d48b9ab99344fb70cf022591 | 90f729624737cc9700464532a0c67bcbfe718bde | /lino_xl/lib/cv/mixins.py | 2cf6c081a9ea76a806bd42afe738a54f12383a91 | [
"AGPL-3.0-only"
] | permissive | lino-framework/xl | 46ba6dac6e36bb8e700ad07992961097bb04952f | 642b2eba63e272e56743da2d7629be3f32f670aa | refs/heads/master | 2021-05-22T09:59:22.244649 | 2021-04-12T23:45:06 | 2021-04-12T23:45:06 | 52,145,415 | 1 | 5 | BSD-2-Clause | 2021-03-17T11:20:34 | 2016-02-20T09:08:36 | Python | UTF-8 | Python | false | false | 6,328 | py | # -*- coding: UTF-8 -*-
# Copyright 2013-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.conf import settings
from django.db import models
from django.utils.translation import gettext
from lino.api import dd, rt, _
from etgen.html import E, join_elems, forcetext
from lino.mixins.periods import DateRange
NONE = _("Not specified")
class BiographyOwner(dd.Model):
class Meta:
abstract = True
_cef_levels = None
_mother_tongues = None
def load_language_knowledge(self):
if self._mother_tongues is not None:
return
LanguageKnowledge = rt.models.cv.LanguageKnowledge
self._cef_levels = dict()
self._mother_tongues = []
qs = LanguageKnowledge.objects.filter(person=self)
# if dd.plugins.cv.with_language_history:
# qs = qs.order_by('-entry_date', 'id')
# else:
# qs = qs.order_by('id')
for lk in qs:
if lk.native:
self._mother_tongues.append(lk.language)
# if lk.language.iso2 in ("de", "fr", "en"):
if lk.cef_level is not None:
if not lk.language.iso2 in self._cef_levels:
lkinfo = str(lk.cef_level.value)
if lk.has_certificate:
lkinfo += " ({})".format(_("Certificate"))
self._cef_levels[lk.language.iso2] = lkinfo
@dd.htmlbox(_("Language knowledge"))
def language_knowledge(self, ar):
return self.get_language_knowledge()
def get_language_knowledge(self, *buttons):
self.load_language_knowledge()
lst = []
for lng in settings.SITE.languages:
lst.append("{}: {}".format(
lng.name, self._cef_levels.get(lng.django_code, NONE)))
# if cl is None:
# lst.append("{}: {}".format(lng.name, ))
# else:
# lst.append("{}: {}".format(lng.name, cl))
if len(self._mother_tongues):
lst.append("{}: {}".format(
_("Mother tongues"), self.mother_tongues))
lst += buttons
lst = join_elems(lst, E.br)
return E.p(*lst)
@dd.displayfield(_("Mother tongues"))
def mother_tongues(self, ar):
self.load_language_knowledge()
return ' '.join([str(lng) for lng in self._mother_tongues])
# @dd.displayfield(_("CEF level (de)"))
@dd.displayfield()
def cef_level_de(self, ar):
self.load_language_knowledge()
return self._cef_levels.get('de', NONE)
# @dd.displayfield(_("CEF level (fr)"))
@dd.displayfield()
def cef_level_fr(self, ar):
self.load_language_knowledge()
return self._cef_levels.get('fr', NONE)
# @dd.displayfield(_("CEF level (en)"))
@dd.displayfield()
def cef_level_en(self, ar):
self.load_language_knowledge()
return self._cef_levels.get('en', NONE)
class EducationEntryStates(dd.ChoiceList):
verbose_name = _("State")
add = EducationEntryStates.add_item
add('0', _("Success"), 'success')
add('1', _("Failure"), 'failure')
add('2', _("Ongoing"), 'ongoing')
class HowWell(dd.ChoiceList):
verbose_name = _("How well?")
add = HowWell.add_item
add('0', _("not at all"))
add('1', _("a bit"))
add('2', _("moderate"), "default")
add('3', _("quite well"))
add('4', _("very well"))
class CefLevel(dd.ChoiceList):
verbose_name = _("CEF level")
verbose_name_plural = _("CEF levels")
# show_values = True
#~ @classmethod
#~ def display_text(cls,bc):
#~ def fn(bc):
#~ return u"%s (%s)" % (bc.value,unicode(bc))
#~ return lazy(fn,unicode)(bc)
add = CefLevel.add_item
add('A0')
add('A1')
add('A1+')
add('A2')
add('A2+')
add('B1')
add('B2')
add('B2+')
add('C1')
add('C2')
add('C2+')
# add('A0', _("basic language skills"))
# add('A1', _("basic language skills"))
# add('A1+', _("basic language skills"))
# add('A2', _("basic language skills"))
# add('A2+', _("basic language skills"))
# add('B1', _("independent use of language"))
# add('B2', _("independent use of language"))
# add('B2+', _("independent use of language"))
# add('C1', _("proficient use of language"))
# add('C2', _("proficient use of language"))
# add('C2+', _("proficient use of language"))
class SectorFunction(dd.Model):
class Meta:
abstract = True
sector = dd.ForeignKey("cv.Sector", blank=True, null=True)
function = dd.ForeignKey("cv.Function", blank=True, null=True)
@dd.chooser()
def function_choices(cls, sector):
if sector is None:
return rt.models.cv.Function.objects.all()
return sector.function_set.all()
class PersonHistoryEntry(DateRange):
class Meta:
abstract = True
person = dd.ForeignKey(dd.plugins.cv.person_model)
duration_text = models.CharField(
_("Duration"), max_length=200, blank=True)
class HistoryByPerson(dd.Table):
master_key = 'person'
order_by = ["start_date"]
auto_fit_column_widths = True
@classmethod
def create_instance(self, req, **kw):
obj = super(HistoryByPerson, self).create_instance(req, **kw)
if obj.person_id is not None:
previous_exps = self.model.objects.filter(
person=obj.person).order_by('start_date')
if previous_exps.count() > 0:
exp = previous_exps[previous_exps.count() - 1]
if exp.end_date:
obj.start_date = exp.end_date
else:
obj.start_date = exp.start_date
return obj
@classmethod
def get_table_summary(cls, mi, ar):
if mi is None:
return
items = []
ar = ar.spawn(cls, master_instance=mi, is_on_main_actor=False)
for obj in ar:
chunks = []
for e in cls.get_handle().get_columns():
if e.hidden:
continue
v = e.field._lino_atomizer.full_value_from_object(obj, ar)
if v:
if len(chunks) > 0:
chunks.append(", ")
chunks += [e.get_label(), ": ", E.b(e.format_value(ar, v))]
items.append(E.li(*forcetext(chunks)))
return E.ul(*items)
| [
"[email protected]"
] | |
13097b1d3f56a2e6dabdbab7527c0f64a21c2ad4 | 4732684be0b1a45c2aebe45d22558a9e1bd7f377 | /src/main.py | 8ba296d023c3861d1fe711862ef41a6e31bdf7b5 | [] | no_license | Griffinem/Trade-Up-EV | a7e0175d333daa04d94268e9342ade2084440084 | b9b8b5954517432f9e2d57b45e7ee658008eca6c | refs/heads/master | 2022-08-28T09:22:10.180323 | 2022-08-18T14:26:44 | 2022-08-18T14:26:44 | 247,586,523 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,474 | py | #from api_utils import *
import json
import requests
import time
item_price_data_url = 'http://csgobackpack.net/api/GetItemsList/v2/'
weapon_data_file_path = '..\scraping\weapon_data_file.json'
ev_output_file_path = 'ev_data_file.json'
float_cutoffs = {'Factory New': [0.0, 0.07], 'Minimal Wear': [0.07, 0.15], 'Field-Tested': [0.15, 0.38], 'Well-Worn': [0.38, 0.45], 'Battle-Scarred': [0.45, 1.0]}
wear_int_dict = {0: 'Factory New', 1: 'Minimal Wear', 2: 'Field-Tested', 3: 'Well-Worn', 4: 'Battle-Scarred'}
grade_int_dict = {0: 'consumer', 1: 'industrial', 2: 'milspec', 3: 'restricted', 4: 'classified', 5: 'covert'}
metadata = {}
ev_dict = {}
def get_item_best_wear(wear_min):
if wear_min < float_cutoffs['Factory New'][1]:
return 0
elif wear_min < float_cutoffs['Minimal Wear'][1]:
return 1
elif wear_min < float_cutoffs['Field-Tested'][1]:
return 2
elif wear_min < float_cutoffs['Well-Worn'][1]:
return 3
else:
return 4
def get_item_worst_wear(wear_max):
if wear_max >= float_cutoffs['Battle-Scarred'][0]:
return 4
elif wear_max >= float_cutoffs['Well-Worn'][0]:
return 3
elif wear_max >= float_cutoffs['Field-Tested'][0]:
return 2
elif wear_max >= float_cutoffs['Minimal Wear'][0]:
return 1
else:
return 0
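# A minimal, hedged illustration of the bucketing helpers above; the sample
# float values are made up and this function is not called by the script.
def _wear_bucket_example():
    best = get_item_best_wear(0.06)    # 0.06 < 0.07  -> 0 (Factory New)
    worst = get_item_worst_wear(0.52)  # 0.52 >= 0.45 -> 4 (Battle-Scarred)
    return wear_int_dict[best], wear_int_dict[worst]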
def get_tradeup_ev(coll, grade):
for (i, weapon_i) in enumerate(coll[ grade_int_dict[grade] ]):
# Get best wear and worst wear as int
item_best_wear, item_worst_wear = get_item_best_wear(weapon_i['wear_min']), get_item_worst_wear(weapon_i['wear_max'])
''' The tertiary loop will iterate over each weapon wear '''
for wear_val in range(item_best_wear, item_worst_wear+1):
break_val = False
# Get the tradeup cost
weapon_key_str = weapon_i['name'] + ' (' + wear_int_dict[wear_val] + ')'
try:
tradeup_cost = price_data[weapon_key_str]['price'][ metadata['time'] ][ metadata['metric'] ] * 10
except KeyError:
#print('Error getting {0}. Breaking...'.format(weapon_key_str))
break_val = True
break
#print('Trading up {}'.format(weapon_key_str))
# Get tradeup float avg
tradeup_float_avg = 0.0
if metadata['float'] == 'median':
# Special cases
if wear_val == item_best_wear:
tradeup_float_avg = (weapon_i['wear_min'] + float_cutoffs[ wear_int_dict[wear_val] ][1]) / 2.0
elif wear_val == item_worst_wear:
tradeup_float_avg = (float_cutoffs[ wear_int_dict[wear_val] ][0] + weapon_i['wear_max']) / 2.0
#Default
else:
tradeup_float_avg = (float_cutoffs[ wear_int_dict[wear_val] ][0] + float_cutoffs[ wear_int_dict[wear_val] ][1]) / 2.0
elif metadata['float'] == 'min':
# Special cases
if wear_val == item_best_wear:
tradeup_float_avg = weapon_i['wear_min']
# Default
else:
tradeup_float_avg = float_cutoffs[ wear_int_dict[wear_val] ][0]
elif metadata['float'] == 'max':
# Special cases
if wear_val == item_worst_wear:
tradeup_float_avg = weapon_i['wear_max']
# Default
else:
tradeup_float_avg = float_cutoffs[ wear_int_dict[wear_val] ][1]
''' The quaternary loop will iterate over each weapon in the next-highest weapon group to get the EV'''
ev = 0
tradeup_gross_list = []
all_profit = True
for (j, weapon_tu_j) in enumerate(coll[ grade_int_dict[grade+1] ]):
# Calculation:
# Resulting Float = (Avg(Tradeup Float) * [Result_Max - Result_Min]) + Result_Min
j_float = (tradeup_float_avg * (weapon_tu_j['wear_max'] - weapon_tu_j['wear_min'])) + weapon_tu_j['wear_min']
j_wear = 0
if j_float < 0.07:
j_wear = 0
elif j_float < 0.15:
j_wear = 1
elif j_float < 0.38:
j_wear = 2
elif j_float < 0.45:
j_wear = 3
else:
j_wear = 4
j_weapon_key_str = weapon_tu_j['name'] + ' (' + wear_int_dict[j_wear] + ')'
try:
tradeup_net = price_data[j_weapon_key_str]['price'][ metadata['time'] ][ metadata['metric'] ]
except KeyError:
#print('Error getting {0}. Breaking...'.format(j_weapon_key_str))
break_val = True
break
# Rough gross value - steam fees
# TODO: Modify this to work with bitskins/other site prices
tradeup_gross = tradeup_net * 0.87
# For checking variance
tradeup_gross_list.append(tradeup_gross)
# For checking all profit
profit = tradeup_gross - tradeup_cost
if profit < 0:
all_profit = False
#print('1/{0} chance for {1}'.format(len(coll[ grade_int_dict[grade+1] ]), j_weapon_key_str))
ev += ( (profit) / len(coll[ grade_int_dict[grade+1] ]) )
if break_val != True:
#print('Trade up 10x {0} at {1} float values results in Expected Value of ${2:.4f}'.format(weapon_key_str, metadata['float'], ev))
ev_dict[weapon_key_str] = [ev, tradeup_cost, tradeup_gross_list, all_profit]
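# Hedged sketch (not used by the original code): the per-outcome interpolation
# that get_tradeup_ev applies, i.e. the "Resulting Float" formula noted in the
# comments above: result_float = avg(tradeup floats) * (result_max - result_min) + result_min
def _expected_result_float(tradeup_float_avg, result_wear_min, result_wear_max):
    return (tradeup_float_avg * (result_wear_max - result_wear_min)) + result_wear_min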
if __name__ == '__main__':
ev_output_file_path = str(input('Enter output file path ("ev_output_file_.json"): '))
''' Gather metadata for query params '''
md_time = str(input('Enter price search time [24_hours, 7_days, 30_days, all_time]: '))
while md_time not in ['24_hours', '7_days', '30_days', 'all_time']:
md_time = str(input('Please enter one of the following price search times [24_hours, 7_days, 30_days, all_time]: '))
metadata['time'] = md_time
md_metric = str(input('Enter price metric [median, average, lowest_price, highest_price]: '))
while md_metric not in ['median', 'average', 'lowest_price', 'highest_price']:
md_metric = str(input('Please enter one of the following price metrics [median, average, lowest_price, highest_price]: '))
metadata['metric'] = md_metric
#md_sold_min = int(input('Enter minimum sold (holds for all items individually in the calculation): '))
#while type(md_sold_min) != 'int':
# md_sold_min = input('Please enter an integer value: ')
#metadata['sold_min'] = int(md_sold_min)
md_float = str(input('Enter float [min, median, max]: '))
while md_float not in ['min', 'median', 'max']:
md_float = str(input('Float must be in [min, median, max]: '))
metadata['float'] = md_float
''' Generate price data from csgobackpack API '''
start_a = time.time()
response = requests.get(item_price_data_url).json()
timestamp = response['timestamp']
price_data = response['items_list']
# Get items data from scraper (use utf8 for the chinese m4 I think)
with open(weapon_data_file_path, 'r', encoding='utf8') as weapon_data_file:
weapon_data = json.load(weapon_data_file)
elapsed_a = time.time() - start_a
print('Load finished in {0} seconds'.format(elapsed_a))
''' The main loop will iterate over individual case/collection '''
start_b = time.time()
for key in weapon_data.keys():
coll = weapon_data[key]
''' The secondary loop will iterate over rarity '''
## Consumer Grade
if len(coll['industrial']) > 0:
get_tradeup_ev(coll, 0)
## Industrial Grade
if len(coll['milspec']) > 0:
get_tradeup_ev(coll, 1)
## Mil-Spec Grade
if len(coll['restricted']) > 0:
get_tradeup_ev(coll, 2)
## Restricted Grade
if len(coll['classified']) > 0:
get_tradeup_ev(coll, 3)
## Classified Grade
if len(coll['covert']) > 0:
get_tradeup_ev(coll, 4)
elapsed_b = time.time() - start_b
ev_dict_sorted = {k: v for k, v in sorted(ev_dict.items(), key=lambda item: item[1], reverse=True)}
with open(ev_output_file_path, 'w', encoding='utf8') as ev_output_file:
json.dump(ev_dict_sorted, ev_output_file)
print('EV check finished in {0} seconds'.format(elapsed_b)) | [
"[email protected]"
] | |
467540c5dee5db0e3e5e016eb7da46ba682879e5 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_muzzle.py | b303154fa083e4f00aab3ba2e61101f50ea18ee8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py |
# class header
class _MUZZLE():
def __init__(self,):
self.name = "MUZZLE"
self.definitions = [u'the mouth and nose of an animal, especially a dog, or a covering put over this in order to prevent the animal from biting', u'the end of a gun barrel, where the bullets come out']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
343d00dfa90099304af3a226951dacbb1f31c590 | 2f0c30fda27d1167f5a4850bdf9b5040815a162e | /bin/ext_service/reset_habitica_timestamps.py | 61527988d6e4ef8bfd71c972e4eb8b140849ae22 | [
"BSD-3-Clause"
] | permissive | ankur-gos/e-mission-server | 1117e8154174a953c7df47a1f1aa15c29a2a1819 | 64b098540e331ef2bb41bd9fe7a165ff53cc7a87 | refs/heads/master | 2021-01-01T18:10:26.314393 | 2017-07-26T06:03:56 | 2017-07-26T06:03:56 | 98,269,025 | 0 | 0 | null | 2017-07-25T05:48:37 | 2017-07-25T05:48:37 | null | UTF-8 | Python | false | false | 3,059 | py | """
Script to launch the pipeline reset code.
Options documented in
https://github.com/e-mission/e-mission-server/issues/333#issuecomment-312464984
"""
import logging
import argparse
import uuid
import arrow
import copy
import pymongo
import emission.net.ext_service.habitica.executor as enehe
import emission.core.get_database as edb
import emission.core.wrapper.user as ecwu  # assumed import path for the User wrapper used by _email_2_user_list
def _get_user_list(args):
if args.all:
return _find_all_users()
elif args.platform:
return _find_platform_users(args.platform)
elif args.email_list:
return _email_2_user_list(args.email_list)
else:
assert args.user_list is not None
return [uuid.UUID(u) for u in args.user_list]
def _find_platform_users(platform):
return edb.get_timeseries_db().find({'metadata.platform': platform}).distinct(
'user_id')
def _find_all_users():
return edb.get_timeseries_db().find().distinct('user_id')
def _email_2_user_list(email_list):
return [ecwu.User.fromEmail(e) for e in email_list]
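# Hedged illustration (not called by the script): how the positional `date`
# argument is turned into a UTC timestamp, mirroring the arrow calls in the
# __main__ block below; the sample date matches the one used in the help text.
def _example_day_ts(date_str="2016-02-17"):
    return arrow.get(date_str, "YYYY-MM-DD").timestamp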
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Reset the habitica pipeline. Does NOT delete points, so to avoid double counting, use only in situations where the original run would not have given any points")
# Options corresponding to
# https://github.com/e-mission/e-mission-server/issues/333#issuecomment-312464984
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-a", "--all", action="store_true", default=False,
help="reset the pipeline for all users")
group.add_argument("-p", "--platform", choices = ['android', 'ios'],
help="reset the pipeline for all on the specified platform")
group.add_argument("-u", "--user_list", nargs='+',
help="user ids to reset the pipeline for")
group.add_argument("-e", "--email_list", nargs='+',
help="email addresses to reset the pipeline for")
parser.add_argument("date",
help="date to reset the pipeline to. Format 'YYYY-mm-dd' e.g. 2016-02-17. Interpreted in UTC, so 2016-02-17 will reset the pipeline to 2016-02-16T16:00:00-08:00 in the pacific time zone")
parser.add_argument("-n", "--dry_run", action="store_true", default=False,
help="do everything except actually perform the operations")
args = parser.parse_args()
print args
print "Resetting timestamps to %s" % args.date
print "WARNING! Any points awarded after that date will be double counted!"
# Handle the first row in the table
day_dt = arrow.get(args.date, "YYYY-MM-DD")
logging.debug("day_dt is %s" % day_dt)
day_ts = day_dt.timestamp
logging.debug("day_ts is %s" % day_ts)
user_list = _get_user_list(args)
logging.info("received list with %s users" % user_list)
logging.info("first few entries are %s" % user_list[0:5])
for user_id in user_list:
logging.info("resetting user %s to ts %s" % (user_id, day_ts))
enehe.reset_all_tasks_to_ts(user_id, day_ts, args.dry_run)
| [
"[email protected]"
] | |
27d8155ee1f3dc72b0330c152644cb7e74f95a4e | b5f6109c3c70faa409bdc83d24e16195249e577a | /transviz.py | d41569720c2debfebdd5797ba0ae71f94695a867 | [] | no_license | afcarl/transviz | 1b079d7a7be35d65016be7fe4fa97c7077bf5630 | 9ed0a7b9945923cc2dd02fac45596b3165bcabe2 | refs/heads/master | 2020-03-20T06:07:23.631534 | 2015-02-27T16:50:06 | 2015-02-27T16:50:06 | 137,239,478 | 1 | 0 | null | 2018-06-13T16:07:04 | 2018-06-13T16:07:04 | null | UTF-8 | Python | false | false | 11,686 | py | from __future__ import division
import numpy as np
import networkx as nx
from collections import defaultdict
import hashlib
import os
import cPickle as pickle
from cStringIO import StringIO
from transvizutil import rgb2hexa, num_args, get_usages, normalize_transmat
# TODO add igraph kk layout
# TODO circo bend through middle?
# TODO node shrinking by adding node copies behind the originals!
# default graphviz attributes
graphdefaults = dict(
dpi='72',
outputorder='edgesfirst',
# bgcolor='transparent',
# splines='true', # segfault? https://github.com/ellson/graphviz/issues/42
)
nodedefaults = dict(
shape='circle',
fillcolor='white',
style='filled',
fixedsize='true',
penwidth=1.3,
)
edgedefaults = dict()
# default arguments to graphviz layout routines
graphviz_layouts = {
'twopi':{},
'gvcolor':{},
'wc':{},
'ccomps':{},
'tred':{},
'sccmap':{},
'fdp':{},
'circo':{},
'neato':{'overlap':'false','sep':'+8'},
'acyclic':{},
'nop':{},
'gvpr':{},
'dot':{},
'sfdp':{},
}
# default arguments to networkx layout routines
networkx_layouts = {
'circular':{'scale':120},
'shell':{'scale':120},
'spring':{'scale':120},
'spectral':{'scale':250},
'fruchterman_reingold':{'scale':120},
}
# converters from my attribute formats to graphviz formats
def color_converter(rgba):
if not isinstance(rgba[0],(list,tuple)):
return rgb2hexa(rgba)
else:
return ':'.join(rgb2hexa(_) for _ in rgba)
converters = defaultdict(
lambda: str,
{
'pos': lambda xy: '%f,%f!' % xy,
'color': color_converter,
'fillcolor': color_converter,
'weight': lambda x: x,
}
)
def convert(dct):
ret = {}
for attr, val in dct.items():
try:
ret[attr] = converters[attr](val)
except:
ret[attr] = val
return ret
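# Hedged sketch (not used elsewhere): how attribute dicts are normalised for
# graphviz by convert(). 'pos' tuples become "x,y!" pin strings and attributes
# without a dedicated converter fall back to str(); the sample values are made up.
def _convert_example():
    return convert({'pos': (10.0, 20.0), 'penwidth': 1.3})
    # -> {'pos': '10.000000,20.000000!', 'penwidth': '1.3'}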
class TransGraph(nx.DiGraph):
def __init__(self,A,Nmax=None,edge_threshold=0.):
self.A = normalize_transmat(A)
self.usages = get_usages(A,normalized=True)
# initialize as an nx.DiGraph
if Nmax is None:
super(TransGraph,self).__init__(self.A)
else:
super(TransGraph,self).__init__()
most_used = np.argsort(self.usages)[::-1][:Nmax]
for label in most_used:
self.add_node(label)
for (i,j), val in np.ndenumerate(self.A):
if i in most_used and j in most_used:
if val > edge_threshold:
self.add_edge(i,j,weight=val)
# set defaults
self.graph['graph'] = graphdefaults
self.graph['node'] = nodedefaults
self.graph['edge'] = edgedefaults
def graph_attrs(self,**kwargs):
self.graph['graph'].update(convert(kwargs))
return self
def node_attrs(self,func):
nargs = num_args(func)
if nargs == 1:
for i, node in self.nodes_iter(data=True):
node.update(convert(func(i)))
elif nargs == 2:
for i, node in self.nodes_iter(data=True):
node.update(convert(func(i,self.usages[i])))
else:
raise ValueError('func must take 1 or 2 arguments')
return self
def edge_attrs(self,func):
nargs = num_args(func)
if nargs == 1:
for i, j, edge in self.edges_iter(data=True):
edge.update(convert(func((i,j))))
elif nargs == 2:
for i, j, edge in self.edges_iter(data=True):
edge.update(convert(func(i,j)))
elif nargs == 3:
for i, j, edge in self.edges_iter(data=True):
edge.update(convert(func(i,j,self.A[i,j])))
else:
raise ValueError('func must take 1, 2, or 3 arguments')
return self
@staticmethod
def get_cachename(algname,weights):
return algname + hashlib.sha1(np.array(weights)).hexdigest()[:6]
def layout(self,algname=None,posdict=None,**kwargs):
assert (algname is not None) ^ (posdict is not None), \
'must pass algname or posdict'
if posdict is None:
cachename = self.get_cachename(
algname, [self.edge[i][j]['weight'] for (i,j) in self.edges()])
if os.path.isfile(cachename):
with open(cachename,'r') as infile:
posdict = pickle.load(infile)
else:
if algname in graphviz_layouts:
self.graph['graph'].update(dict(graphviz_layouts[algname],**kwargs))
posdict = nx.graphviz_layout(self,algname)
elif algname in networkx_layouts:
func = nx.__dict__[algname+'_layout']
kwargs = dict(networkx_layouts[algname],**kwargs)
kwargs['scale'] *= np.sqrt(self.order())
posdict = func(self,**kwargs)
else:
raise ValueError(
'algname must be one of %s' %
(graphviz_layouts.keys() + networkx_layouts.keys()))
with open(cachename,'w') as outfile:
pickle.dump(posdict,outfile,protocol=-1)
self.node_attrs(lambda i: {'pos':posdict[i]})
self.posdict = posdict
self.has_layout = True
return self
def draw(self,outfile=None,matplotlib=True,notebook=False):
agraph = nx.to_agraph(self)
agraph.has_layout = self.has_layout
if outfile is None:
pngstr = self._get_agraph_pngstr(agraph)
if matplotlib and not notebook:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
plt.imshow(mpimg.imread(pngstr),aspect='equal')
plt.axis('off')
if notebook:
from IPython.display import Image, display
display(Image(data=pngstr))
else:
agraph.draw(outfile)
@staticmethod
def _get_agraph_pngstr(agraph):
sio = StringIO()
agraph.draw(sio,format='png')
ret = sio.getvalue()
sio.close()
return ret
def prune_edges(self,func):
nargs = num_args(func)
if nargs == 1:
to_remove = \
[(i,j) for i, j, edge in self.edges_iter(data=True)
if func((i,j))]
elif nargs == 2:
to_remove = \
[(i,j) for i, j, edge in self.edges_iter(data=True)
if func(i,j)]
elif nargs == 3:
to_remove = \
[(i,j) for i, j, edge in self.edges_iter(data=True)
if func(i,j,self.A[i,j])]
else:
raise ValueError('func must take 1, 2, or 3 arguments')
for e in to_remove:
self.remove_edge(*e)
return self
### convenience
def highlight(self,node,
incolor=(0.21568627450980393, 0.47058823529411764, 0.7490196078431373),
outcolor=(0.996078431372549, 0.7019607843137254, 0.03137254901960784)):
self.node_attrs(
lambda i: {'color': (0.,0.,0.,1.0 if i == node else 0.05)})\
.edge_attrs(
lambda i,j,aij:
{'color': (incolor if j == node else outcolor)
+ (aij if i == node or j == node else 0.0,)})
return self
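# Hedged usage sketch (illustrative only; the transition matrix and output file
# name are made up): TransGraph is designed to be chained fluently, e.g.
# A = np.random.rand(10, 10)
# TransGraph(A, Nmax=8).node_attrs(lambda i, usage: {'width': 0.5 + usage}) \
#     .edge_attrs(lambda i, j, aij: {'penwidth': 2 * aij}) \
#     .layout('neato').draw('graph.png')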
class TransDiff(TransGraph):
def __init__(self,(A,B),Nmax=None,edge_threshold=0.):
self.A = normalize_transmat(A)
self.B = normalize_transmat(B)
self.A_usages = get_usages(A,normalized=True)
self.B_usages = get_usages(B,normalized=True)
self.has_foreground_nodes = False
# initialize as an nx.DiGraph
if Nmax is None:
nx.DiGraph.__init__(self,self.A+self.B)
else:
nx.DiGraph.__init__(self)
most_used = np.argsort(self.A_usages + self.B_usages)[::-1][:Nmax]
for label in most_used:
self.add_node(label)
for (i,j), val in np.ndenumerate(self.A+self.B):
if i in most_used and j in most_used:
if val > edge_threshold:
self.add_edge(i,j,weight=val)
# set defaults
self.graph['graph'] = graphdefaults
self.graph['node'] = nodedefaults
self.graph['edge'] = edgedefaults
def edge_attrs(self,func):
nargs = num_args(func)
if nargs == 1:
for i, j, edge in self.edges_iter(data=True):
edge.update(convert(func((i,j))))
elif nargs == 2:
for i, j, edge in self.edges_iter(data=True):
edge.update(convert(func(i,j)))
elif nargs == 4:
for i, j, edge in self.edges_iter(data=True):
edge.update(convert(func(i,j,self.A[i,j],self.B[i,j])))
else:
raise ValueError('func must take 1, 2, or 4 arguments')
return self
def node_attrs(self,func):
nargs = num_args(func)
if nargs == 1:
for i, node in self.nodes_iter(data=True):
if 'foregroundnode' not in node:
node.update(convert(func(i)))
elif nargs == 3:
for i, node in self.nodes_iter(data=True):
if 'foregroundnode' not in node:
node.update(convert(func(i,self.A_usages[i],self.B_usages[i])))
else:
raise ValueError('func must take 1 or 3 arguments')
return self
def prune_edges(self,func):
nargs = num_args(func)
if nargs == 1:
to_remove = \
[(i,j) for i, j, edge in self.edges_iter(data=True)
if func((i,j))]
elif nargs == 2:
to_remove = \
[(i,j) for i, j, edge in self.edges_iter(data=True)
if func(i,j)]
elif nargs == 4:
to_remove = \
[(i,j) for i, j, edge in self.edges_iter(data=True)
if func(i,j,self.A[i,j],self.B[i,j])]
else:
raise ValueError('func must take 1, 2, or 4 arguments')
for e in to_remove:
self.remove_edge(*e)
return self
def foreground_node_attrs(self,func):
if not self.has_foreground_nodes:
for i, node in self.nodes_iter(data=True):
self.add_node("z%d" % i, dict(foregroundnode=True,label=i,**node))
self.has_foreground_nodes = True
nargs = num_args(func)
if nargs == 1:
for i, node in self.nodes_iter(data=True):
if 'foregroundnode' in node:
i = int(i[1:])
node.update(convert(func(i)))
elif nargs == 3:
for i, node in self.nodes_iter(data=True):
if 'foregroundnode' in node:
i = int(i[1:])
node.update(convert(func(i,self.A_usages[i],self.B_usages[i])))
else:
raise ValueError('func must take 1 or 3 arguments')
return self
# TODO change the ordering in the dot file?
# def layout(self,algname=None,posdict=None,**kwargs):
# super(TransDiff,self).layout(algname=algname,posdict=posdict,**kwargs)
# if self.has_background_nodes:
# # TODO
# raise NotImplementedError('call layout before adding bgnd nodes')
# def draw(self,outfile=None,matplotlib=True,notebook=False):
# # TODO put background nodes at the start of the file so they are drawn
# # first
# raise NotImplementedError
| [
"[email protected]"
] | |
0ea68ccacf4032b775a574b37eb328f4f7cf5840 | 92f6e90d9b13930abde894ef6bdb521e1ae2b7be | /Incomplete/painting_wall.py | a4ee9ea29433dc55ca21fad11b0f75f1f18353bc | [
"MIT"
] | permissive | nptit/Check_iO | f32b68b66c7dbd47e1490aa8db0e3f4bf29716e5 | 9107241291e6f6e397c3756497e74eece782f1e4 | refs/heads/master | 2021-01-25T06:55:09.459265 | 2016-03-23T06:50:12 | 2016-03-23T06:50:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,006 | py | def checkio(required, operations):
total = set()
hmm = list()
highest = 0
lowest = 0
missing = list()
for index, op in enumerate(operations, start=1):
start, stop = op
if not hmm: # no need to check multiple tuples for first
if (stop + 1) - start >= required:
return index
hmm.append(op)
lowest = start
highest = stop + 1
# continue # do i need this? skip because of else??
else: # multiple tuples
if start < lowest:
lowest = start
if stop > highest:
highest = stop
for pair in hmm:
lo, hi = pair
if start > hi:
missing.append((hi+1, start-1))
print(index, missing)
# # print(list(range(start, stop+1)))
# # print(set(range(start, stop+1)))
# total = total.union(set(range(start, stop+1)))
# # print(total)
# if len(total) >= required:
# return index
# # print()
return -1
# print(checkio(5, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == 1
print(checkio(6, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == 2
print(checkio(11, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == 3
# print(checkio(16, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == 4
# print(checkio(21, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == -1
# print(checkio(1000000011,[[1, 1000000000],[11, 1000000010]])) # == -1
# if __name__ == '__main__':
# assert checkio(5, [[1, 5], [11, 15], [2, 14], [21, 25]]) == 1, "1st"
# assert checkio(6, [[1, 5], [11, 15], [2, 14], [21, 25]]) == 2, "2nd"
# assert checkio(11, [[1, 5], [11, 15], [2, 14], [21, 25]]) == 3, "3rd"
# assert checkio(16, [[1, 5], [11, 15], [2, 14], [21, 25]]) == 4, "4th"
# assert checkio(21, [[1, 5], [11, 15], [2, 14], [21, 25]]) == -1, "not enough"
# assert checkio(1000000011, [[1, 1000000000], [11, 1000000010]]) == -1, "large"
| [
"[email protected]"
] | |
724c7161d9b64a1c4b3e72ac685f5c01764c2ea1 | 0e5291f09c5117504447cc8df683ca1506b70560 | /netbox_client/models/vrf.py | 10a6db5a173cc52661c33c7d5ebf76baaa1fdcb9 | [
"MIT"
] | permissive | nrfta/python-netbox-client | abd0192b79aab912325485bf4e17777a21953c9b | 68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8 | refs/heads/master | 2022-11-13T16:29:02.264187 | 2020-07-05T18:06:42 | 2020-07-05T18:06:42 | 277,121,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,725 | py | # coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class VRF(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'name': 'str',
'rd': 'str',
'tenant': 'NestedTenant',
'enforce_unique': 'bool',
'description': 'str',
'tags': 'list[str]',
'display_name': 'str',
'custom_fields': 'object',
'created': 'date',
'last_updated': 'datetime',
'ipaddress_count': 'int',
'prefix_count': 'int'
}
attribute_map = {
'id': 'id',
'name': 'name',
'rd': 'rd',
'tenant': 'tenant',
'enforce_unique': 'enforce_unique',
'description': 'description',
'tags': 'tags',
'display_name': 'display_name',
'custom_fields': 'custom_fields',
'created': 'created',
'last_updated': 'last_updated',
'ipaddress_count': 'ipaddress_count',
'prefix_count': 'prefix_count'
}
def __init__(self, id=None, name=None, rd=None, tenant=None, enforce_unique=None, description=None, tags=None, display_name=None, custom_fields=None, created=None, last_updated=None, ipaddress_count=None, prefix_count=None): # noqa: E501
"""VRF - a model defined in Swagger""" # noqa: E501
self._id = None
self._name = None
self._rd = None
self._tenant = None
self._enforce_unique = None
self._description = None
self._tags = None
self._display_name = None
self._custom_fields = None
self._created = None
self._last_updated = None
self._ipaddress_count = None
self._prefix_count = None
self.discriminator = None
if id is not None:
self.id = id
self.name = name
if rd is not None:
self.rd = rd
if tenant is not None:
self.tenant = tenant
if enforce_unique is not None:
self.enforce_unique = enforce_unique
if description is not None:
self.description = description
if tags is not None:
self.tags = tags
if display_name is not None:
self.display_name = display_name
if custom_fields is not None:
self.custom_fields = custom_fields
if created is not None:
self.created = created
if last_updated is not None:
self.last_updated = last_updated
if ipaddress_count is not None:
self.ipaddress_count = ipaddress_count
if prefix_count is not None:
self.prefix_count = prefix_count
@property
def id(self):
"""Gets the id of this VRF. # noqa: E501
:return: The id of this VRF. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this VRF.
:param id: The id of this VRF. # noqa: E501
:type: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this VRF. # noqa: E501
:return: The name of this VRF. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this VRF.
:param name: The name of this VRF. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 50:
raise ValueError("Invalid value for `name`, length must be less than or equal to `50`") # noqa: E501
if name is not None and len(name) < 1:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def rd(self):
"""Gets the rd of this VRF. # noqa: E501
Unique route distinguisher (as defined in RFC 4364) # noqa: E501
:return: The rd of this VRF. # noqa: E501
:rtype: str
"""
return self._rd
@rd.setter
def rd(self, rd):
"""Sets the rd of this VRF.
Unique route distinguisher (as defined in RFC 4364) # noqa: E501
:param rd: The rd of this VRF. # noqa: E501
:type: str
"""
if rd is not None and len(rd) > 21:
raise ValueError("Invalid value for `rd`, length must be less than or equal to `21`") # noqa: E501
self._rd = rd
@property
def tenant(self):
"""Gets the tenant of this VRF. # noqa: E501
:return: The tenant of this VRF. # noqa: E501
:rtype: NestedTenant
"""
return self._tenant
@tenant.setter
def tenant(self, tenant):
"""Sets the tenant of this VRF.
:param tenant: The tenant of this VRF. # noqa: E501
:type: NestedTenant
"""
self._tenant = tenant
@property
def enforce_unique(self):
"""Gets the enforce_unique of this VRF. # noqa: E501
Prevent duplicate prefixes/IP addresses within this VRF # noqa: E501
:return: The enforce_unique of this VRF. # noqa: E501
:rtype: bool
"""
return self._enforce_unique
@enforce_unique.setter
def enforce_unique(self, enforce_unique):
"""Sets the enforce_unique of this VRF.
Prevent duplicate prefixes/IP addresses within this VRF # noqa: E501
:param enforce_unique: The enforce_unique of this VRF. # noqa: E501
:type: bool
"""
self._enforce_unique = enforce_unique
@property
def description(self):
"""Gets the description of this VRF. # noqa: E501
:return: The description of this VRF. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this VRF.
:param description: The description of this VRF. # noqa: E501
:type: str
"""
if description is not None and len(description) > 200:
raise ValueError("Invalid value for `description`, length must be less than or equal to `200`") # noqa: E501
self._description = description
@property
def tags(self):
"""Gets the tags of this VRF. # noqa: E501
:return: The tags of this VRF. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this VRF.
:param tags: The tags of this VRF. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def display_name(self):
"""Gets the display_name of this VRF. # noqa: E501
:return: The display_name of this VRF. # noqa: E501
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this VRF.
:param display_name: The display_name of this VRF. # noqa: E501
:type: str
"""
self._display_name = display_name
@property
def custom_fields(self):
"""Gets the custom_fields of this VRF. # noqa: E501
:return: The custom_fields of this VRF. # noqa: E501
:rtype: object
"""
return self._custom_fields
@custom_fields.setter
def custom_fields(self, custom_fields):
"""Sets the custom_fields of this VRF.
:param custom_fields: The custom_fields of this VRF. # noqa: E501
:type: object
"""
self._custom_fields = custom_fields
@property
def created(self):
"""Gets the created of this VRF. # noqa: E501
:return: The created of this VRF. # noqa: E501
:rtype: date
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this VRF.
:param created: The created of this VRF. # noqa: E501
:type: date
"""
self._created = created
@property
def last_updated(self):
"""Gets the last_updated of this VRF. # noqa: E501
:return: The last_updated of this VRF. # noqa: E501
:rtype: datetime
"""
return self._last_updated
@last_updated.setter
def last_updated(self, last_updated):
"""Sets the last_updated of this VRF.
:param last_updated: The last_updated of this VRF. # noqa: E501
:type: datetime
"""
self._last_updated = last_updated
@property
def ipaddress_count(self):
"""Gets the ipaddress_count of this VRF. # noqa: E501
:return: The ipaddress_count of this VRF. # noqa: E501
:rtype: int
"""
return self._ipaddress_count
@ipaddress_count.setter
def ipaddress_count(self, ipaddress_count):
"""Sets the ipaddress_count of this VRF.
:param ipaddress_count: The ipaddress_count of this VRF. # noqa: E501
:type: int
"""
self._ipaddress_count = ipaddress_count
@property
def prefix_count(self):
"""Gets the prefix_count of this VRF. # noqa: E501
:return: The prefix_count of this VRF. # noqa: E501
:rtype: int
"""
return self._prefix_count
@prefix_count.setter
def prefix_count(self, prefix_count):
"""Sets the prefix_count of this VRF.
:param prefix_count: The prefix_count of this VRF. # noqa: E501
:type: int
"""
self._prefix_count = prefix_count
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(VRF, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VRF):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
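# Hedged usage sketch (not part of the generated client): build a minimal VRF
# and serialise it; the name and route distinguisher are made-up values.
def _example_vrf_dict():
    vrf = VRF(name="backbone", rd="65000:100")
    return vrf.to_dict()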
| [
"[email protected]"
] | |
427afb80eebd9b9cb737cb4e73af8fc35c1f04a7 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-3/5c1a9140029525293d87eb87bd8c0260b87feb6c-<main>-bug.py | 91285b80db8036ad3d338654c9bec1ab24026dcf | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,785 | py | def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(name=dict(required=True, type='str'), load_balancers=dict(type='list'), target_group_arns=dict(type='list'), availability_zones=dict(type='list'), launch_config_name=dict(type='str'), min_size=dict(type='int'), max_size=dict(type='int'), placement_group=dict(type='str'), desired_capacity=dict(type='int'), vpc_zone_identifier=dict(type='list'), replace_batch_size=dict(type='int', default=1), replace_all_instances=dict(type='bool', default=False), replace_instances=dict(type='list', default=[]), lc_check=dict(type='bool', default=True), wait_timeout=dict(type='int', default=300), state=dict(default='present', choices=['present', 'absent']), tags=dict(type='list', default=[]), health_check_period=dict(type='int', default=300), health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), default_cooldown=dict(type='int', default=300), wait_for_instances=dict(type='bool', default=True), termination_policies=dict(type='list', default='Default'), notification_topic=dict(type='str', default=None), notification_types=dict(type='list', default=['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR']), suspend_processes=dict(type='list', default=[])))
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['replace_all_instances', 'replace_instances']])
if (not HAS_BOTO3):
module.fail_json(msg='boto3 required for this module')
state = module.params.get('state')
replace_instances = module.params.get('replace_instances')
replace_all_instances = module.params.get('replace_all_instances')
(region, ec2_url, aws_connect_params) = get_aws_connection_info(module, boto3=True)
try:
connection = boto3_conn(module, conn_type='client', resource='autoscaling', region=region, endpoint=ec2_url, **aws_connect_params)
except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound) as e:
module.fail_json(msg="Can't authorize connection. Check your credentials and profile.", exceptions=traceback.format_exc(), **camel_dict_to_snake_dict(e.message))
changed = create_changed = replace_changed = False
if (state == 'present'):
(create_changed, asg_properties) = create_autoscaling_group(connection, module)
elif (state == 'absent'):
changed = delete_autoscaling_group(connection, module)
module.exit_json(changed=changed)
if (replace_all_instances or replace_instances):
(replace_changed, asg_properties) = replace(connection, module)
if (create_changed or replace_changed):
changed = True
module.exit_json(changed=changed, **asg_properties) | [
"[email protected]"
] | |
d04ec0b345457bafcc12d9ac2a1faa572fff37e1 | 8f395b474f01b91e3c7a5a6260e84ed12cc57586 | /utilities/infer_implementations.py | 912c718c6b9380daa608b5ec7fa12ca9516a006d | [] | no_license | actixn/W-Net | 2263fbaa9c720af46f08d14f84b590e0ae455856 | adaf5c304d1359ac5c06e98d9cfd1b9c091e5708 | refs/heads/master | 2022-11-25T17:51:54.579895 | 2020-08-08T13:27:09 | 2020-08-08T13:27:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,750 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import cv2
import utilities.charcut as cc
import sys
# reload(sys)
# sys.setdefaultencoding("utf-8")
GRAYSCALE_AVG = 127.5
print_separater = "#################################################################"
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import matplotlib.pyplot as plt
import pylab
from utilities.utils import image_show
import numpy as np
import os
import random as rnd
import scipy.misc as misc
import copy as cp
def get_chars(path):
chars = list()
with open(path) as f:
for line in f:
line = u"%s" % line
char_counter = 0
for char in line:
current_char = line[char_counter]
chars.append(current_char)
char_counter += 1
return chars
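# Hedged usage note (added for clarity): get_chars returns every character of
# the text file in reading order, including newline characters, so callers may
# want to filter those out. The default path below is the sample file this
# module reads later in get_style_references; the helper is not called here.
def _sample_chars_example(path='../ContentTxt/SampleChars.txt'):
    return [c for c in get_chars(path) if c != '\n']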
def get_chars_set_from_level1_2(path,level):
"""
Expect a text file that each line is a char
"""
chars = list()
character_id_1_counter=0
character_id_2_counter=0
character_list=list()
with open(path) as f:
for line in f:
line = u"%s" % line
char_counter=0
for char in line:
current_char = line[char_counter]
chars.append(current_char)
if level==1:
character_id_1 = str(character_id_1_counter + 16 + 160)
character_id_2 = str(character_id_2_counter + 1 + 160)
character_id = character_id_1 + character_id_2
character_id_2_counter += 1
if character_id_2_counter == 94:
character_id_2_counter = 0
character_id_1_counter += 1
elif level==2:
character_id_1 = str(character_id_1_counter + 56 + 160)
character_id_2 = str(character_id_2_counter + 1 + 160)
character_id = character_id_1 + character_id_2
character_id_2_counter += 1
if character_id_2_counter == 94:
character_id_2_counter = 0
character_id_1_counter += 1
character_list.append(character_id)
char_counter+=1
return chars,character_list
def get_revelant_data(targeted_input_txt,
level1_charlist,level2_charlist,
level1_labellist,level2_labellist,
file_list_txt,file_data_dir,
img_width,img_filters, info):
def list_all_files(rootdir):
_files = []
list = os.listdir(rootdir)
for i in range(0, len(list)):
path = os.path.join(rootdir, list[i])
if os.path.isdir(path):
_files.extend(list_all_files(path))
if os.path.isfile(path):
_files.append(path)
return _files
def read_from_dir():
# get label0 for the targeted char img input txt
targeted_chars_list = list()
targeted_character_label0_list = list()
with open(targeted_input_txt) as f:
for line in f:
line = u"%s" % line
char_counter = 0
for char in line:
current_char = line[char_counter]
char_counter += 1
if not current_char == '\n':
level1_found = current_char in level1_charlist
level2_found = current_char in level2_charlist
if level1_found == 1:
idx = level1_charlist.index(current_char)
character_id = level1_labellist[idx]
elif level2_found:
idx = level2_charlist.index(current_char)
character_id = level2_labellist[idx]
else:
print("Fails! Didnt find %s in Set" % unicode(char))
character_id = 0
return -1, -1, False
targeted_character_label0_list.append(str(character_id))
targeted_chars_list.append(current_char)
actual_char_list = line
print("In total %d targeted chars are found in the standard GB2312 set for %s" % (len(targeted_chars_list), info))
# read all char img data
label0_list = list()
label1_list = list()
data_list = list()
for ii in range(len(file_list_txt)):
file_handle = open(file_list_txt[ii], 'r')
lines = file_handle.readlines()
for line in lines:
curt_line = line.split('@')
label1_list.append(curt_line[2])
label0_list.append(curt_line[1])
curt_data = curt_line[3].split('\n')[0]
if curt_data[0] == '/':
curt_data = curt_data[1:]
curt_data_path = os.path.join(file_data_dir[ii], curt_data)
data_list.append(curt_data_path)
# if 'TmpChars' in curt_data:
# a=1
# print(curt_data)
file_handle.close()
# find corresponding char img data
label1_vec = np.unique(label1_list)
label1_vec.sort()
corresponding_char_img = np.zeros(
shape=[len(targeted_character_label0_list), img_width, img_width, img_filters * len(label1_vec)],
dtype=np.float32)
label1_counter = 0
for label1 in label1_vec:
current_label1_indices = [ii for ii in range(len(label1_list)) if
label1_list[ii] == label1]
current_label0_on_current_label1 = list()
current_data_on_current_label1 = list()
for ii in current_label1_indices:
current_label0_on_current_label1.append(label0_list[ii])
current_data_on_current_label1.append(data_list[ii])
target_counter = 0
for ii in targeted_character_label0_list:
if ii not in current_label0_on_current_label1:
print("Fails! Didnt find %s in Dataset" % actual_char_list[target_counter].encode('utf-8'))
return -1, -1, False
else:
# index_found = current_label0_on_current_label1.index(ii)
indices_found = [kk for kk in range(len(current_label0_on_current_label1)) if current_label0_on_current_label1[kk]==ii]
tmp_counter = 0
for index_curt in indices_found:
if 'TmpChar' in current_data_on_current_label1[index_curt]:
tmp_counter+=1
if not tmp_counter == len(indices_found):
new_found_indices = list()
for index_curt in indices_found:
if not 'TmpChar' in current_data_on_current_label1[index_curt]:
new_found_indices.append(index_curt)
indices_found = new_found_indices
index_found = indices_found[np.random.randint(low=0,high=len(indices_found))]
char_img = misc.imread(current_data_on_current_label1[index_found])
# print("%d %d" % (label1_counter,target_counter))
if char_img.ndim == 3:
char_img = np.expand_dims(char_img[:, :, 0], axis=2)
elif char_img.ndim == 2:
char_img = np.expand_dims(char_img, axis=2)
# char_img = char_img / GRAYSCALE_AVG - 1
corresponding_char_img[target_counter, :, :,
label1_counter * img_filters:(label1_counter + 1) * img_filters] \
= char_img
target_counter += 1
label1_counter += 1
print("In total %d targeted chars are corresponded in the specific dataset for %s" % (len(targeted_chars_list), info))
print(print_separater)
return corresponding_char_img, label1_vec, targeted_chars_list, targeted_character_label0_list
def draw_single_char(ch, font):
canvas_size = 256
x_offset = 20
y_offset = 20
img = Image.new("RGB", (canvas_size, canvas_size), (255, 255, 255))
draw = ImageDraw.Draw(img)
draw.text((x_offset, y_offset), ch, (0, 0, 0), font=font)
img_matrix = np.asarray(img)[:, :, 0]
zero_indices = np.where(img_matrix == 0)
exceed = 'NONE'
if np.min(img_matrix) == np.max(img_matrix) or (0 not in img_matrix):
img_output = np.zeros(shape=[256,256,3])
img_output = Image.fromarray(np.uint8(img_output))
else:
up_p = np.min(zero_indices[0])
down_p = np.max(zero_indices[0])
left_p = np.min(zero_indices[1])
right_p = np.max(zero_indices[1])
up_down = down_p - up_p
right_left = right_p - left_p
if up_down > right_left:
character_size = up_down
if not character_size % 2 == 0:
character_size = character_size + 1
down_p = down_p + 1
right_left_avg = (right_p + left_p) / 2
right_p = right_left_avg + int(character_size / 2)
left_p = right_left_avg - int(character_size / 2)
if left_p < 0:
exceed = 'LEFT'
exceed_pixels = np.abs(left_p)
left_p = 0
if right_p > 255:
exceed = 'RIGHT'
exceed_pixels = right_p - 255
right_p = 255
else:
character_size = right_left
if not character_size % 2 == 0:
character_size = character_size + 1
right_p = right_p + 1
up_down_avg = (up_p + down_p) / 2
down_p = up_down_avg + int(character_size / 2)
up_p = up_down_avg - int(character_size / 2)
if up_p < 0:
exceed = 'UP'
exceed_pixels = np.abs(up_p)
up_p = 0
if down_p > 255:
exceed = 'DOWN'
exceed_pixels = down_p - 255
down_p = 255
img_matrix_cut = img_matrix[up_p:down_p, left_p:right_p]
if not exceed=='NONE':
if exceed=='LEFT':
added_pixels = np.ones([img_matrix_cut.shape[0],exceed_pixels]) * 255
img_matrix_cut = np.concatenate([added_pixels, img_matrix_cut], axis=1)
elif exceed=='RIGHT':
added_pixels = np.ones([img_matrix_cut.shape[0], exceed_pixels]) * 255
img_matrix_cut = np.concatenate([img_matrix_cut,added_pixels], axis=1)
elif exceed=='UP':
added_pixels = np.ones([exceed_pixels, img_matrix_cut.shape[1]]) * 255
img_matrix_cut = np.concatenate([added_pixels, img_matrix_cut], axis=0)
elif exceed=='DOWN':
added_pixels = np.ones([exceed_pixels, img_matrix_cut.shape[1]]) * 255
img_matrix_cut = np.concatenate([img_matrix_cut, added_pixels], axis=0)
img_matrix_cut = np.tile(np.reshape(img_matrix_cut,
[img_matrix_cut.shape[0], img_matrix_cut.shape[1], 1]),
[1, 1, 3])
img_cut = Image.fromarray(np.uint8(img_matrix_cut))
img_resize = img_cut.resize((150, 150), Image.ANTIALIAS)
img_output = Image.new("RGB", (256, 256), (255, 255, 255))
img_output.paste(img_resize, (52, 52))
img_output.resize((64,64), Image.ANTIALIAS)
return img_output.resize((64,64), Image.ANTIALIAS)
def generate_from_single_font_file(files):
targeted_chars_list = list()
targeted_character_label0_list = list()
with open(targeted_input_txt) as f:
for line in f:
line = u"%s" % line
char_counter = 0
for char in line:
current_char = line[char_counter]
char_counter += 1
targeted_character_label0_list.append(str(current_char))
targeted_chars_list.append(current_char)
print("In total %d targeted chars are found." % len(targeted_chars_list))
label1_vec = list()
corresponding_char_img = np.zeros(
shape=[len(targeted_character_label0_list),
img_width, img_width,
img_filters * len(files)],
dtype=np.float32)
for label1_counter in range(len(files)):
current_font_misc = ImageFont.truetype(files[label1_counter], size=150)
current_file_path = files[label1_counter]
current_file_name = os.path.splitext(current_file_path)[0]
current_file_name = current_file_name.split('/')[len(current_file_name.split('/'))-1]
label1_vec.append(current_file_name)
for target_counter in range(len(targeted_chars_list)):
char_misc = draw_single_char(ch=targeted_chars_list[target_counter], font=current_font_misc)
char_img = np.asarray(char_misc)[:, :, 0]
# char_img = char_img / GRAYSCALE_AVG - 1
char_img = np.expand_dims(char_img, axis=2)
if char_img.ndim == 3:
char_img = np.expand_dims(char_img[:, :, 0], axis=2)
elif char_img.ndim == 2:
char_img = np.expand_dims(char_img, axis=2)
corresponding_char_img[target_counter, :, :,
label1_counter * img_filters:(label1_counter + 1) * img_filters] \
= char_img
return corresponding_char_img,label1_vec, targeted_chars_list, targeted_character_label0_list
dir_img = False
for check_dir in file_data_dir:
if os.path.isdir(check_dir):
file_list = list_all_files(check_dir)
if (os.path.isdir(check_dir)) and \
(os.path.splitext(file_list[np.random.randint(0,len(file_list)-1)])[-1]=='.png' or os.path.splitext(file_list[np.random.randint(0,len(file_list)-1)])[-1]=='.jpg'):
dir_img=True
break
single_font_file = False
font_file_dir = False
if not dir_img:
for check_file in file_data_dir:
is_file = os.path.isfile(check_file)
is_dir = os.path.isdir(check_file)
is_ttf = (os.path.splitext(check_file)[-1] == '.ttf') or (os.path.splitext(check_file)[-1] == '.TTF')
is_ttc = (os.path.splitext(check_file)[-1] == '.ttc') or (os.path.splitext(check_file)[-1] == '.TTC')
is_otf = (os.path.splitext(check_file)[-1] == '.otf') or (os.path.splitext(check_file)[-1] == '.OTF')
if is_file and (is_ttf or is_ttc or is_otf):
single_font_file = True
elif is_dir:
file_list_in_the_dir = list_all_files(check_file)
for sub_file in file_list_in_the_dir:
is_ttf = (os.path.splitext(sub_file)[-1] == '.ttf') or (os.path.splitext(sub_file)[-1] == '.TTF')
is_ttc = (os.path.splitext(sub_file)[-1] == '.ttc') or (os.path.splitext(sub_file)[-1] == '.TTC')
is_otf = (os.path.splitext(sub_file)[-1] == '.otf') or (os.path.splitext(sub_file)[-1] == '.OTF')
if not (is_ttf or is_ttc or is_otf):
break
font_file_dir = True
if single_font_file or font_file_dir:
break
if dir_img:
corresponding_char_img, label1_vec, char_list, char_label0_list = read_from_dir()
if single_font_file or font_file_dir:
if font_file_dir:
file_data_dir_new = list()
for file_dir in file_data_dir:
files = list_all_files(file_dir)
file_data_dir_new.extend(files)
file_data_dir = file_data_dir_new
file_data_dir.sort()
corresponding_char_img, label1_vec, char_list, char_label0_list = \
generate_from_single_font_file(files=file_data_dir)
corresponding_char_img[np.where(corresponding_char_img < 240)] = 0
corresponding_char_img[np.where(corresponding_char_img >= 240)] = 255
corresponding_char_img = corresponding_char_img / GRAYSCALE_AVG - 1
return corresponding_char_img, label1_vec, True, char_list, char_label0_list
def get_style_references(img_path, resave_path, style_input_number):
if os.path.isdir(img_path):
style_reference = \
collect_chars_from_directory(img_path, resave_path)
else:
file_extension=os.path.splitext(img_path)[1]
if file_extension=='.ttf':
style_reference = \
generated_from_ttf_otf_files(img_path, resave_path)
else:
style_reference = \
crop_from_full_handwriting_essay_paper(img_path, resave_path)
if (not style_input_number == 0) and style_input_number<style_reference.shape[2]:
rnd_indices=rnd.sample(range(style_reference.shape[2]),style_input_number)
rnd_counter=0
for ii in rnd_indices:
current_style_ref=np.expand_dims(style_reference[:,:,ii],axis=2)
if rnd_counter == 0:
new_style_reference=current_style_ref
else:
new_style_reference=np.concatenate([new_style_reference,current_style_ref],axis=2)
rnd_counter+=1
style_reference=new_style_reference
style_reference = np.expand_dims(style_reference, axis=0)
print("Selected %d style references for generation" % style_reference.shape[3])
print(print_separater)
return style_reference
def collect_chars_from_directory(img_path, resave_path):
counter=0
for root, dirs, files in os.walk(img_path):
files.sort()
for name in files:
if not ((name.find("DS") == -1) and (name.find("Th") == -1)):
continue
file_path = (os.path.join(root, name))
file_extension = os.path.splitext(file_path)[1]
if file_extension=='.png':
char_read=misc.imread(os.path.join(root,name))
char_read=char_read[:,:,0]
char_read = char_read / GRAYSCALE_AVG - 1
char_read = np.expand_dims(char_read, axis=2)
if counter == 0:
style_reference = char_read
else:
style_reference = np.concatenate([style_reference, char_read], axis=2)
counter+=1
style_num = style_reference.shape[2]
row_col_num = np.int64(np.ceil(np.sqrt(style_num)))
resave_paper = matrix_paper_generation(images=np.expand_dims(np.transpose(style_reference,[2,0,1]),axis=3),
rows=row_col_num,columns=row_col_num)
misc.imsave(os.path.join(resave_path, 'InputStyleImg.png'), resave_paper)
return style_reference
def generated_from_ttf_otf_files(img_path, resave_path):
def draw_single_char(ch, font):
canvas_size = 256
x_offset = 20
y_offset = 20
img = Image.new("RGB", (canvas_size, canvas_size), (255, 255, 255))
draw = ImageDraw.Draw(img)
draw.text((x_offset, y_offset), ch, (0, 0, 0), font=font)
img_matrix = np.asarray(img)[:, :, 0]
zero_indices = np.where(img_matrix == 0)
exceed = 'NONE'
if np.min(img_matrix) == np.max(img_matrix) or (0 not in img_matrix):
img_output = np.zeros(shape=[256,256,3])
img_output = Image.fromarray(np.uint8(img_output))
else:
up_p = np.min(zero_indices[0])
down_p = np.max(zero_indices[0])
left_p = np.min(zero_indices[1])
right_p = np.max(zero_indices[1])
up_down = down_p - up_p
right_left = right_p - left_p
if up_down > right_left:
character_size = up_down
if not character_size % 2 == 0:
character_size = character_size + 1
down_p = down_p + 1
right_left_avg = (right_p + left_p) / 2
right_p = right_left_avg + int(character_size / 2)
left_p = right_left_avg - int(character_size / 2)
if left_p < 0:
exceed = 'LEFT'
exceed_pixels = np.abs(left_p)
left_p = 0
if right_p > 255:
exceed = 'RIGHT'
exceed_pixels = right_p - 255
right_p = 255
else:
character_size = right_left
if not character_size % 2 == 0:
character_size = character_size + 1
right_p = right_p + 1
up_down_avg = (up_p + down_p) / 2
down_p = up_down_avg + int(character_size / 2)
up_p = up_down_avg - int(character_size / 2)
if up_p < 0:
exceed = 'UP'
exceed_pixels = np.abs(up_p)
up_p = 0
if down_p > 255:
exceed = 'DOWN'
exceed_pixels = down_p - 255
down_p = 255
img_matrix_cut = img_matrix[up_p:down_p, left_p:right_p]
if not exceed=='NONE':
if exceed=='LEFT':
added_pixels = np.ones([img_matrix_cut.shape[0],exceed_pixels]) * 255
img_matrix_cut = np.concatenate([added_pixels, img_matrix_cut], axis=1)
elif exceed=='RIGHT':
added_pixels = np.ones([img_matrix_cut.shape[0], exceed_pixels]) * 255
img_matrix_cut = np.concatenate([img_matrix_cut,added_pixels], axis=1)
elif exceed=='UP':
added_pixels = np.ones([exceed_pixels, img_matrix_cut.shape[1]]) * 255
img_matrix_cut = np.concatenate([added_pixels, img_matrix_cut], axis=0)
elif exceed=='DOWN':
added_pixels = np.ones([exceed_pixels, img_matrix_cut.shape[1]]) * 255
img_matrix_cut = np.concatenate([img_matrix_cut, added_pixels], axis=0)
img_matrix_cut = np.tile(np.reshape(img_matrix_cut,
[img_matrix_cut.shape[0], img_matrix_cut.shape[1], 1]),
[1, 1, 3])
img_cut = Image.fromarray(np.uint8(img_matrix_cut))
img_resize = img_cut.resize((150, 150), Image.ANTIALIAS)
img_output = Image.new("RGB", (256, 256), (255, 255, 255))
img_output.paste(img_resize, (52, 52))
        # PIL's resize() returns a new image rather than modifying in place, so the
        # final 64x64 glyph is produced by the return expression below.
        return img_output.resize((64,64), Image.ANTIALIAS)
sample_char_set = get_chars(path='../ContentTxt/SampleChars.txt')
sample_font = ImageFont.truetype(img_path, size=150)
counter=0
for current_char in sample_char_set:
char_misc = draw_single_char(ch=current_char,font=sample_font)
char_np = np.asarray(char_misc)[:, :, 0]
char_np = char_np / GRAYSCALE_AVG - 1
char_np = np.expand_dims(char_np,axis=2)
if counter==0:
style_reference = char_np
else:
style_reference = np.concatenate([style_reference,char_np],axis=2)
counter+=1
style_num = style_reference.shape[2]
row_col_num = np.int64(np.ceil(np.sqrt(style_num)))
resave_paper = matrix_paper_generation(images=np.expand_dims(np.transpose(style_reference, [2, 0, 1]), axis=3),
rows=row_col_num, columns=row_col_num)
misc.imsave(os.path.join(resave_path, 'InputStyleImg.png'), resave_paper)
return style_reference
def crop_from_full_handwriting_essay_paper(img_path, resave_path):
img = cv2.imread(img_path)
img_misc = misc.imread(img_path)
misc.imsave(os.path.join(resave_path, 'InputStyleImg.png'), img_misc)
img_new = img
img_new[np.where(img < 150)] = 0
img_new[np.where(img >= 150)] = 255
img = img_new
image_list = cc.char_cut(img, 37, 64)
counter = 0
style_reference = None
for im_split in image_list:
img = np.expand_dims(im_split, axis=2)
img = img / GRAYSCALE_AVG - 1
if counter == 0:
style_reference = img
else:
style_reference = np.concatenate([style_reference, img], axis=2)
counter += 1
print("In total %d style references are extracted from %s" % (style_reference.shape[2],img_path))
return style_reference
def draw_single_char(ch, font, canvas_size, x_offset=20, y_offset=20,filters=-1):
img_read = Image.new("RGB", (256, 256), (255, 255, 255))
draw = ImageDraw.Draw(img_read)
draw.text((x_offset, y_offset), ch, (0, 0, 0), font=font)
img_read = np.array(img_read)
img_matrix = np.asarray(img_read)[:, :, 0]
zero_indices = np.where(img_matrix == 0)
exceed = 'NONE'
if np.min(img_matrix) == np.max(img_matrix) or (0 not in img_matrix):
img_output = np.zeros(shape=[256, 256, 3])
img_output = Image.fromarray(np.uint8(img_output))
else:
up_p = np.min(zero_indices[0])
down_p = np.max(zero_indices[0])
left_p = np.min(zero_indices[1])
right_p = np.max(zero_indices[1])
up_down = down_p - up_p
right_left = right_p - left_p
if up_down > right_left:
character_size = up_down
if not character_size % 2 == 0:
character_size = character_size + 1
down_p = down_p + 1
right_left_avg = (right_p + left_p) / 2
right_p = right_left_avg + int(character_size / 2)
left_p = right_left_avg - int(character_size / 2)
if left_p < 0:
exceed = 'LEFT'
exceed_pixels = np.abs(left_p)
left_p = 0
if right_p > 255:
exceed = 'RIGHT'
exceed_pixels = right_p - 255
right_p = 255
else:
character_size = right_left
if not character_size % 2 == 0:
character_size = character_size + 1
right_p = right_p + 1
up_down_avg = (up_p + down_p) / 2
down_p = up_down_avg + int(character_size / 2)
up_p = up_down_avg - int(character_size / 2)
if up_p < 0:
exceed = 'UP'
exceed_pixels = np.abs(up_p)
up_p = 0
if down_p > 255:
exceed = 'DOWN'
exceed_pixels = down_p - 255
down_p = 255
img_matrix_cut = img_matrix[up_p:down_p, left_p:right_p]
if not exceed == 'NONE':
if exceed == 'LEFT':
added_pixels = np.ones([img_matrix_cut.shape[0], exceed_pixels]) * 255
img_matrix_cut = np.concatenate([added_pixels, img_matrix_cut], axis=1)
elif exceed == 'RIGHT':
added_pixels = np.ones([img_matrix_cut.shape[0], exceed_pixels]) * 255
img_matrix_cut = np.concatenate([img_matrix_cut, added_pixels], axis=1)
elif exceed == 'UP':
added_pixels = np.ones([exceed_pixels, img_matrix_cut.shape[1]]) * 255
img_matrix_cut = np.concatenate([added_pixels, img_matrix_cut], axis=0)
elif exceed == 'DOWN':
added_pixels = np.ones([exceed_pixels, img_matrix_cut.shape[1]]) * 255
img_matrix_cut = np.concatenate([img_matrix_cut, added_pixels], axis=0)
img_matrix_cut = np.tile(np.reshape(img_matrix_cut,
[img_matrix_cut.shape[0], img_matrix_cut.shape[1], 1]),
[1, 1, 3])
img_cut = Image.fromarray(np.uint8(img_matrix_cut))
img_resize = img_cut.resize((150, 150), Image.ANTIALIAS)
img_output = Image.new("RGB", (256, 256), (255, 255, 255))
img_output.paste(img_resize, (52, 52))
if not canvas_size == 256:
img_output = img_output.resize((canvas_size,canvas_size), Image.ANTIALIAS)
img = np.array(img_output)
if filters == 1:
img = img[:, :, 0]
img = np.reshape(img, [img.shape[0], img.shape[1], 1])
return img
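# draw_single_char renders one glyph on a 256x256 canvas, crops it to the tight bounding box of
# the ink, pads the crop back into a square (re-adding any rows/columns that fell outside the
# canvas), resizes that square to 150x150, re-centres it on a fresh 256x256 canvas and finally
# rescales to canvas_size. With filters == 1 only the first channel is kept, e.g.
#   draw_single_char(ch, ImageFont.truetype(font_path, 150), 64, filters=1)  # -> shape (64, 64, 1)
# where font_path is a placeholder; otherwise all three RGB channels are returned.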
def find_transfer_targets(data_dir,txt_path,selected_label1,style_input_number,
img_width,filter_num,batch_size):
data_list = list()
for ii in range(len(data_dir)):
file_handle = open(txt_path[ii], 'r')
lines = file_handle.readlines()
for line in lines:
curt_line = line.split('@')
label1 = int(curt_line[2])
if label1 == selected_label1:
curt_data = curt_line[3].split('\n')[0]
if curt_data[0] == '/':
curt_data = curt_data[1:]
curt_data_path = os.path.join(data_dir[ii], curt_data)
data_list.append(curt_data_path)
file_handle.close()
indices = range(len(data_list))
selected_indices = rnd.sample(indices,style_input_number)
data_list = [data_list[i] for i in selected_indices]
full_chars = np.zeros([style_input_number, img_width, img_width, filter_num])
counter=0
for ii in data_list:
char_img = misc.imread(ii)
if filter_num == 1:
char_img = char_img[:, :, 0]
char_img = np.reshape(char_img, [char_img.shape[0], char_img.shape[1], 1])
char_img = np.subtract(np.divide(char_img,
np.ones(char_img.shape) * GRAYSCALE_AVG),
np.ones(char_img.shape))
full_chars[counter, :, :, :] = char_img
counter+=1
iter_num = style_input_number / batch_size + 1
full_batch_num = iter_num * batch_size
added_needed = full_batch_num - full_chars.shape[0]
if added_needed < full_chars.shape[0]:
full_chars = np.concatenate([full_chars, full_chars[0:added_needed, :, :, :]])
else:
for ii in range(added_needed):
full_char_length = full_chars.shape[0]
selected = rnd.sample(range(full_char_length),1)
full_chars = np.concatenate([full_chars, full_chars[selected, :, :, :]])
return full_chars
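# find_transfer_targets samples style_input_number character images with the requested label1
# from the listing files, then pads the batch by repeating images so that the total count fills
# a whole number of batches of size batch_size.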
def transform_numpy_to_paper(numpy_image):
output_paper = Image.new("RGB", (numpy_image.shape[2],
numpy_image.shape[1]),
(255, 255, 255))
numpy_image = np.squeeze(numpy_image)
numpy_image = numpy_image - np.min(numpy_image)
numpy_image = numpy_image / np.max(numpy_image)
numpy_image = numpy_image * 255
numpy_image = np.tile(np.reshape(numpy_image,
[1, numpy_image.shape[0], numpy_image.shape[1], 1]),
[1, 1, 1, 3])
pasted = Image.fromarray(np.uint8(np.squeeze(numpy_image)))
output_paper.paste(pasted,(0,0))
return output_paper
def matrix_paper_generation(images, rows, columns):
char_width=images.shape[1]
chars_per_row = columns
chars_per_column = rows
output_paper = Image.new("RGB", (char_width * chars_per_row,
char_width * chars_per_column),
(255, 255, 255))
column_counter = 1
row_counter = 1
for ii in range(images.shape[0]):
curt_img = np.squeeze(images[ii, :, :, :])
curt_img = curt_img - np.min(curt_img)
curt_img = curt_img / np.max(curt_img)
curt_img = curt_img * 255
curt_img = np.tile(np.reshape(curt_img,
[curt_img.shape[0], curt_img.shape[1], 1]),
[1, 1, 3])
curt_pasted = Image.fromarray(np.uint8(curt_img))
output_paper.paste(curt_pasted, ((column_counter - 1) * char_width,
(row_counter - 1) * char_width))
column_counter += 1
if column_counter > chars_per_row:
column_counter = 1
row_counter += 1
return output_paper
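# matrix_paper_generation tiles a batch of character images of shape [N, H, W, C] into a
# rows x columns grid on one white canvas; it is used above to resave the collected style
# references, e.g.
#   paper = matrix_paper_generation(images=np.expand_dims(np.transpose(refs, [2, 0, 1]), axis=3),
#                                   rows=row_col_num, columns=row_col_num)
# where refs is the (H, W, N) stack of style characters.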
def numpy_img_save(img,path):
imgout=cp.deepcopy(img)
imgout = imgout * 255
imgout = np.tile(np.reshape(imgout,
[imgout.shape[0],
imgout.shape[1], 1]),
[1, 1, 3])
imgout_misc = Image.fromarray(np.uint8(imgout))
misc.imsave(path,imgout_misc)
def one_row_or_column_generation(images,option):
img_num = images.shape[0]
char_width = images.shape[1]
if option=='ROW':
output_paper = Image.new("RGB", (char_width * img_num,
char_width),
(255, 255, 255))
for ii in range(images.shape[0]):
curt_img = np.squeeze(images[ii, :, :, :])
curt_img = curt_img - np.min(curt_img)
curt_img = curt_img / np.max(curt_img)
curt_img = curt_img * 255
curt_img = np.tile(np.reshape(curt_img,
[curt_img.shape[0], curt_img.shape[1], 1]),
[1, 1, 3])
curt_pasted = Image.fromarray(np.uint8(curt_img))
output_paper.paste(curt_pasted,(ii*char_width,0))
elif option=='COLUMN':
output_paper = Image.new("RGB", (char_width ,
char_width * img_num),
(255, 255, 255))
for ii in range(images.shape[0]):
curt_img = np.squeeze(images[ii, :, :, :])
curt_img = curt_img - np.min(curt_img)
curt_img = curt_img / np.max(curt_img)
curt_img = curt_img * 255
curt_img = np.tile(np.reshape(curt_img,
[curt_img.shape[0], curt_img.shape[1], 1]),
[1, 1, 3])
curt_pasted = Image.fromarray(np.uint8(curt_img))
output_paper.paste(curt_pasted, (0,ii * char_width))
return output_paper
| [
"[email protected]"
] | |
6264a9609afe6739ef0895579757c90f0c5ba974 | bd498cbbb28e33370298a84b693f93a3058d3138 | /SIAT/benchmarks/resnet/implementations/mindspore_open_src/model/thor.py | a7798c2b1fb3178a226b6da64031bc33791ff00b | [
"Apache-2.0"
] | permissive | piyushghai/training_results_v0.7 | afb303446e75e3e9789b0f6c40ce330b6b83a70c | e017c9359f66e2d814c6990d1ffa56654a73f5b0 | refs/heads/master | 2022-12-19T16:50:17.372320 | 2020-09-24T01:02:00 | 2020-09-24T18:01:01 | 298,127,245 | 0 | 1 | Apache-2.0 | 2020-09-24T00:27:21 | 2020-09-24T00:27:21 | null | UTF-8 | Python | false | false | 12,850 | py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""momentum"""
import mindspore.common.dtype as mstype
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.parameter import ParameterTuple
from mindspore.common.tensor import Tensor
from mindspore.nn.optim.optimizer import Optimizer
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.parallel._utils import _get_device_num, _get_mirror_mean
from model.grad_reducer_thor import DistributedGradReducerThor
momentum_opt = C.MultitypeFuncGraph("momentum_opt")
@momentum_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor")
def _tensor_run_opt_ext(opt, learning_rate, momentum, gradient, weight, moment):
"""Apply momentum optimizer to the weight parameter using Tensor."""
success = True
success = F.depend(success, opt(weight, moment, learning_rate, gradient, momentum))
return success
op_add = P.AddN()
apply_decay = C.MultitypeFuncGraph("apply_decay")
@apply_decay.register("Number", "Bool", "Tensor", "Tensor")
def _tensor_apply_decay(weight_decay, if_apply, weight, gradient):
"""Get grad with weight_decay."""
if if_apply:
return op_add((weight * weight_decay, gradient))
return gradient
class THOR(Optimizer):
"""THOR"""
def __init__(self, params, learning_rate, momentum, matrix_A, matrix_G, A_inv_max, G_inv_max, weight_decay=0.0,
loss_scale=1.0, batch_size=32.0,
decay_filter=lambda x: x.name not in []):
super(THOR, self).__init__(learning_rate, params, weight_decay, loss_scale)
if isinstance(momentum, float) and momentum < 0.0:
raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum))
self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum")
self.params = self.parameters
self.moments = self.params.clone(prefix="moments", init='zeros')
self.hyper_map = C.HyperMap()
self.opt = P.ApplyMomentum()
self.matrix_A = ParameterTuple(matrix_A)
self.matrix_G = ParameterTuple(matrix_G)
self.A_inv_max = ParameterTuple(A_inv_max)
self.G_inv_max = ParameterTuple(G_inv_max)
self.cube_matmul_left = P.CusMatMulCubeFraczLeftCast()
self.cube_matmul_left_fc = P.CusMatMulCubeDenseLeft()
self.cube_matmul_right_fc = P.CusMatMulCubeDenseRight()
self.cube_matmul_right_mul = P.CusMatMulCubeFraczRightMul()
self.transpose = P.Transpose()
self.shape = P.Shape()
self.reshape = P.Reshape()
self.mul = P.Mul()
self.weight_idx = []
for i in range(len(self.params)):
if "conv" in self.params[i].name or "end_point" in self.params[i].name:
self.weight_idx.append(i)
self.weight_idx.append(len(self.params))
self.feature_map = [1.0 / 12544, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136,
1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136, 1.0 / 3136,
1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784,
1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784, 1.0 / 784,
1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196,
1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196, 1.0 / 196,
1.0 / 196, 1.0 / 196, 1.0 / 196,
1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49, 1.0 / 49,
1.0]
mean = _get_mirror_mean()
degree = _get_device_num()
self.grad_reducer_Amax = DistributedGradReducerThor(self.parameters, 2, mean, degree)
self.grad_reducer_Gmax = DistributedGradReducerThor(self.parameters, 5, mean, degree)
self.grad_reducer_A = DistributedGradReducerThor(self.parameters, 3, mean, degree)
self.grad_reducer_G = DistributedGradReducerThor(self.parameters, 4, mean, degree)
self.matrix_A_inv = ()
self.matrix_G_inv = ()
self.matrix_max_inv = ()
for i in range(54):
self.matrix_max_inv = self.matrix_max_inv + (
Parameter(initializer(1, [1], mstype.float32), name="matrix_max" + str(i), requires_grad=False),)
self.log = P.Log()
self.exp = P.Exp()
self.sqrt = P.Sqrt()
self.matrix_max_inv = ParameterTuple(self.matrix_max_inv)
self.assign = P.Assign()
self.cast = P.Cast()
self.thor = True
self.weight_decay = weight_decay * loss_scale
self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
self.conv_index = [
0,
1,2,3,6,7,8,9,12,13,14,
17,18,19,22,23,24,25,28,29,30,33,34,35,
38,39,40,43,44,45,46,49,50,51,54,55,56,59,60,61,64,65,66,
69,70,71,74,75,76,77,80,81,82,
85
]
self.batch_size = batch_size
self.bn_index = [3,7,10,13,17,20,23,26,30,33,36,39,42,45,49,52]
self.bn_gradient_index = [
-1,-1,-1,
4,
-1,-1,-1,
10,
-1,-1,
15,
-1,-1,
20,
-1,-1,-1,
26,
-1,-1,
31,
-1,-1,
36,
-1,-1,
41,
-1,-1,-1,
47,
-1,-1,
52,
-1,-1,
57,
-1,-1,
62,
-1,-1,
67,
-1,-1,
72,
-1,-1,-1,
78,
-1,-1,
83
]
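    # Index bookkeeping used by construct():
    #   conv_index[i]        position of the i-th THOR-handled layer's weight gradient in the flat
    #                        `gradients` tuple (53 convolution layers plus the final dense layer).
    #   bn_index             layers after which BatchNorm gamma/beta gradients must be passed
    #                        through unchanged.
    #   bn_gradient_index[i] position of that BatchNorm's gamma gradient (beta follows at +1), or
    #                        -1 when layer i has no BatchNorm gradients to forward.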
def construct(self, gradients):
params = self.params
moments = self.moments
        if self.thor:  # second-order (THOR) sub-graph processing path
matrix_A_allreduce = ()
matrix_G_allreduce = ()
matrix_A_max_allreduce = ()
matrix_G_max_allreduce = ()
for i in range(54):
g = gradients[self.conv_index[i]]
matrix_A = self.matrix_A[i]
matrix_G = self.matrix_G[i]
A_max = self.A_inv_max[i]
G_max = self.G_inv_max[i]
matrix_A = F.depend(matrix_A, g)
matrix_G = F.depend(matrix_G, g)
A_max = F.depend(A_max, g)
G_max = F.depend(G_max, g)
matrix_A_allreduce = matrix_A_allreduce + (matrix_A,)
matrix_G_allreduce = matrix_G_allreduce + (matrix_G,)
matrix_A_max_allreduce = matrix_A_max_allreduce + (A_max,)
matrix_G_max_allreduce = matrix_G_max_allreduce + (G_max,)
matrix_A_allreduce = self.grad_reducer_A(matrix_A_allreduce)
matrix_G_allreduce = self.grad_reducer_G(matrix_G_allreduce)
matrix_A_max_allreduce = self.grad_reducer_Amax(matrix_A_max_allreduce)
matrix_G_max_allreduce = self.grad_reducer_Gmax(matrix_G_max_allreduce)
if self.batch_size == 256:
new_grads = (gradients[0], )
start_index = 1
else:
new_grads = ()
start_index = 0
for i in range(start_index, 54):
                # g = gradients[i * 3]  # gradients were originally laid out as weight, gamma, beta
g = gradients[self.conv_index[i]]
temp_a = matrix_A_allreduce[i]
temp_g = matrix_G_allreduce[i]
temp_a = self.cast(temp_a, mstype.float32)
temp_g = self.cast(temp_g, mstype.float32)
matrix_A_inv_max = self.log(matrix_A_max_allreduce[i])
matrix_A_inv_max = self.mul(matrix_A_inv_max, -1)
matrix_A_inv_max = self.exp(matrix_A_inv_max)
temp_a = self.mul(temp_a, matrix_A_inv_max)
matrix_G_inv_max = self.log(matrix_G_max_allreduce[i])
matrix_G_inv_max = self.mul(matrix_G_inv_max, -1)
matrix_G_inv_max = self.exp(matrix_G_inv_max)
temp_g = self.mul(temp_g, matrix_G_inv_max)
temp_max = self.mul(matrix_A_max_allreduce[i], matrix_G_max_allreduce[i])
temp_max = self.mul(temp_max, self.feature_map[i])
                if i == 53:  # distinguish the fully-connected layer from the convolution operators
g = self.cube_matmul_left_fc(temp_g, g)
g = self.cube_matmul_right_fc(g, temp_a, temp_max)
else:
g = self.cube_matmul_left(temp_g, g)
g = self.cube_matmul_right_mul(g, temp_a, temp_max)
                # store the computed second-order information matrices into Parameters so the first-order path can reuse them
fake_A = self.assign(self.matrix_A[i], temp_a)
fake_G = self.assign(self.matrix_G[i], temp_g)
fake_max = self.assign(self.matrix_max_inv[i], temp_max)
                # add dependency edges to the graph
g = F.depend(g, fake_A)
g = F.depend(g, fake_G)
g = F.depend(g, fake_max)
                # if i == 53:  # put the gradient into the tuple; momentum uses it later to update the weights
                #     new_grads = new_grads + (g,)
                # else:
                #     new_grads = new_grads + (g, gradients[i * 3 + 1], gradients[i * 3 + 2])
                # if i in self.bn_index:  # recompute the beta/gamma gradient indices
if i == 3 or i == 7 or i == 10 or i == 13 or i == 17 or i == 20 or i == 23 or i == 26 or i == 30 or i == 33 or i == 36 or i == 39 or i == 42 or i == 45 or i == 49 or i == 52:
new_grads = new_grads + (g, gradients[self.bn_gradient_index[i]], gradients[self.bn_gradient_index[i]+1])
elif i == 53:
new_grads = new_grads + (g, gradients[86])
else:
new_grads = new_grads + (g,)
#gradients = new_grads + gradients[85]
gradients = new_grads
        else:  # first-order sub-graph processing path
if self.batch_size == 256:
new_grads = (gradients[0], )
start_index = 1
else:
new_grads = ()
start_index = 0
for i in range(start_index, 54):
# g = gradients[i * 3]
g = gradients[self.conv_index[i]]
matrix_A = self.matrix_A[i]
matrix_G = self.matrix_G[i]
matrix_max = self.matrix_max_inv[i]
matrix_A = F.depend(matrix_A, g)
matrix_G = F.depend(matrix_G, g)
matrix_max = F.depend(matrix_max, g)
if i == 53:
g = self.cube_matmul_left_fc(matrix_G, g)
g = self.cube_matmul_right_fc(g, matrix_A, matrix_max)
# new_grads = new_grads + (g,)
else:
g = self.cube_matmul_left(matrix_G, g)
g = self.cube_matmul_right_mul(g, matrix_A, matrix_max)
# new_grads = new_grads + (g, gradients[i * 3 + 1], gradients[i * 3 + 2])
                # if i in self.bn_index:  # recompute the beta/gamma gradient indices
if i == 3 or i == 7 or i == 10 or i == 13 or i == 17 or i == 20 or i == 23 or i == 26 or i == 30 or i == 33 or i == 36 or i == 39 or i == 42 or i == 45 or i == 49 or i == 52:
new_grads = new_grads + (g, gradients[self.bn_gradient_index[i]], gradients[self.bn_gradient_index[i]+1])
elif i == 53:
new_grads = new_grads + (g, gradients[86])
else:
new_grads = new_grads + (g,)
gradients = new_grads
if self.weight_decay > 0:
gradients = self.hyper_map(F.partial(apply_decay, self.weight_decay), self.decay_flags,
params, gradients)
gradients = self.scale_grad(gradients)
lr = self.get_lr()
success = self.hyper_map(F.partial(momentum_opt, self.opt, lr, self.momentum), gradients, params, moments)
return success
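# Illustrative construction (the attribute names such as net.matrix_A are placeholders for the
# per-layer second-order statistics exposed by a THOR-aware ResNet):
#   opt = THOR(net.trainable_params(), learning_rate=lr, momentum=0.9,
#              matrix_A=net.matrix_A, matrix_G=net.matrix_G,
#              A_inv_max=net.A_inv_max, G_inv_max=net.G_inv_max,
#              weight_decay=5e-4, loss_scale=128.0, batch_size=32.0)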
| [
"[email protected]"
] | |
3821efe47b843b6c0e67ea56bd904c71cae7edbe | 3307766701d680af6d12a726a2d98df2cb1830e5 | /jams/gcj/2017/1C/C/C.py | 52f7e3992946510e28a1d7dbac618bac1426e0bb | [] | no_license | dpaneda/code | c1a54037a275fa7044eb5c2d6079f052dd968615 | 7da1ede33a6a7cd19cbd0db517d91e7cccfbbfff | refs/heads/master | 2023-01-07T18:41:00.816363 | 2022-12-30T09:24:22 | 2022-12-30T09:24:22 | 1,583,913 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,126 | py | #!/usr/bin/python2
import sys
def equals(P):
for i in xrange(0, len(P)):
        if (P[i] - P[0]) > 0.00000001:  # P is sorted ascending, so compare each value to the minimum
return False
return True
def diff(P):
a = P[0]
n = 1
for i in xrange(1, len(P)):
if P[i] == a:
n += 1
else:
return n, P[i] - a
def solve():
N, K = map(int, raw_input().split())
U = float(raw_input())
P = map(float, raw_input().split())
P.sort()
while U > 0:
if N == 1:
P[0] += U
break
if equals(P):
u = U / len(P)
for i in xrange(0, len(P)):
P[i] += u
break
n, u = diff(P)
if (u * n) < U:
for i in xrange(0, n):
P[0] += u
U -= u * n
P.sort()
print P
else:
for i in xrange(0, n):
P[i] += U / n
break
p = 1
for i in xrange(0, len(P)):
p *= P[i]
return str(p)
num = int(sys.stdin.readline())
for case in range(1, num + 1):
print("Case #{0}: {1}".format(case, solve()))
| [
"[email protected]"
] | |
7985ceb35a1900004f926901a654243dccd6e223 | e85f4714cf2b590d21582ebd567208da1b9132fc | /tests/test_pakit_tests.py | a24369e54dcc6a0174d05d577836e2b3b1380841 | [
"BSD-3-Clause"
] | permissive | pakit/pakit_tests | 1fcc6c6974a297d1931b704a93d4580ed1eecd90 | 078203f31d56b9701781008bc90668a5a5b292ba | refs/heads/master | 2020-04-15T15:58:09.280612 | 2016-01-02T04:02:07 | 2016-01-02T04:02:07 | 42,521,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,612 | py | """
Test pakit_tests
"""
from __future__ import absolute_import, print_function
import os
import tempfile
import mock
import pytest
from pakit_tests import (
create_args_parser, extract_repo_names, extract_repo_block, main,
scan_recipes, format_lines, write_file, TEMPLATE
)
import tests.common as tc
def test_parse_recipes_root():
root = os.path.abspath('recipes')
args = create_args_parser().parse_args([root])
assert args.recipes_root == root
assert args.output == os.path.join('tests', 'test_recipes.py')
def test_parse_output():
root = os.path.abspath('recipes')
argv = '{0} {1}'.format(root, 'test_recs.py').split()
args = create_args_parser().parse_args(argv)
assert args.recipes_root == root
assert args.output == 'test_recs.py'
def test_extract_repo_names():
text = """self.repos = {
'stable': Git(self.src, tag='0.31.0'),
'unstable': Git(self.src),
}"""
assert extract_repo_names(text) == ['stable', 'unstable']
def test_extract_repo_block():
text = """class Ag(Recipe):
\"\"\"
Grep like tool optimized for speed
\"\"\"
def __init__(self):
super(Ag, self).__init__()
self.src = 'https://github.com/ggreer/the_silver_searcher.git'
self.homepage = self.src
self.repos = {
"stable": Git(self.src, tag='0.31.0'),
'unstable': Git(self.src),
}
def build(self):
self.cmd('./build.sh --prefix {prefix}')
self.cmd('make install')"""
expect = """self.repos = {
"stable": Git(self.src, tag='0.31.0'),
'unstable': Git(self.src),
}"""
assert extract_repo_block(text) == expect
def test_scan_recipes():
data = scan_recipes(tc.RECIPES)
assert 'ag' in data
assert sorted(data['ag']) == ['stable', 'unstable']
def test_format_lines():
data = {
'ag': ['stable', 'unstable'],
'ack': ['stable'],
}
lines = format_lines(data)
expect = """\nclass Test_ack(RecipeTest):
def test_stable(self):
assert subprocess.call(self.args, cwd=self.temp_d,
env=self.new_env) == 0
\nclass Test_ag(RecipeTest):
def test_stable(self):
assert subprocess.call(self.args, cwd=self.temp_d,
env=self.new_env) == 0
def test_unstable(self):
assert subprocess.call(self.args, cwd=self.temp_d,
env=self.new_env) == 0"""
assert '\n'.join(lines) == expect
def test_write_file():
try:
test_file = tempfile.NamedTemporaryFile()
write_file(tc.RECIPES, test_file.name)
with open(test_file.name, 'r') as fin:
assert TEMPLATE.replace('ROOT_RECS', tc.RECIPES) in fin.read()
finally:
test_file.close()
@mock.patch('pakit.main.argparse._sys')
def test_main_args_none(mock_sys):
with pytest.raises(AttributeError):
main(['pakit_tests'])
mock_sys.exit.assert_called_with(2)
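# mock.patch decorators are applied bottom-up, so the decorator closest to the function provides
# the first mock argument (mock_write below) and the outermost decorator provides the last one.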
@mock.patch('pakit_tests.print', create=True)  # assumed patch target: the signature below expects a second mock for print output
@mock.patch('pakit_tests.write_file')
def test_main_output_absolute(mock_write, mock_print):
main(['pakit_tests', '.', '/dev/null'])
mock_print.assert_any_call('Scanning recipes under: ' + os.getcwd())
mock_print.assert_any_call('Writing tests to: /dev/null')
@mock.patch('pakit_tests.print', create=True)  # assumed patch target, as in the previous test
@mock.patch('pakit_tests.write_file')
def test_main_output_relative(mock_write, mock_print):
main(['pakit_tests', '/tmp'])
mock_print.assert_any_call('Scanning recipes under: /tmp')
mock_print.assert_any_call('Writing tests to: /tmp/tests/test_recipes.py')
mock_write.assert_any_call('/tmp', '/tmp/tests/test_recipes.py')
| [
"[email protected]"
] | |
bc9fe81e043cc94e56cafd9cd99b0951d3bb10c5 | 7cdb18e0a7ef01a34ec602bb31aa915c482fcd24 | /hujian_api/API_service/TestCase/Attendance_analyse_standard_02.py | 478b59425ccc56de8a4de30b2e7e86270540fa5b | [] | no_license | wangdan377/Python_API | 6adac56974f9c6af238895a3101db0e3f0667ba1 | 38b31d4d02740d359a7e47fb3a3975045f00288e | refs/heads/master | 2023-02-18T14:39:03.009815 | 2021-01-20T12:59:52 | 2021-01-20T12:59:52 | 311,855,608 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,763 | py | import pytest
import allure
import requests
import json
import time
from Params.params import Login
from Params.params import Login_info
from Params.params import Password_reset
from Params.params import Log_info
from Params.params import Log_latest
from Params.params import Log_list
from Params.params import Attendance_groups_sync
from Params.params import Attendance_schedules_sync
from Params.params import Attendance_records_sync
from Params.params import Flow_sync
from Params.params import Department_sync
from Params.params import Department_list
from Params.params import Department_employees_list
from Params.params import Department_employee_query
from Params.params import Attendance_class_list
from Params.params import Attendance_analyse
from Params.params import Attendance_analyse_result
from Params.params import Attendance_analyse_result_statistics
from Common import Post
from Common import Get
from Common import Assert
from Common import Consts
class Attendance_analyse_standard_02:
@allure.severity('normal')
@allure.feature('Attendance_analyse')
@allure.story('Attendance_analyse_standard')
def test_standard_02(self):
session_a = requests.session()
get_req = Get.Get()
ass = Assert.Assertions()
url_2019_10 = 'http://172.16.2.101:4000/api/attendance/analyse?startDate=2019-10-01 00:00:00&endDate=2019-10-31 00:00:00&userIds=293210194326475830'
        # analyse the October 2019 attendance of user 293210194326475830
res_2019_10 = get_req.get_model_a(session_a,url_2019_10)
time.sleep(10)
resCode_2019_10 = res_2019_10['code']
resText_2019_10 = res_2019_10['text']
#print(resText_2019_10)
assert ass.assert_code(resCode_2019_10, 200)
assert ass.assert_in_text(resText_2019_10, 'ok')
Consts.RESULT_LIST.append('True')
url_2019_11 = 'http://172.16.2.101:4000/api/attendance/analyse?startDate=2019-11-01 00:00:00&endDate=2019-11-30 00:00:00&userIds=293210194326475830'
        # analyse the November 2019 attendance of user 293210194326475830
res_2019_11 = get_req.get_model_a(session_a, url_2019_11)
time.sleep(10)
resCode_2019_11 = res_2019_11['code']
resText_2019_11 = res_2019_11['text']
#print(resText_2019_11)
assert ass.assert_code(resCode_2019_11, 200)
assert ass.assert_in_text(resText_2019_11, 'ok')
Consts.RESULT_LIST.append('True')
url_result_2019_10 = 'http://172.16.2.101:4000/api/attendance/analyse/list?userId=293210194326475830&startDate=2019-10-01 00:00:00&endDate=2019-10-31 00:00:00&pageSize=31'
        # fetch the October 2019 attendance-analysis result of user 293210194326475830
res_result_2019_10 = get_req.get_model_a(session_a,url_result_2019_10)
res_resultCode_2019_10 = res_result_2019_10['code']
res_resultText_2019_10 = res_result_2019_10['text']
assert ass.assert_code(res_resultCode_2019_10, 200)
assert ass.assert_in_text(res_resultText_2019_10, 'ok')
Consts.RESULT_LIST.append('True')
url_result_2019_11 = 'http://172.16.2.101:4000/api/attendance/analyse/list?userId=293210194326475830&startDate=2019-11-01 00:00:00&endDate=2019-11-30 00:00:00&pageSize=31'
        # fetch the November 2019 attendance-analysis result of user 293210194326475830
res_result_2019_11 = get_req.get_model_a(session_a, url_result_2019_11)
res_resultCode_2019_11 = res_result_2019_11['code']
res_resultText_2019_11 = res_result_2019_11['text']
assert ass.assert_code(res_resultCode_2019_11, 200)
assert ass.assert_in_text(res_resultText_2019_11, 'ok')
Consts.RESULT_LIST.append('True')
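        # The two blocks below unpack one entry per calendar day from each month's analysis result
        # and assert the expected daily status: 'SUCCESS' for a normal day, 'ABNORMAL' or
        # 'ABNORMAL480' for days flagged with attendance exceptions.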
res_resultDict_2019_10 = json.loads(res_resultText_2019_10)
resInfo_10_01 = res_resultDict_2019_10['result']['list'][0]
resInfo_10_02 = res_resultDict_2019_10['result']['list'][1]
resInfo_10_03 = res_resultDict_2019_10['result']['list'][2]
resInfo_10_04 = res_resultDict_2019_10['result']['list'][3]
resInfo_10_05 = res_resultDict_2019_10['result']['list'][4]
resInfo_10_06 = res_resultDict_2019_10['result']['list'][5]
resInfo_10_07 = res_resultDict_2019_10['result']['list'][6]
resInfo_10_08 = res_resultDict_2019_10['result']['list'][7]
resInfo_10_09 = res_resultDict_2019_10['result']['list'][8]
resInfo_10_10 = res_resultDict_2019_10['result']['list'][9]
resInfo_10_11 = res_resultDict_2019_10['result']['list'][10]
resInfo_10_12 = res_resultDict_2019_10['result']['list'][11]
resInfo_10_13 = res_resultDict_2019_10['result']['list'][12]
resInfo_10_14 = res_resultDict_2019_10['result']['list'][13]
resInfo_10_15 = res_resultDict_2019_10['result']['list'][14]
resInfo_10_16 = res_resultDict_2019_10['result']['list'][15]
resInfo_10_17 = res_resultDict_2019_10['result']['list'][16]
resInfo_10_18 = res_resultDict_2019_10['result']['list'][17]
resInfo_10_19 = res_resultDict_2019_10['result']['list'][18]
resInfo_10_20 = res_resultDict_2019_10['result']['list'][19]
resInfo_10_21 = res_resultDict_2019_10['result']['list'][20]
resInfo_10_22 = res_resultDict_2019_10['result']['list'][21]
resInfo_10_23 = res_resultDict_2019_10['result']['list'][22]
resInfo_10_24 = res_resultDict_2019_10['result']['list'][23]
resInfo_10_25 = res_resultDict_2019_10['result']['list'][24]
resInfo_10_26 = res_resultDict_2019_10['result']['list'][25]
resInfo_10_27 = res_resultDict_2019_10['result']['list'][26]
resInfo_10_28 = res_resultDict_2019_10['result']['list'][27]
resInfo_10_29 = res_resultDict_2019_10['result']['list'][28]
resInfo_10_30 = res_resultDict_2019_10['result']['list'][29]
resInfo_10_31 = res_resultDict_2019_10['result']['list'][30]
assert ass.assert_in_text(resInfo_10_01, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_02, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_03, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_04, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_05, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_06, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_07, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_08, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_09, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_10, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_11, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_12, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_13, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_14, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_15, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_16, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_17, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_18, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_19, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_20, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_21, 'ABNORMAL')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_22, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_23, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_24, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_25, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_26, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_27, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_28, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_29, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_30, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_10_31, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
res_resultDict_2019_11 = json.loads(res_resultText_2019_11)
resInfo_11_01 = res_resultDict_2019_11['result']['list'][0]
resInfo_11_02 = res_resultDict_2019_11['result']['list'][1]
resInfo_11_03 = res_resultDict_2019_11['result']['list'][2]
resInfo_11_04 = res_resultDict_2019_11['result']['list'][3]
resInfo_11_05 = res_resultDict_2019_11['result']['list'][4]
resInfo_11_06 = res_resultDict_2019_11['result']['list'][5]
resInfo_11_07 = res_resultDict_2019_11['result']['list'][6]
resInfo_11_08 = res_resultDict_2019_11['result']['list'][7]
resInfo_11_09 = res_resultDict_2019_11['result']['list'][8]
resInfo_11_10 = res_resultDict_2019_11['result']['list'][9]
resInfo_11_11 = res_resultDict_2019_11['result']['list'][10]
resInfo_11_12 = res_resultDict_2019_11['result']['list'][11]
resInfo_11_13 = res_resultDict_2019_11['result']['list'][12]
resInfo_11_14 = res_resultDict_2019_11['result']['list'][13]
resInfo_11_15 = res_resultDict_2019_11['result']['list'][14]
resInfo_11_16 = res_resultDict_2019_11['result']['list'][15]
resInfo_11_17 = res_resultDict_2019_11['result']['list'][16]
resInfo_11_18 = res_resultDict_2019_11['result']['list'][17]
resInfo_11_19 = res_resultDict_2019_11['result']['list'][18]
resInfo_11_20 = res_resultDict_2019_11['result']['list'][19]
resInfo_11_21 = res_resultDict_2019_11['result']['list'][20]
resInfo_11_22 = res_resultDict_2019_11['result']['list'][21]
resInfo_11_23 = res_resultDict_2019_11['result']['list'][22]
resInfo_11_24 = res_resultDict_2019_11['result']['list'][23]
resInfo_11_25 = res_resultDict_2019_11['result']['list'][24]
resInfo_11_26 = res_resultDict_2019_11['result']['list'][25]
resInfo_11_27 = res_resultDict_2019_11['result']['list'][26]
resInfo_11_28 = res_resultDict_2019_11['result']['list'][27]
resInfo_11_29 = res_resultDict_2019_11['result']['list'][28]
resInfo_11_30 = res_resultDict_2019_11['result']['list'][29]
assert ass.assert_in_text(resInfo_11_01, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_02, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_03, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_04, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_05, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_06, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_07, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_08, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_09, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_10, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_11, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_12, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_13, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_14, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_15, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_16, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_17, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_18, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_19, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_20, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_21, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_22, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_23, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_24, 'SUCCESS')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_25, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_26, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_27, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_28, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_29, 'ABNORMAL480')
Consts.RESULT_LIST.append('True')
assert ass.assert_in_text(resInfo_11_30, 'SUCCESS')
Consts.RESULT_LIST.append('True')
if __name__ == '__main__':
a = Attendance_analyse_standard_02()
a.test_standard_02()
| [
"[email protected]"
] | |
da4aa3bdc9eddca782b1e0a4f1eca9a1d8028af1 | 2321ebc9c76e2eb95a05976e3681ed7f4e24d361 | /pandas-for-finance/10/05.py | 68641569fe58f3b6a01bc5b01c572044cc7080ca | [] | no_license | sharebook-kr/books | 71428bfec46759a8da81d70bfe28fa67e4244aee | 7537053c559ca055bf54ab940bf4078217c288a1 | refs/heads/master | 2020-04-22T19:08:42.294339 | 2019-08-17T12:06:42 | 2019-08-17T12:06:42 | 170,598,895 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | import requests
from bs4 import BeautifulSoup
import time
import telepot
from telepot.loop import MessageLoop
def get_dividend_earning_rate(code):
try:
url = "http://finance.naver.com/item/main.nhn?code=" + code
html = requests.get(url).text
soup = BeautifulSoup(html, "html5lib")
tag = soup.select("#_dvr")
return tag[0].text
except:
return 0
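# Scrapes the dividend-yield element (CSS id "_dvr") from the Naver Finance page for the given
# stock code; any network or parsing failure falls back to returning 0.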
token = "398259524:AAHMXMTVrXDfNd-E9tAsA1eRp-u4LopefLI"
bot = telepot.Bot(token)
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
code = msg['text']
dvr = get_dividend_earning_rate(code)
text = "배당 수익률은 {} 입니다.".format(dvr)
bot.sendMessage(chat_id, text)
MessageLoop(bot, handle).run_as_thread()
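# run_as_thread() handles incoming Telegram messages on a background thread, so the main thread
# only needs to stay alive; hence the idle sleep loop below.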
while True:
time.sleep(10) | [
"[email protected]"
] | |
c62de43f47a28b30ee881c1391e0c50a8a2b2ebf | b5f9f93a415a5cc0117a580c5da12804e68c141d | /scripts/motions/initr0.py | 65093d4646203aa136da56e262759377b990ad57 | [] | no_license | akihikoy/lfd_trick | 71f89d80abc27ffc6fbd5bc609322918a4f8264e | b7bf0189db7bcef07772db17de29302d6e8ba2bf | refs/heads/master | 2021-01-10T14:22:53.341666 | 2016-03-29T18:16:15 | 2016-03-29T18:16:15 | 50,623,958 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/python
from core_tool import *
def Help():
  return '''Move right arm/gripper to init posture.
  Usage: initr0'''
def Run(t,*args):
if t.robot.Is('PR2'):
angles= [-1.5758421026969418, 1.2968352230407523, -1.6520923310211921, -2.095963566248973, 10.512690320637843, -1.469029183486648, 2.37512293699]
elif t.robot.Is('Baxter'):
angles= [0.6772525170776368, -0.8617137066101075, -0.1092961310119629, 2.4812139215698243, -0.7577865083496095, -1.4657186411499024, -0.12732040524902344]
angles[0]-= 0.6
t.robot.OpenGripper(arm=RIGHT, blocking=False)
t.robot.MoveToQ(angles,dt=4.0, arm=RIGHT,blocking=False)
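  # Both commands are issued with blocking=False, so the gripper starts opening while the arm is
  # still moving towards the stored joint angles.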
| [
"[email protected]"
] | |
1ab7dc817ebdb29dad6da210ed339031a9d170c7 | 700d2e5b4501fa638bef04141bb92aa1b5a422f0 | /LowVoltage/actions/update_item.py | 045ab47970e839494c5c41a728899b5928c1f23b | [
"MIT"
] | permissive | jacquev6/LowVoltage | 134d8e85add7ff8107090b80adeda99552a43fa4 | aa9c3653e54f2ccda3db0ed647ba9ad5e5657ea3 | refs/heads/master | 2016-09-05T18:29:29.734533 | 2015-09-06T16:17:03 | 2015-09-06T16:17:03 | 24,800,231 | 3 | 3 | null | 2015-04-29T15:06:34 | 2014-10-04T20:02:56 | Python | UTF-8 | Python | false | false | 22,664 | py | # coding: utf8
# Copyright 2014-2015 Vincent Jacques <[email protected]>
"""
When given a :class:`UpdateItem`, the connection will return a :class:`UpdateItemResponse`:
>>> connection(UpdateItem(table, {"h": 0}).remove("a"))
<LowVoltage.actions.update_item.UpdateItemResponse ...>
"""
import LowVoltage as _lv
import LowVoltage.testing as _tst
from .action import Action
from .conversion import _convert_dict_to_db, _convert_db_to_dict
from .next_gen_mixins import proxy
from .next_gen_mixins import (
ConditionExpression,
ExpressionAttributeNames,
ExpressionAttributeValues,
Key,
ReturnConsumedCapacity,
ReturnItemCollectionMetrics,
ReturnValues,
TableName
)
from .return_types import ConsumedCapacity, ItemCollectionMetrics, _is_dict
class UpdateItemResponse(object):
"""
UpdateItemResponse()
The `UpdateItem response <http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#API_UpdateItem_ResponseElements>`__.
"""
def __init__(
self,
Attributes=None,
ConsumedCapacity=None,
ItemCollectionMetrics=None,
**dummy
):
self.__attributes = Attributes
self.__consumed_capacity = ConsumedCapacity
self.__item_collection_metrics = ItemCollectionMetrics
@property
def attributes(self):
"""
The (previous or new) attributes of the item you just updated. If you used :meth:`~UpdateItem.return_values_all_old`, :meth:`~UpdateItem.return_values_all_new`, :meth:`~UpdateItem.return_values_updated_old` or :meth:`~UpdateItem.return_values_updated_new`.
:type: ``None`` or dict
"""
if _is_dict(self.__attributes):
return _convert_db_to_dict(self.__attributes)
@property
def consumed_capacity(self):
"""
The capacity consumed by the request. If you used :meth:`~UpdateItem.return_consumed_capacity_total` or :meth:`~UpdateItem.return_consumed_capacity_indexes`.
:type: ``None`` or :class:`.ConsumedCapacity`
"""
if _is_dict(self.__consumed_capacity):
return ConsumedCapacity(**self.__consumed_capacity)
@property
def item_collection_metrics(self):
"""
Metrics about the collection of the item you just updated. If a LSI was touched and you used :meth:`~UpdateItem.return_item_collection_metrics_size`.
:type: ``None`` or :class:`.ItemCollectionMetrics`
"""
if _is_dict(self.__item_collection_metrics):
return ItemCollectionMetrics(**self.__item_collection_metrics)
class UpdateItem(Action):
"""
The `UpdateItem request <http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateItem.html#API_UpdateItem_RequestParameters>`__.
"""
def __init__(self, table_name=None, key=None):
"""
Passing ``table_name`` to the constructor is like calling :meth:`table_name` on the new instance.
Passing ``key`` to the constructor is like calling :meth:`key` on the new instance.
"""
super(UpdateItem, self).__init__("UpdateItem", UpdateItemResponse)
self.__set = {}
self.__remove = []
self.__add = {}
self.__delete = {}
self.__condition_expression = ConditionExpression(self)
self.__expression_attribute_names = ExpressionAttributeNames(self)
self.__expression_attribute_values = ExpressionAttributeValues(self)
self.__key = Key(self, key)
self.__return_consumed_capacity = ReturnConsumedCapacity(self)
self.__return_item_collection_metrics = ReturnItemCollectionMetrics(self)
self.__return_values = ReturnValues(self)
self.__table_name = TableName(self, table_name)
@property
def payload(self):
data = {}
update = []
if self.__set:
update.append("SET {}".format(", ".join("{}={}".format(n, v) for n, v in self.__set.iteritems())))
if self.__remove:
update.append("REMOVE {}".format(", ".join(self.__remove)))
if self.__add:
update.append("ADD {}".format(", ".join("{} :{}".format(n, v) for n, v in self.__add.iteritems())))
if self.__delete:
update.append("DELETE {}".format(", ".join("{} :{}".format(n, v) for n, v in self.__delete.iteritems())))
if update:
data["UpdateExpression"] = " ".join(update)
data.update(self.__condition_expression.payload)
data.update(self.__expression_attribute_names.payload)
data.update(self.__expression_attribute_values.payload)
data.update(self.__key.payload)
data.update(self.__return_consumed_capacity.payload)
data.update(self.__return_item_collection_metrics.payload)
data.update(self.__return_values.payload)
data.update(self.__table_name.payload)
return data
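    # Example of the generated payload (cf. the unit tests below): with key {"hash": 42},
    # .set("a", ":v") and .add("b", "w") produce
    #   {"TableName": ..., "Key": {"hash": {"N": "42"}}, "UpdateExpression": "SET a=:v ADD b :w"}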
@proxy
def table_name(self, table_name):
"""
>>> connection(
... UpdateItem(key={"h": 0})
... .table_name(table)
... .remove("a")
... )
<LowVoltage.actions.update_item.UpdateItemResponse ...>
"""
return self.__table_name.set(table_name)
@proxy
def key(self, key):
"""
>>> connection(
... UpdateItem(table_name=table)
... .key({"h": 0})
... .remove("a")
... )
<LowVoltage.actions.update_item.UpdateItemResponse ...>
"""
return self.__key.set(key)
def set(self, attribute_name, value_name):
"""
Add a value to SET as an attribute to UpdateExpression.
As described in the `developer guide <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html#Expressions.Modifying.UpdateExpressions.SET>`__.
>>> connection(PutItem(table, {"h": 0}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .set("a", ":forty_two")
... .expression_attribute_value("forty_two", 42)
... .return_values_all_new()
... ).attributes
{u'a': 42, u'h': 0}
"""
self.__set[attribute_name] = value_name
return self
def remove(self, path):
"""
Add an attribute to REMOVE to UpdateExpression.
As described in the `developer guide <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html#Expressions.Modifying.UpdateExpressions.REMOVE>`__.
>>> connection(PutItem(table, {"h": 0, "a": 42}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .remove("a")
... .return_values_all_new()
... ).attributes
{u'h': 0}
"""
self.__remove.append(path)
return self
def add(self, attribute_name, value_name):
"""
Add a (set of) value(s) to ADD to a number (or a set) attribute to UpdateExpression.
As described in the `developer guide <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html#Expressions.Modifying.UpdateExpressions.ADD>`__.
>>> connection(PutItem(table, {"h": 0, "a": 42}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .add("a", "two")
... .expression_attribute_value("two", 2)
... .return_values_all_new()
... ).attributes
{u'a': 44, u'h': 0}
>>> connection(PutItem(table, {"h": 0, "a": {2, 3}}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .add("a", "vals")
... .expression_attribute_value("vals", {1, 2})
... .return_values_all_new()
... ).attributes
{u'a': set([1, 2, 3]), u'h': 0}
"""
self.__add[attribute_name] = value_name
return self
def delete(self, attribute_name, value_name):
"""
Add a set of values to DELETE from a set attribute to UpdateExpression.
As described in the `developer guide <http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html#Expressions.Modifying.UpdateExpressions.DELETE>`__.
>>> connection(PutItem(table, {"h": 0, "a": {1, 2, 3}}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .delete("a", "vals")
... .expression_attribute_value("vals", {1, 2, 4})
... .return_values_all_new()
... ).attributes
{u'a': set([3]), u'h': 0}
"""
self.__delete[attribute_name] = value_name
return self
@proxy
def condition_expression(self, expression):
"""
>>> connection(
... UpdateItem(table, {"h": 1})
... .remove("gh")
... .condition_expression("#syn=:val")
... .expression_attribute_name("syn", "gr")
... .expression_attribute_value("val", 8)
... )
<LowVoltage.actions.update_item.UpdateItemResponse ...>
"""
return self.__condition_expression.set(expression)
@proxy
def expression_attribute_name(self, synonym, name):
"""
See :meth:`condition_expression` for an example.
"""
return self.__expression_attribute_names.add(synonym, name)
@proxy
def expression_attribute_value(self, name, value):
"""
See :meth:`condition_expression` for an example.
"""
return self.__expression_attribute_values.add(name, value)
@proxy
def return_consumed_capacity_indexes(self):
"""
>>> c = connection(
... UpdateItem(table, {"h": 5}).set("gh", "h").set("gr", "h")
... .return_consumed_capacity_indexes()
... ).consumed_capacity
>>> c.capacity_units
3.0
>>> c.table.capacity_units
1.0
>>> c.global_secondary_indexes["gsi"].capacity_units
2.0
"""
return self.__return_consumed_capacity.indexes()
@proxy
def return_consumed_capacity_total(self):
"""
>>> connection(
... UpdateItem(table, {"h": 4}).set("gh", "h").set("gr", "h")
... .return_consumed_capacity_total()
... ).consumed_capacity.capacity_units
3.0
"""
return self.__return_consumed_capacity.total()
@proxy
def return_consumed_capacity_none(self):
"""
>>> print connection(
... UpdateItem(table, {"h": 6}).set("gh", "h").set("gr", "h")
... .return_consumed_capacity_none()
... ).consumed_capacity
None
"""
return self.__return_consumed_capacity.none()
@proxy
def return_item_collection_metrics_size(self):
"""
>>> m = connection(
... UpdateItem(table2, {"h": 0, "r1": 0}).set("a", "h")
... .return_item_collection_metrics_size()
... ).item_collection_metrics
>>> m.item_collection_key
{u'h': 0}
>>> m.size_estimate_range_gb
[0.0, 1.0]
"""
return self.__return_item_collection_metrics.size()
@proxy
def return_item_collection_metrics_none(self):
"""
>>> print connection(
... UpdateItem(table2, {"h": 1, "r1": 0}).set("a", "h")
... .return_item_collection_metrics_none()
... ).item_collection_metrics
None
"""
return self.__return_item_collection_metrics.none()
@proxy
def return_values_all_old(self):
"""
>>> connection(PutItem(table, {"h": 0, "a": 1, "b": 2}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .set("a", ":v")
... .expression_attribute_value("v", 2)
... .return_values_all_old()
... ).attributes
{u'a': 1, u'h': 0, u'b': 2}
"""
return self.__return_values.all_old()
@proxy
def return_values_all_new(self):
"""
>>> connection(PutItem(table, {"h": 0, "a": 1, "b": 2}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .set("a", ":v")
... .expression_attribute_value("v", 2)
... .return_values_all_new()
... ).attributes
{u'a': 2, u'h': 0, u'b': 2}
"""
return self.__return_values.all_new()
@proxy
def return_values_updated_old(self):
"""
>>> connection(PutItem(table, {"h": 0, "a": 1, "b": 2}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .set("a", ":v")
... .expression_attribute_value("v", 2)
... .return_values_updated_old()
... ).attributes
{u'a': 1}
"""
return self.__return_values.updated_old()
@proxy
def return_values_updated_new(self):
"""
>>> connection(PutItem(table, {"h": 0, "a": 1, "b": 2}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> connection(
... UpdateItem(table, {"h": 0})
... .set("a", ":v")
... .expression_attribute_value("v", 2)
... .return_values_updated_new()
... ).attributes
{u'a': 2}
"""
return self.__return_values.updated_new()
@proxy
def return_values_none(self):
"""
>>> connection(PutItem(table, {"h": 0, "a": 1, "b": 2}))
<LowVoltage.actions.put_item.PutItemResponse ...>
>>> print connection(
... UpdateItem(table, {"h": 0})
... .set("a", ":v")
... .expression_attribute_value("v", 2)
... .return_values_none()
... ).attributes
None
"""
return self.__return_values.none()
class UpdateItemUnitTests(_tst.UnitTests):
def test_name(self):
self.assertEqual(UpdateItem("Table", {"hash": 42}).name, "UpdateItem")
def test_table_name_and_key(self):
self.assertEqual(
UpdateItem().table_name("Table").key({"hash": 42}).payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
}
)
def test_constructor(self):
self.assertEqual(
UpdateItem("Table", {"hash": 42}).payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
}
)
def test_set(self):
self.assertEqual(
UpdateItem("Table", {"hash": 42}).set("a", ":v").payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "SET a=:v",
}
)
def test_several_sets(self):
self.assertIn(
UpdateItem("Table", {"hash": 42}).set("a", ":v").set("b", ":w").payload,
[
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "SET a=:v, b=:w",
},
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "SET b=:w, a=:v",
}
]
)
def test_remove(self):
self.assertEqual(
UpdateItem("Table", {"hash": 42}).remove("a").remove("b").payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "REMOVE a, b",
}
)
def test_add(self):
self.assertEqual(
UpdateItem("Table", {"hash": 42}).add("a", "v").payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "ADD a :v",
}
)
def test_several_adds(self):
self.assertIn(
UpdateItem("Table", {"hash": 42}).add("a", "v").add("b", "w").payload,
[
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "ADD a :v, b :w",
},
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "ADD b :w, a :v",
}
]
)
def test_delete(self):
self.assertEqual(
UpdateItem("Table", {"hash": 42}).delete("a", "v").payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "DELETE a :v",
}
)
def test_several_deletes(self):
self.assertIn(
UpdateItem("Table", {"hash": 42}).delete("a", "v").delete("b", "w").payload,
[
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "DELETE a :v, b :w",
},
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"UpdateExpression": "DELETE b :w, a :v",
}
]
)
def test_expression_attribute_value(self):
self.assertEqual(
UpdateItem("Table", {"hash": 42}).expression_attribute_value("v", u"value").payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"ExpressionAttributeValues": {":v": {"S": "value"}},
}
)
def test_expression_attribute_name(self):
self.assertEqual(
UpdateItem("Table", {"hash": 42}).expression_attribute_name("n", "path").payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"ExpressionAttributeNames": {"#n": "path"},
}
)
def test_condition_expression(self):
self.assertEqual(
UpdateItem("Table", {"hash": 42}).condition_expression("a=b").payload,
{
"TableName": "Table",
"Key": {"hash": {"N": "42"}},
"ConditionExpression": "a=b",
}
)
def test_return_values_all_new(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_values_all_new().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnValues": "ALL_NEW",
}
)
def test_return_values_all_old(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_values_all_old().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnValues": "ALL_OLD",
}
)
def test_return_values_updated_new(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_values_updated_new().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnValues": "UPDATED_NEW",
}
)
def test_return_values_updated_old(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_values_updated_old().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnValues": "UPDATED_OLD",
}
)
def test_return_values_none(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_values_none().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnValues": "NONE",
}
)
def test_return_consumed_capacity_total(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_consumed_capacity_total().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnConsumedCapacity": "TOTAL",
}
)
def test_return_consumed_capacity_indexes(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_consumed_capacity_indexes().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnConsumedCapacity": "INDEXES",
}
)
def test_return_consumed_capacity_none(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_consumed_capacity_none().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnConsumedCapacity": "NONE",
}
)
def test_return_item_collection_metrics_size(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_item_collection_metrics_size().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnItemCollectionMetrics": "SIZE",
}
)
def test_return_item_collection_metrics_none(self):
self.assertEqual(
UpdateItem("Table", {"hash": u"h"}).return_item_collection_metrics_none().payload,
{
"TableName": "Table",
"Key": {"hash": {"S": "h"}},
"ReturnItemCollectionMetrics": "NONE",
}
)
class UpdateItemResponseUnitTests(_tst.UnitTests):
def test_all_none(self):
r = UpdateItemResponse()
self.assertIsNone(r.attributes)
self.assertIsNone(r.consumed_capacity)
self.assertIsNone(r.item_collection_metrics)
def test_all_set(self):
unprocessed_keys = object()
r = UpdateItemResponse(Attributes={"h": {"S": "a"}}, ConsumedCapacity={}, ItemCollectionMetrics={})
self.assertEqual(r.attributes, {"h": "a"})
self.assertIsInstance(r.consumed_capacity, ConsumedCapacity)
self.assertIsInstance(r.item_collection_metrics, ItemCollectionMetrics)
| [
"[email protected]"
] | |
0cd6a4e11eea792cd0918edb44bb11e6d8b29ecd | 3c6b36eb1f4f9760c52903f6d0ec4a501f948c90 | /osp/corpus/models/__init__.py | d1c292de71e9477c50603b545d6d90ec443aee8b | [
"Apache-2.0"
] | permissive | davidmcclure/open-syllabus-project | 38444249af845013e3f281a7a713dca83159c56e | 078cfd4c5a257fbfb0901d43bfbc6350824eed4e | refs/heads/master | 2021-06-30T21:47:07.636558 | 2021-06-27T15:15:35 | 2021-06-27T15:15:35 | 50,152,020 | 220 | 14 | Apache-2.0 | 2021-06-27T15:11:15 | 2016-01-22T02:29:57 | Python | UTF-8 | Python | false | false | 162 | py |
from .document import Document
from .document_format import Document_Format
from .document_text import Document_Text
from .document_index import Document_Index
| [
"[email protected]"
] | |
a578ce80b077a6b303027caee95e8d5938e4b2a1 | 1ebe2b9d9d1f67e34cbe21c49f8710b2a1b9eeae | /tests/test_AppObj_getSinglePassword.py | b9993a19aa52502869d1ec20e6142b69d38a25a2 | [
"MIT"
] | permissive | rmetcalf9/PasswordManPro_CLI | 93ee0daff3bfd1c445bbb364df1a59711ec6344b | 207a624a51ac2848c48aeac3282152315b5146df | refs/heads/master | 2021-06-02T00:29:40.353520 | 2020-03-30T10:27:52 | 2020-03-30T10:27:52 | 135,285,541 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | from TestHelperSuperClass import testHelperSuperClass
import passwordmanpro_cli
from unittest.mock import patch
import samplePayloadsAndEnvs
class test_AppObj(testHelperSuperClass):
@patch('passwordmanpro_cli.AppObjClass._callGet')
def test_getSinglePassword(self, getResoursesResponse):
getResoursesResponse.side_effect = [
{ 'responseCode': 200, 'response': samplePayloadsAndEnvs.resourseResponseRAW},
{ 'responseCode': 200, 'response': samplePayloadsAndEnvs.accountsResponseRAW},
{ 'responseCode': 200, 'response': samplePayloadsAndEnvs.passwordResponseRAW}
]
fetchedPassword = passwordmanpro_cli.getSinglePassword(
resourseName="soadevteamserver-konga",
accountName="kongaadmin",
skipSSLChecks=False,
env=samplePayloadsAndEnvs.env
)
self.assertEqual(fetchedPassword, 'dummyPasswordForTest', msg='Incorrect password output')
| [
"[email protected]"
] | |
57c735539919e5edbbcb4ff8c16418d9f6376188 | 68bad4b3d92872bb5b77b4ee503e588d20511a27 | /python/scripts_inhibition/old_script/simulate_inhibition_ZZZ151_slow.py | ab2d7fa5209a6129519eb6a8b0d03dbf06e4c97c | [] | no_license | mickelindahl/bgmodel | 647be626a7311a8f08f3dfc897c6dd4466fc0a92 | 78e6f2b73bbcbecd0dba25caf99f835313c914ee | refs/heads/master | 2023-08-29T13:57:04.122115 | 2022-02-11T14:28:23 | 2022-02-11T14:28:23 | 17,148,386 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,492 | py | '''
Created on Aug 12, 2013
@author: lindahlm
'''
from core.network.manager import Builder_striatum as Builder
from core.parallel_excecution import loop
from core.network import default_params
from scripts_inhibition.base_simulate import (get_path_logs,
get_args_list_inhibition,
get_kwargs_list_indv_nets,
par_process_and_thread,
pert_set_data_path_to_milner_on_supermicro,
pert_add_inhibition)
import scripts_inhibition.base_inhibition_striatum as module
import oscillation_perturbations151_slow as op
import pprint
pp=pprint.pprint
FILE_NAME=__file__.split('/')[-1][0:-3]
FROM_DISK_0=0
LOAD_MILNER_ON_SUPERMICRO=False
NUM_NETS=1
NUM_RUNS=len(op.get()) #A run for each perturbation
num_sim=NUM_NETS*NUM_RUNS
kwargs={
'Builder':Builder,
'cores_milner':40*1,
'cores_superm':4,
'file_name':FILE_NAME,
'from_disk':0,
'debug':False,
'do_runs':range(NUM_RUNS), #A run for each perturbation
'do_obj':False,
'i0':FROM_DISK_0,
'job_name':'inh_YYY',
'l_hours': ['00','00','00'],
'l_minutes':['15','10','5'],
'l_seconds':['00','00','00'],
'lower':1,
'local_threads_milner':20,
'local_threads_superm':1,
'module':module,
'nets':['Net_{}'.format(i) for i in range(NUM_NETS)],
'resolution':5,
'repetitions':1,
'path_code':default_params.HOME_CODE,
'path_results':get_path_logs(LOAD_MILNER_ON_SUPERMICRO,
FILE_NAME),
'perturbation_list':op.get(),
'size':3000,
'upper':3}
d_process_and_thread=par_process_and_thread(**kwargs)
pp(d_process_and_thread)
kwargs.update(d_process_and_thread)
p_list = pert_add_inhibition(**kwargs)
p_list = pert_set_data_path_to_milner_on_supermicro(p_list,
LOAD_MILNER_ON_SUPERMICRO)
for i, p in enumerate(p_list): print i, p
a_list=get_args_list_inhibition(p_list, **kwargs)
k_list=get_kwargs_list_indv_nets(len(p_list), kwargs)
for obj in a_list:
print obj.kwargs['setup'].nets_to_run
# for i, a in enumerate(args_list):
# print i, a
loop(min(num_sim, 10),[num_sim, num_sim, NUM_RUNS], a_list, k_list )
# loop(args_list, path, 1)
| [
"[email protected]"
] | |
1175d28772eb9d5b231c3206392fb90d67127bab | b8a803694c283a5acd13ab6760a36710884ab24f | /llvm/mc/__init__.py | 69dd12f877e6415b53f60c7690e36b2f9d76a64c | [
"NCSA",
"BSD-3-Clause"
] | permissive | llvmpy/llvmpy | 8a4c31e731364ead802231b97e058b8f8c444f96 | 13130fe35f1fb03a7051ad46c36146002391a6fa | refs/heads/master | 2016-09-05T16:48:54.694686 | 2015-04-28T16:21:34 | 2015-04-28T16:21:34 | 3,375,197 | 155 | 13 | null | 2015-05-27T18:36:45 | 2012-02-07T07:09:59 | HTML | UTF-8 | Python | false | false | 6,676 | py | import sys
import llvm
if llvm.version < (3, 4):
raise Exception("mc is not supported for llvm version less than 3.4")
from io import BytesIO
import contextlib
from llvmpy import api, extra
from llvmpy.api.llvm import MCDisassembler
class Operand(object):
def __init__(self, mcoperand, target_machine):
'''
@mcoperand: an MCOperand object
@target_machine: an llvm.target.TargetMachine object
'''
self.op = mcoperand
if not self.op:
raise llvm.LLVMException("null MCOperand argument")
self.tm = target_machine
def __str__(self):
s = "invalid"
if self.is_reg():
s = "reg(%s)" % (self.reg_name())
elif self.is_imm():
s = "imm(0x%02x)" % (self.op.getImm())
elif self.is_fp_imm():
s = "imm(%r)" % (self.op.getFPImm())
elif self.is_expr():
s = "expr(%r)" % (self.op.getExpr().getKind())
elif self.is_inst():
s = repr(Instr(self.op.getInst()))
return s
def __repr__(self):
return str(self)
def reg_name(self):
if self.is_reg():
s = self.tm.reg_info.getName(self.op.getReg())
if s.strip() == "":
return "?"
else:
return s
else:
return ""
def is_reg(self):
return self.op.isReg()
def is_imm(self):
return self.op.isImm()
def is_fp_imm(self):
return self.op.isFPImm()
def is_expr(self):
return self.op.isExpr()
def is_inst(self):
return self.op.isInst()
def get_imm(self):
if self.is_imm():
return self.op.getImm()
else:
return None
def get_fp_imm(self):
if self.is_fp_imm():
return self.op.getFPImm()
else:
return None
def get_inst(self):
if self.is_inst():
return Instr(self.op.getInst())
else:
return None
class Instr(object):
def __init__(self, mcinst, target_machine):
'''
@mcinst: an MCInst object
@target_machine: an llvm.target.TargetMachine object
'''
self.mcinst = mcinst
if not self.mcinst:
raise llvm.LLVMException("null MCInst argument")
self.tm = target_machine
def __str__(self):
os = extra.make_raw_ostream_for_printing()
self.tm.inst_printer.printInst(self.mcinst, os, "")
return str(os.str())
def __repr__(self):
return str(self)
def __len__(self):
''' the number of operands '''
return int(self.mcinst.size())
def operands(self):
amt = self.mcinst.getNumOperands()
if amt < 1:
return []
l = []
for i in range(0, amt):
l.append(Operand(self.mcinst.getOperand(i), self.tm))
return l
@property
def instr_desc(self):
return self.tm.instr_info.get(self.opcode)
@property
def flags(self):
return self.instr_desc.getFlags()
@property
def ts_flags(self):
return self.instr_desc.TSFlags
@property
def opcode(self):
return self.mcinst.getOpcode()
def is_branch(self):
return self.instr_desc.isBranch()
def is_cond_branch(self):
return self.instr_desc.isConditionalBranch()
def is_uncond_branch(self):
return self.instr_desc.isUnconditionalBranch()
def is_indirect_branch(self):
return self.instr_desc.isIndirectBranch()
def is_call(self):
return self.instr_desc.isCall()
def is_return(self):
return self.instr_desc.isReturn()
def is_terminator(self):
return self.instr_desc.isTerminator()
def is_barrier(self):
return self.instr_desc.isBarrier()
class BadInstr(Instr):
pass
class Disassembler(object):
def __init__(self, target_machine):
self.tm = target_machine
@property
def mdasm(self):
return self.tm.disassembler
@property
def mai(self):
return self.tm.asm_info
def instr(self, mcinst):
return Instr(mcinst, self.tm)
def bad_instr(self, mcinst):
return BadInstr(mcinst, self.tm)
def decode(self, bs, base_addr, align=None):
'''
decodes the bytes in @bs into instructions and yields
each instruction as it is decoded. @base_addr is the base address
where the instruction bytes are from (not an offset into
@bs). yields instructions in the form of (addr, data, inst) where
addr is an integer, data is a tuple of integers and inst is an instance of
llvm.mc.Instr. @align specifies the byte alignment of instructions and
is only used if an un-decodable instruction is encountered, in which
case the disassembler will skip the following bytes until the next
aligned address. if @align is unspecified, the default alignment
for the architecture will be used, however this may not be ideal
for disassembly. for example, the default alignment for ARM is 1, but you
probably want it to be 4 for the purposes of disassembling ARM
instructions.
'''
if isinstance(bs, str) and sys.version_info.major >= 3:
bs = bytes(map(lambda c: ord(c), bs))
elif not isinstance(bs, bytes):
raise TypeError("expected bs to be either 'str' or 'bytes' but got %s" % type(bs))
code = api.llvm.StringRefMemoryObject.new(bs, base_addr)
idx = 0
if not isinstance(align, int) or align < 1:
align = self.mai.getMinInstAlignment()
while(idx < code.getExtent()):
inst = api.llvm.MCInst.new()
addr = code.getBase() + idx
status, size = self.mdasm.getInstruction(inst, code, addr)
if size < 1:
size = (align - (idx % align))
amt_left = code.getExtent() - idx
if amt_left >= size:
data = code.readBytes(addr, size)
elif amt_left < 1:
break
else:
data = code.readBytes(addr, amt_left)
if sys.version_info.major < 3:
data = tuple(map(lambda b: ord(b), data))
else:
data = tuple(data)
if status == MCDisassembler.DecodeStatus.Fail:
yield (addr, data, None)
elif status == MCDisassembler.DecodeStatus.SoftFail:
yield (addr, data, self.bad_instr(inst))
else:
yield (addr, data, self.instr(inst))
idx += size
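# Example usage (added sketch, not part of the original module). It assumes `tm` is an
# already-constructed llvm.target.TargetMachine for the bytes being disassembled and that
# `raw_bytes` holds raw machine code; building the target machine is outside this module.
#
#     dasm = Disassembler(tm)
#     for addr, data, inst in dasm.decode(raw_bytes, 0x1000, align=4):
#         if inst is None:
#             print("0x%08x: <undecodable> %r" % (addr, data))
#         else:
#             print("0x%08x: %s" % (addr, inst))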
| [
"[email protected]"
] | |
cd768bdf9259efd8ae6f1c74de49916277ef7c0b | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startCirq1222.py | b2897a1f78eb02d31ad0854ee13aa149499f7d5a | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,794 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=49
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[1])) # number=26
c.append(cirq.CZ.on(input_qubit[4],input_qubit[1])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=28
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
c.append(cirq.H.on(input_qubit[1])) # number=34
c.append(cirq.CZ.on(input_qubit[4],input_qubit[1])) # number=35
c.append(cirq.rx(0.8011061266653969).on(input_qubit[2])) # number=37
c.append(cirq.H.on(input_qubit[1])) # number=36
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=46
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=47
c.append(cirq.H.on(input_qubit[0])) # number=48
c.append(cirq.X.on(input_qubit[0])) # number=39
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=40
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=42
c.append(cirq.X.on(input_qubit[1])) # number=43
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=44
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.Y.on(input_qubit[1])) # number=45
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=41
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=22
c.append(cirq.X.on(input_qubit[0])) # number=23
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=30
c.append(cirq.X.on(input_qubit[1])) # number=31
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=32
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.H.on(input_qubit[4])) # number=29
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1222.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
] | |
fb8e9457ad5e04fd8f1f282ecd96716532bbf285 | dbfdbe3c1d5e3ad38625d8c971fe8dd45c8c3885 | /device_agent/snmp/libs/pysmi-0.3.1/pysmi/reader/zipreader.py | d9f6c4aeb941e5044dd6806f94dd71c09fbca20c | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | fyfdoc/IntegrateTest | a58f6d0ea7cff5f67d79d7e042c0bb39c6b8bbbb | 0d8374406c10c313d6627699879215841e0ebdb6 | refs/heads/master | 2022-12-03T02:32:37.388556 | 2019-01-25T02:36:42 | 2019-01-25T02:36:42 | 167,468,256 | 0 | 1 | null | 2022-11-29T20:58:41 | 2019-01-25T01:59:28 | Python | UTF-8 | Python | false | false | 5,627 | py | #
# This file is part of pysmi software.
#
# Copyright (c) 2015-2018, Ilya Etingof <[email protected]>
# License: http://snmplabs.com/pysmi/license.html
#
import os
import sys
import time
import datetime
import zipfile
from pysmi.reader.base import AbstractReader
from pysmi.mibinfo import MibInfo
from pysmi.compat import decode
from pysmi import debug
from pysmi import error
class FileLike(object):
"""Stripped down, binary file mock to work with ZipFile"""
def __init__(self, buf, name):
self.name = name
self.buf = buf
self.null = buf[:0]
self.len = len(buf)
self.buflist = []
self.pos = 0
self.closed = False
self.softspace = 0
def close(self):
if not self.closed:
self.closed = True
self.buf = self.null
self.pos = 0
def seek(self, pos, mode = 0):
if self.buflist:
self.buf += self.null.join(self.buflist)
self.buflist = []
if mode == 1:
pos += self.pos
elif mode == 2:
pos += self.len
self.pos = max(0, pos)
def tell(self):
return self.pos
def read(self, n=-1):
if self.buflist:
self.buf += self.null.join(self.buflist)
self.buflist = []
if n < 0:
newpos = self.len
else:
newpos = min(self.pos + n, self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
class ZipReader(AbstractReader):
"""Fetch ASN.1 MIB text by name from a ZIP archive.
*ZipReader* class instance tries to locate ASN.1 MIB files
by name, fetch and return their contents to caller.
"""
useIndexFile = False
def __init__(self, path, ignoreErrors=True):
"""Create an instance of *ZipReader* serving a ZIP archive.
Args:
path (str): path to ZIP archive containing MIB files
Keyword Args:
ignoreErrors (bool): ignore ZIP archive access errors
"""
self._name = path
self._members = {}
self._pendingError = None
try:
self._members = self._readZipDirectory(fileObj=open(path, 'rb'))
except Exception:
debug.logger & debug.flagReader and debug.logger(
'ZIP file %s open failure: %s' % (self._name, sys.exc_info()[1]))
if not ignoreErrors:
self._pendingError = error.PySmiError('file %s access error: %s' % (self._name, sys.exc_info()[1]))
def _readZipDirectory(self, fileObj):
archive = zipfile.ZipFile(fileObj)
if isinstance(fileObj, FileLike):
fileObj = None
members = {}
for member in archive.infolist():
filename = os.path.basename(member.filename)
if not filename:
continue
if (member.filename.endswith('.zip') or
member.filename.endswith('.ZIP')):
innerZipBlob = archive.read(member.filename)
innerMembers = self._readZipDirectory(FileLike(innerZipBlob, member.filename))
for innerFilename, ref in innerMembers.items():
while innerFilename in members:
innerFilename += '+'
members[innerFilename] = [[fileObj, member.filename, None]]
members[innerFilename].extend(ref)
else:
mtime = time.mktime(datetime.datetime(*member.date_time[:6]).timetuple())
members[filename] = [[fileObj, member.filename, mtime]]
return members
def _readZipFile(self, refs):
for fileObj, filename, mtime in refs:
if not fileObj:
fileObj = FileLike(dataObj, name=self._name)
archive = zipfile.ZipFile(fileObj)
try:
dataObj = archive.read(filename)
except Exception:
debug.logger & debug.flagReader and debug.logger('ZIP read component %s read error: %s' % (fileObj.name, sys.exc_info()[1]))
return '', 0
return dataObj, mtime
def __str__(self):
return '%s{"%s"}' % (self.__class__.__name__, self._name)
def getData(self, mibname, zipBlob=None):
debug.logger & debug.flagReader and debug.logger('looking for MIB %s at %s' % (mibname, self._name))
if self._pendingError:
raise self._pendingError
if not self._members:
raise error.PySmiReaderFileNotFoundError('source MIB %s not found' % mibname, reader=self)
for mibalias, mibfile in self.getMibVariants(mibname):
debug.logger & debug.flagReader and debug.logger('trying MIB %s' % mibfile)
try:
refs = self._members[mibfile]
except KeyError:
continue
mibData, mtime = self._readZipFile(refs)
if not mibData:
continue
debug.logger & debug.flagReader and debug.logger(
'source MIB %s, mtime %s, read from %s/%s' % (mibfile, time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(mtime)), self._name, mibfile)
)
if len(mibData) == self.maxMibSize:
raise IOError('MIB %s/%s too large' % (self._name, mibfile))
return MibInfo(path='zip://%s/%s' % (self._name, mibfile),
file=mibfile, name=mibalias, mtime=mtime), decode(mibData)
raise error.PySmiReaderFileNotFoundError('source MIB %s not found' % mibname, reader=self)
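# Example usage (added sketch, not part of the original module); the archive path and MIB
# name below are hypothetical:
#
#     from pysmi.reader.zipreader import ZipReader
#
#     reader = ZipReader('/path/to/mibs.zip')
#     mibinfo, text = reader.getData('SNMPv2-MIB')
#     print(mibinfo.path, len(text))
#
# getData() raises error.PySmiReaderFileNotFoundError when no matching MIB is found in
# the archive.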
| [
"[email protected]"
] | |
46841f47f1f695cf591b225b1aa16e65ae0935ef | 5dd190725aaaeb7287d935b3c99c20480b208816 | /object_detection/utils/np_box_list_test.py | 0cf2ef4d21dd8fea0b5d78c45776b8866d1f7cdc | [
"MIT"
] | permissive | DemonDamon/mask-detection-based-on-tf2odapi | 32d947164fb54395b9e45368c0d4bcf3a6ea1c28 | 192ae544169c1230c21141c033800aa1bd94e9b6 | refs/heads/main | 2023-05-13T05:05:44.534885 | 2021-06-08T05:56:09 | 2021-06-08T05:56:09 | 369,463,131 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,436 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_list
class BoxListTest(tf.test.TestCase):
def test_invalid_box_data(self):
with self.assertRaises(ValueError):
np_box_list.BoxList([0, 0, 1, 1])
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([[0, 0, 1, 1]], dtype=int))
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([0, 1, 1, 3, 4], dtype=float))
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([[0, 1, 1, 3], [3, 1, 1, 5]], dtype=float))
def test_has_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertTrue(boxlist.has_field('boxes'))
def test_has_field_with_nonexisted_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertFalse(boxlist.has_field('scores'))
def test_get_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertTrue(np.allclose(boxlist.get_field('boxes'), boxes))
def test_get_field_with_nonexited_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
with self.assertRaises(ValueError):
boxlist.get_field('scores')
class AddExtraFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
def test_add_already_existed_field(self):
with self.assertRaises(ValueError):
self.boxlist.add_field('boxes', np.array([[0, 0, 0, 1, 0]], dtype=float))
def test_add_invalid_field_data(self):
with self.assertRaises(ValueError):
self.boxlist.add_field('scores', np.array([0.5, 0.7], dtype=float))
with self.assertRaises(ValueError):
self.boxlist.add_field('scores',
np.array([0.5, 0.7, 0.9, 0.1], dtype=float))
def test_add_single_dimensional_field_data(self):
boxlist = self.boxlist
scores = np.array([0.5, 0.7, 0.9], dtype=float)
boxlist.add_field('scores', scores)
self.assertTrue(np.allclose(scores, self.boxlist.get_field('scores')))
def test_add_multi_dimensional_field_data(self):
boxlist = self.boxlist
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
boxlist.add_field('labels', labels)
self.assertTrue(np.allclose(labels, self.boxlist.get_field('labels')))
def test_get_extra_fields(self):
boxlist = self.boxlist
self.assertItemsEqual(boxlist.get_extra_fields(), [])
scores = np.array([0.5, 0.7, 0.9], dtype=float)
boxlist.add_field('scores', scores)
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores'])
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
boxlist.add_field('labels', labels)
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores', 'labels'])
def test_get_coordinates(self):
y_min, x_min, y_max, x_max = self.boxlist.get_coordinates()
expected_y_min = np.array([3.0, 14.0, 0.0], dtype=float)
expected_x_min = np.array([4.0, 14.0, 0.0], dtype=float)
expected_y_max = np.array([6.0, 15.0, 20.0], dtype=float)
expected_x_max = np.array([8.0, 15.0, 20.0], dtype=float)
self.assertTrue(np.allclose(y_min, expected_y_min))
self.assertTrue(np.allclose(x_min, expected_x_min))
self.assertTrue(np.allclose(y_max, expected_y_max))
self.assertTrue(np.allclose(x_max, expected_x_max))
def test_num_boxes(self):
boxes = np.array([[0., 0., 100., 100.], [10., 30., 50., 70.]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
expected_num_boxes = 2
self.assertEquals(boxlist.num_boxes(), expected_num_boxes)
if __name__ == '__main__':
tf.test.main()
| [
"[email protected]"
] | |
5e82d5c5a82104ee6f3ba514fcce0106579c026f | 715a11d7b8f15694a5cc4b47ac0e3a3cfc4ffedc | /peakelem.py | 5d99b8c5e4760ff7fad5f9cbebcb6e3ce1a46279 | [] | no_license | mohanrajanr/CodePrep | 5cd538d16598f6a0d2486357d3cc6e0fa1626e4e | 2e23a5f996139b887bf723f58b23368cf8121cd4 | refs/heads/main | 2023-04-23T04:10:06.111120 | 2021-05-11T06:47:51 | 2021-05-11T06:47:51 | 366,283,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | from typing import List
def findPeakElement(nums: List[int]) -> int:
    # Binary search for a peak element: if nums[mid] < nums[mid + 1], the slope rises to
    # the right, so a peak must exist to the right of mid; otherwise a peak exists at mid
    # or to its left. The window shrinks until l == r, which is a peak index.
    l = 0
    r = len(nums) - 1
    while l < r:
        mid = l + (r - l) // 2
        if nums[mid] < nums[mid + 1]:
            l = mid + 1
        else:
            r = mid
    return l
print(findPeakElement([1,2,3,1]))
print(findPeakElement([1,2,1,3,5,6,4]))
| [
"[email protected]"
] | |
a7137729e4f44867909a24c14dd9be012679b1c0 | d7016f69993570a1c55974582cda899ff70907ec | /sdk/security/azure-mgmt-security/azure/mgmt/security/v2019_01_01_preview/aio/operations/_regulatory_compliance_standards_operations.py | a1a59794d5339f57d8b5bd9b86f55456dcf52ab8 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 9,216 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._regulatory_compliance_standards_operations import build_get_request, build_list_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RegulatoryComplianceStandardsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.security.v2019_01_01_preview.aio.SecurityCenter`'s
:attr:`regulatory_compliance_standards` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(
self, filter: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.RegulatoryComplianceStandard"]:
"""Supported regulatory compliance standards details and state.
:param filter: OData filter. Optional. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RegulatoryComplianceStandard or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.security.v2019_01_01_preview.models.RegulatoryComplianceStandard]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2019-01-01-preview")
) # type: Literal["2019-01-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.RegulatoryComplianceStandardList]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("RegulatoryComplianceStandardList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards"} # type: ignore
@distributed_trace_async
async def get(
self, regulatory_compliance_standard_name: str, **kwargs: Any
) -> _models.RegulatoryComplianceStandard:
"""Supported regulatory compliance details state for selected standard.
:param regulatory_compliance_standard_name: Name of the regulatory compliance standard object.
Required.
:type regulatory_compliance_standard_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegulatoryComplianceStandard or the result of cls(response)
:rtype: ~azure.mgmt.security.v2019_01_01_preview.models.RegulatoryComplianceStandard
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", "2019-01-01-preview")
) # type: Literal["2019-01-01-preview"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.RegulatoryComplianceStandard]
request = build_get_request(
regulatory_compliance_standard_name=regulatory_compliance_standard_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("RegulatoryComplianceStandard", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Security/regulatoryComplianceStandards/{regulatoryComplianceStandardName}"} # type: ignore
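# Example usage (added sketch, not part of the generated client). It assumes `client` is an
# already-authenticated azure.mgmt.security.v2019_01_01_preview.aio.SecurityCenter instance,
# as described in the class docstring above; the standard name is hypothetical:
#
#     async def dump_standards(client) -> None:
#         async for standard in client.regulatory_compliance_standards.list():
#             print(standard.name, standard.state)
#         pci = await client.regulatory_compliance_standards.get("PCI-DSS-3.2")
#         print(pci.name, pci.state)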
| [
"[email protected]"
] | |
39cbc94ee7bdfab87c35956c0e4db581e7be8f01 | f0932f59d37adfbba9307ee31e6f78ce3c256c4a | /scripts/pick_primers.py | 4970130ecdc69ece8f850de75796334dbcf07178 | [] | no_license | kalekundert/ligrna | 3785a1e5fb8ed6d07839a5314029f3fc882d4471 | 843963973c34c4976f5adfbd4d03f5f1d0344423 | refs/heads/master | 2020-04-12T12:52:32.828100 | 2020-02-22T00:59:57 | 2020-02-22T00:59:57 | 162,505,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | #!/usr/bin/env python3
"""\
Automatically design primers that can be used to construct the given sgRNA
design by overlap extension PCR. There are a number of parameters controlling
how ideal the primers have to be, and you can play with them to get more or
fewer results.
I ended up not using this script in favor of ordering my designs as gBlocks
gene fragments from IDT. The PCR assembly takes as long as it takes IDT to
deliver gBlocks, and the gBlocks are much higher purity. The gBlocks are also
not that much more expensive at $90 per design. Most of the primers are ~$30,
and then you have to add reagents and my time.
Usage:
pick_primers.py <name> [options]
Options:
--max-num-primers NUM
--min-primer-len LEN [default: 40]
--max-primer-len LEN [default: 50]
--min-overlap-len LEN [default: 18]
--max-overlap-len LEN [default: 22]
--min-overlap-tm CELSIUS [default: 52.0]
--max-overlap-tm CELSIUS [default: 58.0]
--max-tm-diff DELTA-CELSIUS [default: 2.0]
--max-gc-content PERCENT [default: 0.6]
--min-gc-content PERCENT [default: 0.3]
-c, --color WHEN [default: auto]
-q, --header-only
"""
import sys, docopt
import pcr_helper, sgrna_sensor
args = docopt.docopt(__doc__)
print('$ ' + ' '.join(sys.argv))
print()
design = sgrna_sensor.from_name(args['<name>'])
assembler = pcr_helper.PcrAssembly()
assembler.max_num_primers = int(args['--max-num-primers'] or 0)
assembler.min_primer_len = int(args['--min-primer-len'])
assembler.max_primer_len = int(args['--max-primer-len'])
assembler.min_overlap_len = int(args['--min-overlap-len'])
assembler.max_overlap_len = int(args['--max-overlap-len'])
assembler.min_overlap_tm = float(args['--min-overlap-tm'])
assembler.max_overlap_tm = float(args['--max-overlap-tm'])
assembler.max_tm_diff = float(args['--max-tm-diff'])
assembler.max_gc_content = float(args['--max-gc-content'])
assembler.min_gc_content = float(args['--min-gc-content'])
assembler.use_color = args['--color']
assembler.find_primers(design)
assembler.print_primers(args['--header-only'])
| [
"[email protected]"
] | |
a0feaf8c56a52a21c80539eab8e8ed88e51eac94 | 781e2692049e87a4256320c76e82a19be257a05d | /intervention/results/control_111904_1447993241_112_7.5.py | 845005cad1ed0885c10843c1b9b9886bcb1ed4e3 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 178 | py | def num_common_letters(goal_word, guess):
common = []
for char in list(guess):
if char in list(goal_word) and char not in common:
common += [char]
return len(common) | [
"[email protected]"
] | |
e6baa85e72a32507593f58f59909889a6f6d0876 | a4009f6d6f5379ddd9e948c3083c92fe8f1be259 | /tutorial/schema_design.py | fa4df3ae574cba3890816db3e74c5da08d33f0f7 | [
"MIT"
] | permissive | MacHu-GWU/learn_whoosh-project | 44a3b66a81b5a4686f48fa72b2e02538cfd2616e | 3ffff3b2084d2bb0bd17f38be322f75fa14986b5 | refs/heads/master | 2018-12-21T09:41:36.829399 | 2018-09-30T03:03:25 | 2018-09-30T03:03:25 | 26,701,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,667 | py | ##encoding=utf8
"""
Field reference: http://pythonhosted.org//Whoosh/api/fields.html
stored – Whether the value of this field is stored with the document.
unique – Whether the value of this field is unique per-document.
"""
from __future__ import print_function, unicode_literals
from whoosh.index import create_in
from whoosh.fields import *
from whoosh.qparser import *
from whoosh.qparser.dateparse import DateParserPlugin
from datetime import datetime, date, timedelta
import os, shutil
def restart():
path = "indexdir"
for fname in os.listdir(path):
try:
os.remove(os.path.join(path, fname))
except:
shutil.rmtree(os.path.join(path, fname))
restart()
def example00():
"""
    This is the example from the official documentation; it walks through several of whoosh's basic abstractions.
"""
    ## Define the document schema. In this example a document has 3 fields: title is TEXT, path is ID, and content is TEXT.
    ## The field types and the meaning of the stored parameter are all covered in the later examples.
schema = Schema(title=TEXT(stored=True), path=ID(stored=True), content=TEXT)
    ## Create the index (abbreviated ix). Following the schema we defined, the search engine automatically builds an index for each field.
    ## An index exposes two main helpers: a writer and a searcher.
ix = create_in("indexdir", schema)
    ## Create a writer and push data into the index through it. Like a database, the writer also uses a commit mechanism.
writer = ix.writer()
writer.add_document(title="First document", path="/a",
content="This is the first document we've added!")
writer.add_document(title="Second document", path="/b",
content="The second one is even more interesting!")
writer.commit()
    ## Create the searcher.
with ix.searcher() as searcher:
        ## Create a QueryParser; it parses the user's search string into a query the engine understands.
queryparser = QueryParser("content", ix.schema)
        ## Build the query with the .parse method.
query = queryparser.parse("first")
        ## Pass the query to the searcher to run the search.
results = searcher.search(query)
print(results[0])
example00()
def example01():
"""whoosh.fields.ID
    ID is similar to a primary key in a database: a SELECT by primary key only matches when the ID matches exactly, and it is case-sensitive.
    ID is therefore a good fit for storing attributes that uniquely identify a document, for example:
document_path
document_url
create_time_stamp
"""
schema = Schema(filepath=ID(stored=True))
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(filepath = "C:\python27\scripts")
writer.commit()
with ix.searcher() as searcher:
query = QueryParser("filepath", ix.schema).parse("C:\python27\scripts")
print(searcher.search(query)[0])
query = QueryParser("filepath", ix.schema).parse("C:\Python27\scripts")
print(searcher.search(query)[0])
# example01()
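## Added example (not part of the original tutorial): the module docstring mentions the
## `unique` option, but none of the examples here use it. A unique ID field lets
## writer.update_document() replace the document with the same key instead of adding a
## duplicate. Field names and values below are made up for illustration.
def example01b():
    schema = Schema(filepath=ID(stored=True, unique=True), size=NUMERIC(int, stored=True))
    ix = create_in("indexdir", schema)
    writer = ix.writer()
    writer.add_document(filepath="C:\python27\scripts", size=10)
    writer.commit()
    # update_document deletes any document whose unique field matches, then adds the new
    # one, so the index still holds a single document for this filepath afterwards.
    writer = ix.writer()
    writer.update_document(filepath="C:\python27\scripts", size=20)
    writer.commit()
    with ix.searcher() as searcher:
        query = QueryParser("filepath", ix.schema).parse("C:\python27\scripts")
        print(searcher.search(query)[0])
# example01b()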
def example02():
"""whoosh.fields.IDLIST
    IDLIST is like having several primary keys in a database, and it is case-sensitive as well. For everything else, see the ID example.
"""
schema = Schema(city_and_state=IDLIST(stored=True), )
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(city_and_state = "arlington VA")
writer.commit()
with ix.searcher() as searcher:
query = QueryParser("city_and_state", ix.schema).parse("arlington", "VA")
print(searcher.search(query)[0])
query = QueryParser("city_and_state", ix.schema).parse("VA, arlington")
print(searcher.search(query)[0])
query = QueryParser("city_and_state", ix.schema).parse("arlington")
print(searcher.search(query)[0])
query = QueryParser("city_and_state", ix.schema).parse("VA")
print(searcher.search(query)[0])
# example02()
def example03():
"""whoosh.fields.STORED
    STORED means the field itself cannot be searched, but whenever another field is matched its value is returned along with the hit.
"""
schema = Schema(SSN=ID(stored=True), memo=STORED, )
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(SSN = "123456", memo = "he is my best friend in high school")
writer.commit()
with ix.searcher() as searcher:
query = QueryParser("SSN", ix.schema).parse("123456")
print(searcher.search(query)[0])
query = QueryParser("memo", ix.schema).parse("he is my best friend in high school") # unsearchable
print(searcher.search(query)[0])
# example03()
def example04():
"""whoosh.fields.KEYWORD
    KEYWORD suits tag-like data. Each document carries a number of tags, and tags may repeat. Unlike IDLIST,
    many documents may share the same set of tags.
"""
schema = Schema(tags=KEYWORD(stored=True, commas=True))
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(tags = "action,romance,story,war")
writer.commit()
with ix.searcher() as searcher:
query = QueryParser("tags", ix.schema).parse("story", "war")
print(searcher.search(query)[0])
query = QueryParser("tags", ix.schema).parse("war action")
print(searcher.search(query)[0])
# example04()
def example05():
"""whoosh.fields.TEXT
    TEXT is meant for text content. Searching works like phrase search: the smallest searchable unit is a word, and matching is case-insensitive. For example:
"am", "boy" ===> "I am a boy"
    TEXT is word-oriented, so queries that mix numbers and words are not well supported.
    To match arbitrary letter fragments, see the ngram examples below.
"""
schema = Schema(sentence=TEXT(stored=True))
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(sentence = "I live in 1400 S Joyce St")
writer.commit()
with ix.searcher() as searcher:
# query = QueryParser("sentence", ix.schema).parse("live", "joyce")
# print(searcher.search(query)[0])
# query = QueryParser("sentence", ix.schema).parse("joyce", "1400")
# print(searcher.search(query)[0])
        # query = QueryParser("sentence", ix.schema).parse("live", "1400", "joyce") # numbers and words mixed, no match
# print(searcher.search(query)[0])
        # query = QueryParser("sentence", ix.schema).parse("joy") # word fragment, no match
# print(searcher.search(query)[0])
qp = QueryParser("sentence", ix.schema)
        qp.add_plugin(WildcardPlugin()) # wildcard plugin for partial-string matching
q = qp.parse("S joy*")
print(searcher.search(q)[0])
# example05()
def example06():
"""whoosh.fields.NUMERIC
    Numeric field; comes in two flavors, int and float.
"""
schema = Schema(temperature=NUMERIC(float, stored=True))
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(temperature = 32.3)
writer.commit()
with ix.searcher() as searcher:
qp = QueryParser("temperature", ix.schema)
        qp.add_plugin(GtLtPlugin()) # plugin adding greater-than / less-than support
q = qp.parse("temperature:>=20.0")
print(searcher.search(q)[0])
        qp.add_plugin(OperatorsPlugin(And="&")) # plugin adding AND / OR / NOT operators
q = qp.parse("temperature:>=20.0 & temperature:<=40.0")
print(searcher.search(q)[0])
        qp.add_plugin(RangePlugin()) # plugin adding range queries
q = qp.parse("temperature:{20 to]")
print(searcher.search(q)[0])
# example06()
def example07():
"""whoosh.fields.DATETIME
"""
schema = Schema(create_date=DATETIME(stored=True))
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(create_date = datetime(2014,1,10,6,30,0))
writer.commit()
with ix.searcher() as searcher:
qp = QueryParser("create_date", ix.schema)
        qp.add_plugin(DateParserPlugin()) # DateParserPlugin ships with greater-than, less-than and range syntax
q = qp.parse("2014-01-10-06-30-00")
print(searcher.search(q)[0])
q = qp.parse("create_date:[20140110063000 to ]") # whoosh支持最良好的就是年月日小时分钟秒连续写在一起的格式
print(searcher.search(q)[0])
qp.add_plugin(OperatorsPlugin())
q = qp.parse("create_date:[201403 to] OR [to 201402] ")
print(searcher.search(q)[0])
# example07()
def example08():
"""whoosh.fields.NGRAM
    NGRAM splits the text into words, then breaks each word into small chunks.
"""
schema = Schema(tweets=NGRAM(minsize=2, maxsize=6, stored=True))
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(tweets="Heard that? Kate just get a boyfriend!")
writer.commit()
with ix.searcher() as searcher:
qp = QueryParser("tweets", schema)
q = qp.parse("oyfrien") # 成功,因为在
print(searcher.search(q)[0])
q = qp.parse("boyfriend") # 成功,匹配整个词
print(searcher.search(q)[0])
q = qp.parse("e ju") # 不成功,因为空格跨越了词
print(searcher.search(q)[0])
# example08()
def example09():
"""whoosh.fields.NGRAMWORDS
    NGRAMWORDS splits the whole text into letter chunks regardless of words; everything, spaces and punctuation included, is chopped into chunks.
"""
schema = Schema(tweets=NGRAMWORDS(minsize=2, maxsize=8, stored=True))
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(tweets="Heard that? Kate just get a boyfriend!")
writer.commit()
with ix.searcher() as searcher:
qp = QueryParser("tweets", schema)
q = qp.parse("oyfrien") # 成功,因为在
print(searcher.search(q)[0])
q = qp.parse("boyfriend") # 成功,匹配整个词
print(searcher.search(q)[0])
q = qp.parse("e ju") # 成功,因为是拆分的整个文本
print(searcher.search(q)[0])
# example09()
def example10():
"""根据多个字段进行全文搜索
"""
schema = Schema(name=TEXT(stored=True), height=NUMERIC(float, stored=True))
ix = create_in("indexdir", schema)
writer = ix.writer()
writer.add_document(name = "Jack", height=180.5)
writer.commit()
with ix.searcher() as searcher:
qp = QueryParser(None, schema)
        qp.add_plugin(MultifieldPlugin(["name", "height"])) # multi-field search parser
qp.add_plugin(GtLtPlugin())
q = qp.parse("Jack height:>=180.5")
print(searcher.search(q)[0])
# example10()
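## Added example (not part of the original tutorial): whoosh also ships a ready-made
## MultifieldParser that targets several fields without wiring MultifieldPlugin by hand.
## It should be pulled in by the `from whoosh.qparser import *` above; if not, import it
## explicitly with `from whoosh.qparser import MultifieldParser`.
def example11():
    """Same multi-field search as example10, using MultifieldParser directly.
    """
    schema = Schema(name=TEXT(stored=True), height=NUMERIC(float, stored=True))
    ix = create_in("indexdir", schema)
    writer = ix.writer()
    writer.add_document(name="Jack", height=180.5)
    writer.commit()
    with ix.searcher() as searcher:
        qp = MultifieldParser(["name", "height"], schema=ix.schema)
        qp.add_plugin(GtLtPlugin())
        q = qp.parse("Jack height:>=180.5")
        print(searcher.search(q)[0])
# example11()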
| [
"[email protected]"
] |