content (stringlengths 7-928k) | avg_line_length (float64 3.5-33.8k) | max_line_length (int64 6-139k) | alphanum_fraction (float64 0.08-0.96) | licenses (sequence) | repository_name (stringlengths 7-104) | path (stringlengths 4-230) | size (int64 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
from collections import defaultdict
game = defaultdict(list)
tuple_list_county = [('US', 'Wisconsin'), ('Germany', 'Bavaria'), ('UK', 'Bradfordshire'), ('India', 'Punjab'), ('China', 'Shandong'), ('Canada', 'Nova Scotia')]
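# Looking up a missing key on a defaultdict(list) inserts and returns an empty list, so this prints []: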
print(game["any_value"])
for k,v in tuple_list_county:
game[k].append(v)
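# After the loop, game groups regions by country, e.g. game['US'] == ['Wisconsin'].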
print(game)
| 26.25 | 162 | 0.669841 | ["MIT"] | LuisPereda/Learning_Python | Chapter10/default_dict_list_of_tuples.py | 315 | Python |
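# Renders a pixel-art heart in the terminal by composing "step" functions, each of which
# appends one ANSI-escaped space ("\033[46m" gives a cyan background, "\033[0m" resets).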
def pixel(num):
def f(s):
return s + '\033[{}m \033[0m'.format(num)
return f
def new_line(s):
return s + u"\n"
def build(*steps, string=""):
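    # Thread the string through each step in turn; e.g. build(cyan, cyan, new_line)
    # returns two cyan "pixels" followed by a newline.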
for step in steps:
string = step(string)
return string
def main():
cyan = pixel(46)
space = pixel('08')
heart = [new_line,
space, space, cyan, cyan, space, space, space, cyan, cyan, new_line,
space, cyan, cyan, cyan, cyan, space, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, cyan, cyan, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, space, cyan, cyan, cyan, cyan, cyan, new_line,
space, space, space, space, cyan, cyan, cyan, new_line,
space, space, space, space, space, cyan, new_line]
print(build(*heart))
if __name__ == '__main__':
    main()
| 36.6875 | 87 | 0.579216 | ["MIT"] | xxninjabunnyxx/pixel-pop-heart-challenge | heart.py | 1,174 | Python |
class AdminCatalogHelper:
def __init__(self, app):
self.app = app
def go_though_each_product_and_print_browser_log(self):
for i in range(len(self.app.wd.find_elements_by_css_selector('.dataTable td:nth-of-type(3) a[href*="&product_id="]'))):
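            # Re-locate the product links on every iteration: opening a product page and coming
            # back invalidates previously found elements (stale element references).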
self.app.wd.find_elements_by_css_selector('.dataTable td:nth-of-type(3) a[href*="&product_id="]')[i].click()
[print(log) for log in self.app.wd.get_log("browser")]
self.app.wait_for_element_to_be_visible('#tab-general')
self.app.wd.find_element_by_css_selector('button[name="cancel"]').click()
| 50.833333 | 127 | 0.678689 | ["MIT"] | spcartman/selenium_full_course | v_python/fixture/admin_catalog.py | 610 | Python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from msccl.collectives import *
from msccl.algorithm import *
from msccl.instance import *
from msccl.topologies import *
def _alltoall_subproblem(local_nodes, num_copies):
remote_node = local_nodes
local_end = local_nodes * local_nodes
num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
remote_out_end = local_end + num_remote_pairs
num_chunks = remote_out_end + num_remote_pairs
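    # Chunk index layout: [0, local_end) are local-to-local pairs, [local_end, remote_out_end)
    # are chunks leaving through the remote node, and [remote_out_end, num_chunks) are chunks
    # arriving from other copies through the remote node.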
def cases(chunk, local,remote_out,remote_in):
if chunk < local_end:
return local(chunk)
elif chunk < remote_out_end:
return remote_out(chunk - local_end)
else:
return remote_in(chunk - remote_out_end)
def pre(rank, chunk):
return cases(chunk,
lambda c: rank == c % local_nodes,
lambda c: rank == (c // (num_copies - 1)) % local_nodes,
lambda c: rank == remote_node)
def post(rank, chunk):
return cases(chunk,
lambda c: rank == c // local_nodes,
lambda c: rank == remote_node,
lambda c: rank == (c // (num_copies - 1)) // local_nodes)
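    # On the remote (relay) node, each outgoing chunk is paired with its incoming counterpart;
    # the two ranges are offset by exactly num_remote_pairs. Other ranks have no trigger.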
def trigger(rank, chunk):
if rank == remote_node:
return cases(chunk,
lambda c: None,
lambda c: chunk + num_remote_pairs,
lambda c: chunk - num_remote_pairs)
else:
return None
return build_collective(f'AlltoallSubproblem(n={local_nodes},copies={num_copies})',
local_nodes + 1, num_chunks,
pre, post, trigger=trigger)
def make_alltoall_subproblem_collective_and_topology(topology, num_copies, relay_nodes, bw = 1, share_bw = False):
local_nodes = topology.num_nodes()
remote_node = local_nodes
links = [[0 for _ in range(local_nodes + 1)] for _ in range(local_nodes + 1)]
for src in range(local_nodes):
for dst in range(local_nodes):
links[dst][src] = topology.link(src, dst)
for relay in relay_nodes:
links[remote_node][relay] = bw
links[relay][remote_node] = bw
    switches = topology.switches.copy()
    if share_bw:
        # Relays share their bandwidth to/from the remote node (index local_nodes) through common switches.
        switches.append((relay_nodes, [remote_node], bw, 'remote_out'))
        switches.append(([remote_node], relay_nodes, bw, 'remote_in'))
    collective = _alltoall_subproblem(local_nodes, num_copies)
    topology = Topology(f'Subtopo(local={topology.name},relays=({",".join(str(i) for i in relay_nodes)}))', links, switches)
return collective, topology
def synthesize_alltoall_subproblem(subproblem_algo, num_copies, logging=False):
if subproblem_algo.is_pipelined():
raise ValueError('Pipelining is not supported.')
local_topology = subproblem_algo.topology
chunks = subproblem_algo.instance.chunks
local_nodes = local_topology.num_nodes() - 1
remote_node = local_nodes
nodes = local_nodes * num_copies
collective = alltoall(nodes).chunk_up(chunks)
# Create a distributed topology where copies of relay nodes that connect to the remote node in the subproblem
# topology are connected to all the relay nodes in the other copies.
links = [[0 for _ in range(nodes)] for _ in range(nodes)]
for dst in range(nodes):
for src in range(nodes):
local_src = src % local_nodes
local_dst = dst % local_nodes
if src // local_nodes != dst // local_nodes:
bw = min(local_topology.link(local_src, remote_node), local_topology.link(remote_node, local_dst))
links[dst][src] = bw
else:
links[dst][src] = local_topology.link(local_src, local_dst)
# Also make copies of switches with a similar expansion of the remote node into the nodes of other copies.
switches = []
for srcs, dsts, bw, name in local_topology.switches:
for i in range(num_copies):
def to_dist(ranks):
for rank in ranks:
if rank < remote_node:
# Non-remote nodes are just translated to the distributed numbering of ranks.
yield rank + i * local_nodes
else:
# Include all remote nodes in the switch. This is fine because the links already limit
# connectivity to just the relay nodes.
for r in range(nodes):
if r // local_nodes != i:
yield r
dist_srcs = list(to_dist(srcs))
dist_dsts = list(to_dist(dsts))
switches.append((dist_srcs, dist_dsts, bw, f'copy_{i}_{name}_local'))
topology = Topology(f'Stiched(sub={local_topology.name},copies={num_copies})', links, switches)
def nth_chunk_for_pair(src, dst, idx):
# The following chunk calculation respects both the _scattered and _transpose
# pre/postconditions in Alltoall. When substituting it in:
# -the precondition (chunk % self.num_nodes) simplifies to src
# -the postcondition ((chunk // self.num_nodes) % self.num_nodes) simplifies to dst
return (src + dst * collective.num_nodes) * chunks + idx
steps = []
# Calculate the ranges of the differently handled chunks
local_end = local_nodes * local_nodes
num_remote_pairs = (num_copies - 1) * local_nodes * local_nodes
remote_out_end = local_end + num_remote_pairs
num_chunks = remote_out_end + num_remote_pairs
for local_step in subproblem_algo.steps:
sends = []
# These are used to track operations involving remote nodes that get matched with another operation in the same
# step.
unmatched_sends = {}
unmatched_recvs = {}
# Stitch together copies of the subproblem algorithm
for chunk, src, dst in local_step.sends:
for i in range(num_copies):
def to_dist(rank):
# Translates ranks from the local to the distributed topology
return rank + i * local_nodes
def other_start(c):
# Given a relative remote chunk return local rank 0 in the copy it corresponds to
other_i = c % (num_copies - 1)
if other_i >= i:
other_i += 1
return other_i * local_nodes
# Calculate origin and target ranks that match the Alltoall pre/postconditions
if chunk < local_end:
assert src != remote_node and dst != remote_node
origin = to_dist((chunk // chunks) % local_nodes)
target = to_dist((chunk // chunks) // local_nodes)
# Check that the origin and target calculation match the local collective
assert subproblem_algo.collective.precondition(origin % local_nodes, chunk)
assert subproblem_algo.collective.postcondition(target % local_nodes, chunk)
elif chunk < remote_out_end:
c = chunk - local_end
local_origin = ((c // chunks) // (num_copies - 1)) % local_nodes
origin = to_dist(local_origin)
target = other_start(c) + ((c // (num_copies - 1))) // local_nodes
# Check that the origin and target calculation match the local collective
assert subproblem_algo.collective.precondition(local_origin, chunk)
assert subproblem_algo.collective.postcondition(target % local_nodes, chunk + num_remote_pairs)
else:
assert chunk < num_chunks
c = chunk - remote_out_end
local_target = ((c // chunks) // (num_copies - 1)) // local_nodes
target = to_dist(local_target)
origin = other_start(c) + ((c // (num_copies - 1))) % local_nodes
# Check that the origin and target calculation match the local collective
assert subproblem_algo.collective.precondition(origin % local_nodes, chunk - num_remote_pairs)
assert subproblem_algo.collective.postcondition(local_target, chunk)
# Get the chunk number in the distributed algorithm
chunk_idx = chunk % chunks
# Translate send src and dst to distributed space and add the send to the distributed algorithm
dist_chunk = nth_chunk_for_pair(origin, target, chunk_idx)
if dst == remote_node:
assert chunk < remote_out_end
# Sends to remote nodes have to find a matched receive
if dist_chunk in unmatched_recvs:
dist_dst = unmatched_recvs.pop(dist_chunk)
sends.append((dist_chunk, to_dist(src), dist_dst))
else:
unmatched_sends[dist_chunk] = to_dist(src)
elif src == remote_node:
assert chunk < num_chunks
# Receives from remote nodes have to find a matched send
if dist_chunk in unmatched_sends:
dist_src = unmatched_sends.pop(dist_chunk)
sends.append((dist_chunk, dist_src, to_dist(dst)))
else:
unmatched_recvs[dist_chunk] = to_dist(dst)
else:
# Sends locally are just translated to the new distributed space of ranks
sends.append((dist_chunk, to_dist(src), to_dist(dst)))
if len(unmatched_sends) > 0 or len(unmatched_recvs) > 0:
raise ValueError('Subproblem algorithm has unpaired sends/recvs.')
steps.append(Step(local_step.rounds, sends))
instance = Instance(
steps=len(steps),
extra_rounds=sum(step.rounds - 1 for step in steps),
chunks=chunks,
)
return Algorithm.make_implementation(collective, topology, instance, steps)
| 45.267857 | 133 | 0.60355 | ["MIT"] | angelica-moreira/sccl | msccl/distributors/alltoall_subproblem.py | 10,140 | Python |
import logging
from collections import OrderedDict
from pathlib import Path
from typing import List, Optional, Set, Tuple, Union
import numpy as np
import torch
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import minmax_scale
from tqdm import tqdm
import flair
from flair.data import Dictionary, Sentence, Span, SpanLabel
from flair.datasets import DataLoader, FlairDatapointDataset
from flair.embeddings import (
TokenEmbeddings,
TransformerDocumentEmbeddings,
TransformerWordEmbeddings,
)
from flair.file_utils import cached_path
from flair.models.sequence_tagger_model import SequenceTagger
from flair.models.text_classification_model import TextClassifier
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
class FewshotClassifier(flair.nn.Classifier[Sentence]):
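    # Base class for TARS-style few-shot/zero-shot models: inputs are reformatted into
    # <label, text> pairs and scored by an underlying binary/sequence `tars_model`.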
def __init__(self):
self._current_task = None
self._task_specific_attributes = {}
self.label_nearest_map = None
self.tars_model: flair.nn.Classifier[Sentence]
super(FewshotClassifier, self).__init__()
def forward_loss(
self, data_points: Union[List[Sentence], Sentence]
) -> Union[torch.Tensor, Tuple[torch.Tensor, int]]:
if not isinstance(data_points, list):
data_points = [data_points]
# Transform input data into TARS format
sentences = self._get_tars_formatted_sentences(data_points)
loss = self.tars_model.forward_loss(sentences)
return loss
@property
def tars_embeddings(self):
raise NotImplementedError
def _get_tars_formatted_sentence(self, label, sentence):
raise NotImplementedError
def _get_tars_formatted_sentences(self, sentences: List[Sentence]):
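        # Build one TARS-formatted sentence per (label, input) pair. During training with negative
        # sampling enabled only the gold labels plus sampled negatives are paired; otherwise every label is.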
label_text_pairs = []
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
for sentence in sentences:
label_text_pairs_for_sentence = []
if self.training and self.num_negative_labels_to_sample is not None:
positive_labels = list(
OrderedDict.fromkeys([label.value for label in sentence.get_labels(self.label_type)])
)
sampled_negative_labels = self._get_nearest_labels_for(positive_labels)
for label in positive_labels:
label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
for label in sampled_negative_labels:
label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
else:
for label in all_labels:
label_text_pairs_for_sentence.append(self._get_tars_formatted_sentence(label, sentence))
label_text_pairs.extend(label_text_pairs_for_sentence)
return label_text_pairs
def _get_nearest_labels_for(self, labels):
# if there are no labels, return a random sample as negatives
if len(labels) == 0:
tags = self.get_current_label_dictionary().get_items()
import random
sample = random.sample(tags, k=self.num_negative_labels_to_sample)
return sample
already_sampled_negative_labels = set()
# otherwise, go through all labels
for label in labels:
plausible_labels = []
plausible_label_probabilities = []
for plausible_label in self.label_nearest_map[label]:
if plausible_label in already_sampled_negative_labels or plausible_label in labels:
continue
else:
plausible_labels.append(plausible_label)
plausible_label_probabilities.append(self.label_nearest_map[label][plausible_label])
# make sure the probabilities always sum up to 1
plausible_label_probabilities = np.array(plausible_label_probabilities, dtype="float64")
plausible_label_probabilities += 1e-08
plausible_label_probabilities /= np.sum(plausible_label_probabilities)
if len(plausible_labels) > 0:
num_samples = min(self.num_negative_labels_to_sample, len(plausible_labels))
sampled_negative_labels = np.random.choice(
plausible_labels,
num_samples,
replace=False,
p=plausible_label_probabilities,
)
already_sampled_negative_labels.update(sampled_negative_labels)
return already_sampled_negative_labels
def train(self, mode=True):
"""Populate label similarity map based on cosine similarity before running epoch
If the `num_negative_labels_to_sample` is set to an integer value then before starting
each epoch the model would create a similarity measure between the label names based
on cosine distances between their BERT encoded embeddings.
"""
if mode and self.num_negative_labels_to_sample is not None:
self._compute_label_similarity_for_current_epoch()
super().train(mode)
super().train(mode)
def _compute_label_similarity_for_current_epoch(self):
"""
Compute the similarity between all labels for better sampling of negatives
"""
# get and embed all labels by making a Sentence object that contains only the label text
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
label_sentences = [Sentence(label) for label in all_labels]
self.tars_embeddings.eval() # TODO: check if this is necessary
self.tars_embeddings.embed(label_sentences)
self.tars_embeddings.train()
# get each label embedding and scale between 0 and 1
if isinstance(self.tars_embeddings, TokenEmbeddings):
encodings_np = [sentence[0].get_embedding().cpu().detach().numpy() for sentence in label_sentences]
else:
encodings_np = [sentence.get_embedding().cpu().detach().numpy() for sentence in label_sentences]
normalized_encoding = minmax_scale(encodings_np)
# compute similarity matrix
similarity_matrix = cosine_similarity(normalized_encoding)
# the higher the similarity, the greater the chance that a label is
# sampled as negative example
negative_label_probabilities = {}
for row_index, label in enumerate(all_labels):
negative_label_probabilities[label] = {}
for column_index, other_label in enumerate(all_labels):
if label != other_label:
negative_label_probabilities[label][other_label] = similarity_matrix[row_index][column_index]
self.label_nearest_map = negative_label_probabilities
def get_current_label_dictionary(self):
label_dictionary = self._task_specific_attributes[self._current_task]["label_dictionary"]
return label_dictionary
def get_current_label_type(self):
return self._task_specific_attributes[self._current_task]["label_type"]
def is_current_task_multi_label(self):
return self._task_specific_attributes[self._current_task]["multi_label"]
def add_and_switch_to_new_task(
self,
task_name,
label_dictionary: Union[List, Set, Dictionary, str],
label_type: str,
multi_label: bool = True,
force_switch: bool = False,
):
"""
Adds a new task to an existing TARS model. Sets necessary attributes and finally 'switches'
to the new task. Parameters are similar to the constructor except for model choice, batch
size and negative sampling. This method does not store the resultant model onto disk.
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of the labels you want to predict
:param label_type: string to identify the label type ('ner', 'sentiment', etc.)
:param multi_label: whether this task is a multi-label prediction problem
:param force_switch: if True, will overwrite existing task with same name
"""
if task_name in self._task_specific_attributes and not force_switch:
log.warning("Task `%s` already exists in TARS model. Switching to it.", task_name)
else:
# make label dictionary if no Dictionary object is passed
if isinstance(label_dictionary, Dictionary):
label_dictionary = label_dictionary.get_items()
if type(label_dictionary) == str:
label_dictionary = [label_dictionary]
# prepare dictionary of tags (without B- I- prefixes and without UNK)
tag_dictionary = Dictionary(add_unk=False)
for tag in label_dictionary:
if tag == "<unk>" or tag == "O":
continue
if tag[1] == "-":
tag = tag[2:]
tag_dictionary.add_item(tag)
else:
tag_dictionary.add_item(tag)
self._task_specific_attributes[task_name] = {
"label_dictionary": tag_dictionary,
"label_type": label_type,
"multi_label": multi_label,
}
self.switch_to_task(task_name)
def list_existing_tasks(self) -> Set[str]:
"""
Lists existing tasks in the loaded TARS model on the console.
"""
return set(self._task_specific_attributes.keys())
def switch_to_task(self, task_name):
"""
Switches to a task which was previously added.
"""
if task_name not in self._task_specific_attributes:
log.error(
"Provided `%s` does not exist in the model. Consider calling " "`add_and_switch_to_new_task` first.",
task_name,
)
else:
self._current_task = task_name
def _drop_task(self, task_name):
if task_name in self._task_specific_attributes:
if self._current_task == task_name:
log.error(
"`%s` is the current task." " Switch to some other task before dropping this.",
task_name,
)
else:
self._task_specific_attributes.pop(task_name)
else:
log.warning("No task exists with the name `%s`.", task_name)
@staticmethod
def _filter_empty_sentences(sentences: List[Sentence]) -> List[Sentence]:
filtered_sentences = [sentence for sentence in sentences if sentence.tokens]
if len(sentences) != len(filtered_sentences):
log.warning(f"Ignore {len(sentences) - len(filtered_sentences)} sentence(s) with no tokens.")
return filtered_sentences
@property
def label_type(self):
return self.get_current_label_type()
def predict_zero_shot(
self,
sentences: Union[List[Sentence], Sentence],
candidate_label_set: Union[List[str], Set[str], str],
multi_label: bool = True,
):
"""
Method to make zero shot predictions from the TARS model
:param sentences: input sentence objects to classify
:param candidate_label_set: set of candidate labels
:param multi_label: indicates whether multi-label or single class prediction. Defaults to True.
"""
# check if candidate_label_set is empty
if candidate_label_set is None or len(candidate_label_set) == 0:
log.warning("Provided candidate_label_set is empty")
return
# make list if only one candidate label is passed
if isinstance(candidate_label_set, str):
candidate_label_set = {candidate_label_set}
# create label dictionary
label_dictionary = Dictionary(add_unk=False)
for label in candidate_label_set:
label_dictionary.add_item(label)
# note current task
existing_current_task = self._current_task
# create a temporary task
self.add_and_switch_to_new_task(
task_name="ZeroShot",
label_dictionary=label_dictionary,
label_type="-".join(label_dictionary.get_items()),
multi_label=multi_label,
)
try:
# make zero shot predictions
self.predict(sentences)
finally:
# switch to the pre-existing task
self.switch_to_task(existing_current_task)
self._drop_task("ZeroShot")
return
class TARSTagger(FewshotClassifier):
"""
TARS model for sequence tagging. In the backend, the model uses a BERT based 5-class
sequence labeler which given a <label, text> pair predicts the probability for each word
to belong to one of the BIOES classes. The input data is a usual Sentence object which is inflated
by the model internally before pushing it through the transformer stack of BERT.
"""
static_label_type = "tars_label"
def __init__(
self,
task_name: Optional[str] = None,
label_dictionary: Optional[Dictionary] = None,
label_type: Optional[str] = None,
embeddings: Union[TransformerWordEmbeddings, str] = "bert-base-uncased",
num_negative_labels_to_sample: int = 2,
prefix: bool = True,
**tagger_args,
):
"""
        Initializes a TARSTagger
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of labels you want to predict
:param embeddings: name of the pre-trained transformer model e.g.,
'bert-base-uncased' etc
        :param num_negative_labels_to_sample: number of negative labels to sample for each
            positive label against a sentence during training. Defaults to 2 negative
            labels for each positive label. The model samples all the negative labels
            if None is passed, which slows down training considerably.
"""
super(TARSTagger, self).__init__()
if isinstance(embeddings, str):
embeddings = TransformerWordEmbeddings(
model=embeddings,
fine_tune=True,
layers="-1",
layer_mean=False,
)
# prepare TARS dictionary
tars_dictionary = Dictionary(add_unk=False)
tars_dictionary.add_item("entity")
tars_dictionary.span_labels = True
# initialize a bare-bones sequence tagger
self.tars_model: SequenceTagger = SequenceTagger(
hidden_size=123,
embeddings=embeddings,
tag_dictionary=tars_dictionary,
tag_type=self.static_label_type,
use_crf=False,
use_rnn=False,
reproject_embeddings=False,
**tagger_args,
)
# transformer separator
self.separator = str(self.tars_embeddings.tokenizer.sep_token)
if self.tars_embeddings.tokenizer._bos_token:
self.separator += str(self.tars_embeddings.tokenizer.bos_token)
self.prefix = prefix
self.num_negative_labels_to_sample = num_negative_labels_to_sample
if task_name and label_dictionary and label_type:
# Store task specific labels since TARS can handle multiple tasks
self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
else:
log.info(
"TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
"before training this model"
)
def _get_tars_formatted_sentence(self, label, sentence):
original_text = sentence.to_tokenized_string()
label_text_pair = (
f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
)
label_length = 0 if not self.prefix else len(label.split(" ")) + len(self.separator.split(" "))
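        # With a prefixed label, gold span token indices from the original sentence shift right by
        # label_length (number of label + separator tokens) in the TARS-formatted sentence.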
# make a tars sentence where all labels are O by default
tars_sentence = Sentence(label_text_pair, use_tokenizer=False)
for entity_label in sentence.get_labels(self.label_type):
if entity_label.value == label:
new_span = [tars_sentence.get_token(token.idx + label_length) for token in entity_label.span]
tars_sentence.add_complex_label(self.static_label_type, SpanLabel(Span(new_span), value="entity"))
return tars_sentence
def _get_state_dict(self):
model_state = {
"state_dict": self.state_dict(),
"current_task": self._current_task,
"tag_type": self.get_current_label_type(),
"tag_dictionary": self.get_current_label_dictionary(),
"tars_model": self.tars_model,
"num_negative_labels_to_sample": self.num_negative_labels_to_sample,
"prefix": self.prefix,
"task_specific_attributes": self._task_specific_attributes,
}
return model_state
@staticmethod
def _fetch_model(model_name) -> str:
if model_name == "tars-ner":
cache_dir = Path("models")
model_name = cached_path(
"https://nlp.informatik.hu-berlin.de/resources/models/tars-ner/tars-ner.pt",
cache_dir=cache_dir,
)
return model_name
@staticmethod
def _init_model_with_state_dict(state):
# init new TARS classifier
model = TARSTagger(
task_name=state["current_task"],
label_dictionary=state["tag_dictionary"],
label_type=state["tag_type"],
embeddings=state["tars_model"].embeddings,
num_negative_labels_to_sample=state["num_negative_labels_to_sample"],
prefix=state["prefix"],
)
# set all task information
model._task_specific_attributes = state["task_specific_attributes"]
# linear layers of internal classifier
model.load_state_dict(state["state_dict"])
return model
@property
def tars_embeddings(self):
return self.tars_model.embeddings
def predict(
self,
sentences: Union[List[Sentence], Sentence],
mini_batch_size=32,
return_probabilities_for_all_classes: bool = False,
verbose: bool = False,
label_name: Optional[str] = None,
return_loss=False,
embedding_storage_mode="none",
most_probable_first: bool = True,
):
# return
"""
Predict sequence tags for Named Entity Recognition task
:param sentences: a Sentence or a List of Sentence
        :param mini_batch_size: size of the mini-batch; larger is usually faster but consumes more memory,
            up to a point where it has no further effect.
        :param return_probabilities_for_all_classes: True to compute the score for each tag on each token,
            otherwise only the score of the best tag is returned
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
'gpu' to store embeddings in GPU memory.
"""
if label_name is None:
label_name = self.get_current_label_type()
# with torch.no_grad():
if not sentences:
return sentences
if not isinstance(sentences, list):
sentences = [sentences]
reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)
dataloader = DataLoader(
dataset=FlairDatapointDataset(reordered_sentences),
batch_size=mini_batch_size,
)
# progress bar for verbosity
if verbose:
dataloader = tqdm(dataloader)
overall_loss = 0
overall_count = 0
with torch.no_grad():
for batch in dataloader:
batch = self._filter_empty_sentences(batch)
# stop if all sentences are empty
if not batch:
continue
# go through each sentence in the batch
for sentence in batch:
# always remove tags first
sentence.remove_labels(label_name)
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
all_detected = {}
for label in all_labels:
tars_sentence = self._get_tars_formatted_sentence(label, sentence)
loss_and_count = self.tars_model.predict(
tars_sentence,
label_name=label_name,
return_loss=True,
)
overall_loss += loss_and_count[0].item()
overall_count += loss_and_count[1]
for predicted in tars_sentence.get_labels(label_name):
predicted.value = label
all_detected[predicted] = predicted.score
if most_probable_first:
import operator
already_set_indices: List[int] = []
sorted_x = sorted(all_detected.items(), key=operator.itemgetter(1))
sorted_x.reverse()
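                        # Greedily accept the highest-scoring spans first; spans that overlap an
                        # already-tagged token are skipped.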
for tuple in sorted_x:
# get the span and its label
label = tuple[0]
# label = span.get_labels("tars_temp_label")[0].value
label_length = (
0 if not self.prefix else len(label.value.split(" ")) + len(self.separator.split(" "))
)
# determine whether tokens in this span already have a label
tag_this = True
for token in label.span:
corresponding_token = sentence.get_token(token.idx - label_length)
if corresponding_token is None:
tag_this = False
continue
if token.idx in already_set_indices:
tag_this = False
continue
# only add if all tokens have no label
if tag_this:
already_set_indices.extend(token.idx for token in label.span)
predicted_span = [sentence.get_token(token.idx - label_length) for token in label.span]
sentence.add_complex_label(
label_name,
label=SpanLabel(Span(predicted_span), value=label.value, score=label.score),
)
# clearing token embeddings to save memory
store_embeddings(batch, storage_mode=embedding_storage_mode)
if return_loss:
return overall_loss, overall_count
class TARSClassifier(FewshotClassifier):
"""
TARS model for text classification. In the backend, the model uses a BERT based binary
text classifier which given a <label, text> pair predicts the probability of two classes
"True", and "False". The input data is a usual Sentence object which is inflated
by the model internally before pushing it through the transformer stack of BERT.
"""
static_label_type = "tars_label"
LABEL_MATCH = "YES"
LABEL_NO_MATCH = "NO"
def __init__(
self,
task_name: Optional[str] = None,
label_dictionary: Optional[Dictionary] = None,
label_type: Optional[str] = None,
embeddings: Union[TransformerDocumentEmbeddings, str] = "bert-base-uncased",
num_negative_labels_to_sample: int = 2,
prefix: bool = True,
**tagger_args,
):
"""
        Initializes a TARSClassifier
:param task_name: a string depicting the name of the task
:param label_dictionary: dictionary of labels you want to predict
:param embeddings: name of the pre-trained transformer model e.g.,
'bert-base-uncased' etc
        :param num_negative_labels_to_sample: number of negative labels to sample for each
            positive label against a sentence during training. Defaults to 2 negative
            labels for each positive label. The model samples all the negative labels
            if None is passed, which slows down training considerably.
        :param multi_label: auto-detected by default, but you can set this to True
            to force multi-label prediction or False to force single-label prediction
:param multi_label_threshold: If multi-label you can set the threshold to make predictions
:param beta: Parameter for F-beta score for evaluation and training annealing
"""
super(TARSClassifier, self).__init__()
if isinstance(embeddings, str):
embeddings = TransformerDocumentEmbeddings(
model=embeddings,
fine_tune=True,
layers="-1",
layer_mean=False,
)
# prepare TARS dictionary
tars_dictionary = Dictionary(add_unk=False)
tars_dictionary.add_item(self.LABEL_NO_MATCH)
tars_dictionary.add_item(self.LABEL_MATCH)
# initialize a bare-bones sequence tagger
self.tars_model = TextClassifier(
document_embeddings=embeddings,
label_dictionary=tars_dictionary,
label_type=self.static_label_type,
**tagger_args,
)
# transformer separator
self.separator = str(self.tars_embeddings.tokenizer.sep_token)
if self.tars_embeddings.tokenizer._bos_token:
self.separator += str(self.tars_embeddings.tokenizer.bos_token)
self.prefix = prefix
self.num_negative_labels_to_sample = num_negative_labels_to_sample
if task_name and label_dictionary and label_type:
# Store task specific labels since TARS can handle multiple tasks
self.add_and_switch_to_new_task(task_name, label_dictionary, label_type)
else:
log.info(
"TARS initialized without a task. You need to call .add_and_switch_to_new_task() "
"before training this model"
)
self.clean_up_labels = True
def _clean(self, label_value: str) -> str:
if self.clean_up_labels:
return label_value.replace("_", " ")
else:
return label_value
def _get_tars_formatted_sentence(self, label, sentence):
label = self._clean(label)
original_text = sentence.to_tokenized_string()
label_text_pair = (
f"{label} {self.separator} {original_text}" if self.prefix else f"{original_text} {self.separator} {label}"
)
sentence_labels = [self._clean(label.value) for label in sentence.get_labels(self.get_current_label_type())]
tars_label = self.LABEL_MATCH if label in sentence_labels else self.LABEL_NO_MATCH
tars_sentence = Sentence(label_text_pair, use_tokenizer=False).add_label(self.static_label_type, tars_label)
return tars_sentence
def _get_state_dict(self):
model_state = {
"state_dict": self.state_dict(),
"current_task": self._current_task,
"label_type": self.get_current_label_type(),
"label_dictionary": self.get_current_label_dictionary(),
"tars_model": self.tars_model,
"num_negative_labels_to_sample": self.num_negative_labels_to_sample,
"task_specific_attributes": self._task_specific_attributes,
}
return model_state
@staticmethod
def _init_model_with_state_dict(state):
# init new TARS classifier
label_dictionary = state["label_dictionary"]
label_type = "default_label" if not state["label_type"] else state["label_type"]
model: TARSClassifier = TARSClassifier(
task_name=state["current_task"],
label_dictionary=label_dictionary,
label_type=label_type,
embeddings=state["tars_model"].document_embeddings,
num_negative_labels_to_sample=state["num_negative_labels_to_sample"],
)
# set all task information
model._task_specific_attributes = state["task_specific_attributes"]
# linear layers of internal classifier
model.load_state_dict(state["state_dict"])
return model
@staticmethod
def _fetch_model(model_name) -> str:
model_map = {}
hu_path: str = "https://nlp.informatik.hu-berlin.de/resources/models"
model_map["tars-base"] = "/".join([hu_path, "tars-base", "tars-base-v8.pt"])
cache_dir = Path("models")
if model_name in model_map:
model_name = cached_path(model_map[model_name], cache_dir=cache_dir)
return model_name
@property
def tars_embeddings(self):
return self.tars_model.document_embeddings
def predict(
self,
sentences: Union[List[Sentence], Sentence],
mini_batch_size=32,
return_probabilities_for_all_classes: bool = False,
verbose: bool = False,
label_name: Optional[str] = None,
return_loss=False,
embedding_storage_mode="none",
label_threshold: float = 0.5,
multi_label: Optional[bool] = None,
):
"""
        Predict class labels for the given sentences
        :param sentences: a Sentence or a List of Sentence
        :param mini_batch_size: size of the mini-batch; larger is usually faster but consumes more memory,
            up to a point where it has no further effect.
        :param return_probabilities_for_all_classes: True to compute the score for each class,
            otherwise only the score of the best class is returned
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
'gpu' to store embeddings in GPU memory.
"""
if label_name is None:
label_name = self.get_current_label_type()
if multi_label is None:
multi_label = self.is_current_task_multi_label()
# with torch.no_grad():
if not sentences:
return sentences
if isinstance(sentences, Sentence):
sentences = [sentences]
# set context if not set already
previous_sentence = None
for sentence in sentences:
if sentence.is_context_set():
continue
sentence._previous_sentence = previous_sentence
sentence._next_sentence = None
if previous_sentence:
previous_sentence._next_sentence = sentence
previous_sentence = sentence
reordered_sentences = sorted(sentences, key=lambda s: len(s), reverse=True)
dataloader = DataLoader(
dataset=FlairDatapointDataset(reordered_sentences),
batch_size=mini_batch_size,
)
# progress bar for verbosity
if verbose:
progressbar = tqdm(dataloader)
progressbar.set_description("Batch inference")
dataloader = progressbar
overall_loss = 0
overall_count = 0
batch_no = 0
with torch.no_grad():
for batch in dataloader:
batch_no += 1
batch = self._filter_empty_sentences(batch)
# stop if all sentences are empty
if not batch:
continue
# go through each sentence in the batch
for sentence in batch:
# always remove tags first
sentence.remove_labels(label_name)
all_labels = [label.decode("utf-8") for label in self.get_current_label_dictionary().idx2item]
best_label = None
for label in all_labels:
tars_sentence = self._get_tars_formatted_sentence(label, sentence)
loss_and_count = self.tars_model.predict(
tars_sentence,
label_name=label_name,
return_loss=True,
return_probabilities_for_all_classes=True if label_threshold < 0.5 else False,
)
overall_loss += loss_and_count[0].item()
overall_count += loss_and_count[1]
# add all labels that according to TARS match the text and are above threshold
for predicted_tars_label in tars_sentence.get_labels(label_name):
if (
predicted_tars_label.value == self.LABEL_MATCH
and predicted_tars_label.score > label_threshold
):
# do not add labels below confidence threshold
sentence.add_label(label_name, label, predicted_tars_label.score)
# only use label with highest confidence if enforcing single-label predictions
if not multi_label:
if len(sentence.get_labels(label_name)) > 0:
# get all label scores and do an argmax to get the best label
label_scores = torch.tensor(
[label.score for label in sentence.get_labels(label_name)],
dtype=torch.float,
)
best_label = sentence.get_labels(label_name)[torch.argmax(label_scores)]
# remove previously added labels and only add the best label
sentence.remove_labels(label_name)
sentence.add_label(
typename=label_name,
value=best_label.value,
score=best_label.score,
)
# clearing token embeddings to save memory
store_embeddings(batch, storage_mode=embedding_storage_mode)
if return_loss:
return overall_loss, overall_count
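# Minimal zero-shot usage sketch (assumes a pretrained TARS model such as "tars-base" can be loaded):
#     tars = TARSClassifier.load("tars-base")
#     sentence = Sentence("I am so glad you liked it!")
#     tars.predict_zero_shot(sentence, ["happy", "sad"])
#     print(sentence.labels)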
| 40.6947 | 119 | 0.621436 | ["MIT"] | garciaeduardo7143/flair | flair/models/tars_model.py | 35,323 | Python |
# -*- coding: utf-8 -*-
"""Console script for python_learn."""
import sys
import click
@click.command()
def main(args=None):
"""Console script for python_learn."""
click.echo("Replace this message by putting your code into "
"python_learn.cli.main")
click.echo("See click documentation at http://click.pocoo.org/")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 22.526316 | 68 | 0.649533 | ["MIT"] | Zhazhanan/python_learn | python_learn/cli.py | 428 | Python |
# -------------------------------------------------------------------------
# Copyright (C) 2018 BMW Car IT GmbH
# -------------------------------------------------------------------------
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
# -------------------------------------------------------------------------
"""
Contains configuration constants
"""
G_WARNING_COUNT = 0
G_PROP_FILES = [
'proprietary',
'scripts/integration_tests/proprietary',
'scripts/integration_tests/run_smoke_tests_as_bat_tests.py',
'zuul.d'
]
G_LICENSE_TEMPLATE_OPEN = """
-------------------------------------------------------------------------
Copyright (C) [YYYY] BMW XXXX
-------------------------------------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at https://mozilla.org/MPL/2.0/.
-------------------------------------------------------------------------
"""
G_LICENSE_TEMPLATE_PROP = """
-------------------------------------------------------------------------
Copyright (C) [YYYY] BMW XXXX
All rights reserved.
-------------------------------------------------------------------------
This document contains proprietary information belonging to BMW XXXX.
Passing on and copying of this document, use and communication of its
contents is not permitted without prior written authorization.
-------------------------------------------------------------------------
"""
| 40.023256 | 76 | 0.440442 | ["MPL-2.0", "MPL-2.0-no-copyleft-exception"] | Yuehai-Zhou/GENIVI | scripts/code_style_checker/common_modules/config.py | 1,721 | Python |
import time
import random
import highfive
# This is the remote worker for the sum example. Here, we define what the
# workers do when they get a call from the master. All we need is a single
# function which takes the call, does some processing, and returns a response.
# An interesting way to play with the workers is to spin some up, then shut
# them down before the job set running on the master is complete. The jobs
# which the workers are running will be requeued on the master so that when
# more workers connect, the jobs will be tried again. This makes network
# problems no big deal as long as you reconnect the workers at some point.
# In our case, we take in a pair of numbers and return their sum. To make
# it easier to watch the progress of the job set in real time, we sleep for
# anywhere between 0 and 1/4 seconds before the sum to simulate heavy
# processing.
def delayed_sum(numbers):
time.sleep(random.random() / 4)
return sum(numbers)
# Now we can easily start a worker pool to connect to a local HighFive master.
# We can also add a `host=<host name>` and `port=<port number>` to connect to a
# remote HighFive master. By default, `run_worker_pool()` creates a worker
# process for each available CPU core to maximize CPU utilization, but we can
# limit this with `max_workers=<number of workers>`.
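# For example, a pool pointed at a remote master might be started like this (hypothetical
# host and port, using the options described above):
#     highfive.run_worker_pool(delayed_sum, host="192.0.2.10", port=12345, max_workers=2)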
if __name__ == "__main__":
try:
highfive.run_worker_pool(delayed_sum)
except KeyboardInterrupt:
print("keyboard interrupt")
| 38.333333 | 79 | 0.745819 | ["MIT"] | abau171/highfive | examples/sum_worker.py | 1,495 | Python |
import datetime
import inspect
import os
import pprint as pretty_print
import jinja2
from jinja2 import Environment, FileSystemLoader
DEFAULT_TEMPLATE_FOLDERS = ["templates"]
def get_log_errors(logs):
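    # Keep only log entries at ERROR level (numeric level 40) or above.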
return [e for e in logs.list() if e["level"] >= 40]
def make_list(obj):
return list(obj)
def pprint(obj):
return pretty_print.pformat(obj)
def format_time(time):
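    # Accepts either a timedelta or a duration in milliseconds and renders it as
    # hours:minutes with an "h" suffix, e.g. format_time(90 * 60 * 1000) == "1:30h".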
if not isinstance(time, datetime.timedelta):
time = datetime.timedelta(seconds=int(time / 1000.0))
return ":".join(str(time).split(":")[:2]) + "h"
FILTERS = {
"pprint": pprint,
"list": make_list,
"get_log_errors": get_log_errors,
"format_time": format_time,
}
GLOBALS = {"datetime": datetime, "str": str}
def get_environment(paths):
loader = FileSystemLoader(paths)
environment = Environment(loader=loader, lstrip_blocks=True, trim_blocks=True)
for filter_name, filter in FILTERS.items():
environment.filters[filter_name] = filter
for global_name, global_value in GLOBALS.items():
environment.globals[global_name] = global_value
return environment
class TemplateLoader:
def __init__(self):
self.paths = []
self.reload_env()
def add_path(self, path):
if path not in self.paths and os.path.isdir(path):
self.paths.append(path)
self.reload_env()
def auto_discover(self, path=None, folder=None):
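        # Resolve template folders relative to the calling module's directory (via the call stack).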
caller_folder = os.path.dirname(inspect.stack()[1][1])
if path:
caller_folder = os.path.join(caller_folder, path)
if folder:
self.add_path(os.path.join(caller_folder, folder))
else:
self.discover_folder(caller_folder)
def discover_folder(self, candidate_folder):
for folder in [
os.path.join(candidate_folder, dir) for dir in DEFAULT_TEMPLATE_FOLDERS
]:
self.add_path(folder)
def reload_env(self):
self.env = get_environment(self.paths)
def get_template(self, name):
if os.path.isabs(name): # If provided an absolute path to a template
environment = get_environment(os.path.dirname(name))
template = environment.get_template(os.path.basename(name))
else:
template = self.env.get_template(name)
return template
template_loader = TemplateLoader()
| 26.449438 | 83 | 0.661852 | ["BSD-3-Clause"] | ankitjavalkar/spidermon | spidermon/templates.py | 2,354 | Python |
# -*- coding: utf-8 -*-
#
# github-cli documentation build configuration file, created by
# sphinx-quickstart on Tue May 5 17:40:34 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'github-cli'
copyright = u'2009-2012, Sander Smits'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'github-clidoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'github-cli.tex', u'github-cli Documentation',
u'Sander Smits', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| 32.34359 | 80 | 0.722531 | ["BSD-3-Clause"] | jsmits/github-cli | docs/source/conf.py | 6,307 | Python |
from __future__ import print_function
import argparse
import os
import pickle
import sys
import cv2
import numpy as np
import torch
import vlfeat # calls constructor
from sklearn.cluster import MiniBatchKMeans
from src.utils.cluster.eval_metrics import _hungarian_match, _original_match, \
_acc
from src.utils.segmentation.data import make_Coco_dataloaders, \
make_Potsdam_dataloaders
SIFT_DLEN = 128
SIFT_STEP = 10
def _get_vectorised_sift_samples(archetype_config, dataloader):
# returns num unmasked pixels x SIFT_DLEN, in uint8 format
# operates on greyscale 128 bit images
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz # estimate
img_sz = archetype_config.input_sz
# cluster individual (box central) pixels
desc_side = int(img_sz / SIFT_STEP)
print("img sz %d, desc_side %d" % (img_sz, desc_side))
sys.stdout.flush()
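  # One dense-SIFT descriptor per SIFT_STEP x SIFT_STEP cell; masks and labels are read at
  # the central pixel of each cell.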
descs_all = np.zeros((num_imgs_max, desc_side * desc_side,
SIFT_DLEN), dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, desc_side * desc_side), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
# when descriptor matrix flattened, goes along rows first (rows change slow)
central_inds_h = (np.arange(desc_side) * SIFT_STEP +
(SIFT_STEP / 2)).reshape((desc_side, 1)).repeat(desc_side,
axis=1)
central_inds_w = (np.arange(desc_side) * SIFT_STEP +
(SIFT_STEP / 2)).reshape((1, desc_side)).repeat(desc_side,
axis=0)
central_inds_h, central_inds_w = central_inds_h.reshape(-1), \
central_inds_w.reshape(-1)
for b_i, batch in enumerate(dataloader):
if len(batch) == 3: # test dataloader
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, desc_side * desc_side),
dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else: # training dataloader
store_labels = False
imgs, _, _, masks = batch
# imgs currently channel first, [0-1] range, floats
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
for i in range(curr_batch_sz):
grey_img = cv2.cvtColor(imgs[i, :, :, :], cv2.COLOR_RGB2GRAY)
locs, descs = vlfeat.vl_dsift(grey_img, step=SIFT_STEP)
descs = descs.transpose((1, 0)) # 40*40, 128
descs = descs.reshape(-1, SIFT_DLEN) # rows change slowest
# get the corresponding box central mask/label
mask = masks[i][central_inds_h, central_inds_w]
offset = start + i
descs_all[offset, :, :] = descs
masks_all[offset, :] = mask
if store_labels:
label = labels[i][central_inds_h, central_inds_w]
labels_all[offset, :] = label
actual_num_imgs += curr_batch_sz
descs_all = descs_all[:actual_num_imgs, :, :]
masks_all = masks_all[:actual_num_imgs, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = descs_all[masks_all, :].reshape(-1, SIFT_DLEN)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def _get_vectorised_colour_samples(archetype_config, dataloader):
num_batches, batch_sz = len(dataloader), archetype_config.dataloader_batch_sz
num_imgs_max = num_batches * batch_sz # estimate
img_sz = archetype_config.input_sz
# cluster individual pixels
imgs_all = np.zeros(
(num_imgs_max, img_sz, img_sz, archetype_config.in_channels),
dtype=np.uint8)
masks_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.bool)
labels_all = None
actual_num_imgs = 0
for b_i, batch in enumerate(dataloader):
if len(batch) == 3:
store_labels = True
if (labels_all is None):
labels_all = np.zeros((num_imgs_max, img_sz, img_sz), dtype=np.int32)
imgs, labels, masks = batch
labels = labels.cpu().numpy().astype(np.int32)
else:
store_labels = False
imgs, _, _, masks = batch
# channels last
imgs = (imgs * 255.).permute(0, 2, 3, 1).cpu().numpy().astype(np.uint8)
masks = masks.cpu().numpy().astype(np.bool)
curr_batch_sz, h, w, c = imgs.shape
assert (h == archetype_config.input_sz and w == archetype_config.input_sz
and c == archetype_config.in_channels)
if b_i < num_batches - 1:
assert (batch_sz == curr_batch_sz)
start = b_i * batch_sz
imgs_all[start:(start + curr_batch_sz), :, :, :] = imgs
masks_all[start:(start + curr_batch_sz), :, :] = masks
if store_labels:
labels_all[start:(start + curr_batch_sz), :, :] = labels
actual_num_imgs += curr_batch_sz
imgs_all = imgs_all[:actual_num_imgs, :, :, :]
masks_all = masks_all[:actual_num_imgs, :, :]
num_unmasked = masks_all.sum()
if store_labels:
labels_all = labels_all[:actual_num_imgs, :, :]
samples_labels = labels_all[masks_all].reshape(-1)
assert (samples_labels.shape[0] == num_unmasked)
samples = imgs_all[masks_all, :].reshape(-1, archetype_config.in_channels)
assert (samples.shape[0] == num_unmasked)
if not store_labels:
return samples
else:
return samples, samples_labels
def main():
# based on segmentation_multioutput_twohead - we pass in the config of the
# IID run we are comparing against, so the settings can be copied
parser = argparse.ArgumentParser()
parser.add_argument("--model_ind", type=int, required=True)
parser.add_argument("--out_root", type=str,
default="/scratch/shared/slow/xuji/iid_private")
parser.add_argument("--IID_model_ind", type=int, required=True)
parser.add_argument("--max_num_train", type=int, required=True)
parser.add_argument("--test_code", default=False, action="store_true")
parser.add_argument("--do_sift", default=False, action="store_true")
config = parser.parse_args()
config.out_dir = os.path.join(config.out_root, str(config.model_ind))
if not os.path.exists(config.out_dir):
os.makedirs(config.out_dir)
archetype_config_path = os.path.join(config.out_root,
str(config.IID_model_ind),
"config.pickle")
print("Loading archetype config from: %s" % archetype_config_path)
with open(archetype_config_path, "rb") as config_f:
archetype_config = pickle.load(config_f)
assert (config.IID_model_ind == archetype_config.model_ind)
assert (archetype_config.mode == "IID") # compare against fully unsup
sample_fn = _get_vectorised_colour_samples
if config.do_sift:
sample_fn = _get_vectorised_sift_samples
# set it to be only rgb (and ir if nec) but no sobel - we're clustering
# single pixel colours
archetype_config.include_rgb = True
archetype_config.no_sobel = True
if "Coco" in archetype_config.dataset:
assert (not archetype_config.using_IR)
archetype_config.in_channels = 3
elif archetype_config.dataset == "Potsdam": # IR
assert (archetype_config.using_IR)
archetype_config.in_channels = 4
# Data
# -------------------------------------------------------------------------
if "Coco" in archetype_config.dataset:
dataloaders_head_A, mapping_assignment_dataloader, \
mapping_test_dataloader = \
make_Coco_dataloaders(archetype_config)
elif archetype_config.dataset == "Potsdam":
dataloaders_head_A, mapping_assignment_dataloader, \
mapping_test_dataloader = \
make_Potsdam_dataloaders(archetype_config)
else:
raise NotImplementedError
# unlike in clustering script for STL - isn't any data from unknown classes
dataloaders_head_B = dataloaders_head_A
# networks and optimisers
# ------------------------------------------------------
assert (archetype_config.num_dataloaders == 1)
dataloader = dataloaders_head_B[0]
samples = sample_fn(archetype_config, dataloader)
print("got training samples")
sys.stdout.flush()
if config.test_code:
print("testing code, taking 10000 samples only")
samples = samples[:10000, :]
else:
num_samples_train = min(samples.shape[0], config.max_num_train)
print("taking %d samples" % num_samples_train)
chosen_inds = np.random.choice(samples.shape[0], size=num_samples_train,
replace=False)
samples = samples[chosen_inds, :]
print(samples.shape)
sys.stdout.flush()
kmeans = MiniBatchKMeans(n_clusters=archetype_config.gt_k, verbose=1).fit(
samples)
print("trained kmeans")
sys.stdout.flush()
# use mapping assign to assign output_k=gt_k to gt_k
# and also assess on its predictions, since it's identical to
# mapping_test_dataloader
assign_samples, assign_labels = sample_fn(archetype_config,
mapping_assignment_dataloader)
num_samples = assign_samples.shape[0]
assign_preds = kmeans.predict(assign_samples)
print("finished prediction for mapping assign/test data")
sys.stdout.flush()
assign_preds = torch.from_numpy(assign_preds).cuda()
assign_labels = torch.from_numpy(assign_labels).cuda()
if archetype_config.eval_mode == "hung":
match = _hungarian_match(assign_preds, assign_labels,
preds_k=archetype_config.gt_k,
targets_k=archetype_config.gt_k)
elif archetype_config.eval_mode == "orig": # flat!
match = _original_match(assign_preds, assign_labels,
preds_k=archetype_config.gt_k,
targets_k=archetype_config.gt_k)
elif archetype_config.eval_mode == "orig_soft":
assert (False) # not used
# reorder predictions to be same cluster assignments as gt_k
found = torch.zeros(archetype_config.gt_k)
reordered_preds = torch.zeros(num_samples).to(torch.int32).cuda()
for pred_i, target_i in match:
reordered_preds[assign_preds == pred_i] = target_i
found[pred_i] = 1
assert (found.sum() == archetype_config.gt_k) # each output_k must get mapped
acc = _acc(reordered_preds, assign_labels, archetype_config.gt_k)
print("got acc %f" % acc)
config.epoch_acc = [acc]
config.centroids = kmeans.cluster_centers_
config.match = match
# write results and centroids to model_ind output file
with open(os.path.join(config.out_dir, "config.pickle"), "w") as outfile:
pickle.dump(config, outfile)
with open(os.path.join(config.out_dir, "config.txt"), "w") as text_file:
text_file.write("%s" % config)
if __name__ == "__main__":
main()
| 36.618421 | 80 | 0.676967 | [
"MIT"
] | THinnerichs/MiS-Information-Clustering | src/scripts/segmentation/baselines/kmeans_and_sift.py | 11,132 | Python |
from ruamel import yaml
import great_expectations as ge
if __name__ == "__main__":
context = ge.get_context()
datasource_config = {
"name": "my_notion_pandas_data_source",
"class_name": "Datasource",
"module_name": "great_expectations.datasource",
"execution_engine": {
"module_name": "great_expectations.execution_engine",
"class_name": "PandasExecutionEngine",
},
"data_connectors": {
"my_notion_pandas_data_connector": {
"class_name": "RuntimeDataConnector",
"module_name": "great_expectations.datasource.data_connector",
"batch_identifiers": ["default_identifier_name"],
},
},
}
context.test_yaml_config(yaml.dump(datasource_config))
context.add_datasource(**datasource_config)
| 31.777778 | 78 | 0.630536 | [
"MIT"
] | datarootsio/notion-dbs-data-quality | src/build_data_source.py | 858 | Python |
from django.db import models
from django.template.defaultfilters import truncatechars
from django.utils import timezone
from camper.sked.models import Event, Session
from camper.twit.threads import SendTweetThread
class TweetTooLongError(Exception):
def __init__(self, msg=None):
self.msg = msg
if not self.msg:
self.msg = 'Adding this session would result in a tweet longer than 140 characters.'
class AlreadyAssignedError(Exception):
def __init__(self, msg=None):
self.msg = msg
if not self.msg:
self.msg = 'This session already belongs to a tweet in this sequence.'
class Tweet(models.Model):
sent_at = models.DateTimeField(blank=True, null=True)
class Meta:
abstract = True
def send(self):
# ''' This is weird. It can only be called from the first tweet in
# a series, raising NotImplementedError if called on a non-initial tweet.
# It spins off a thread to make the actual api calls, which
# manages state within the series.
# '''
if self.previous:
raise NotImplementedError('Serial tweets can only be sent from the beginning.')
SendTweetThread(self).start()
@property
def is_sent(self):
return self.sent_at is not None
class SessionBlockTweetManager(models.Manager):
def unsent(qs):
return qs.filter(sent_at=None, previous=None)
class SessionBlockTweet(Tweet):
timeslot = models.DateTimeField()
event = models.ForeignKey(Event, related_name="session_tweets")
session_ids = models.CommaSeparatedIntegerField(max_length=128,
blank=True, default="")
previous = models.OneToOneField('SessionBlockTweet', blank=True,
null=True, unique=True, related_name="next")
objects = SessionBlockTweetManager()
class Meta:
ordering = ('-timeslot', 'id')
def __unicode__(self):
try:
return 'Tweet %s of %s for %s at %s' % (
self.index + 1, self.total, self.timeslot, self.event)
except:
return 'Tweet for %s at %s' % (self.timeslot, self.event)
def touch(self):
self._seq = None
self._sessions = None
def get_sequence(self):
try:
if self._seq is not None:
return self._seq
except AttributeError:
pass
seq = []
cursor = self
while cursor.previous:
cursor = cursor.previous
seq.append(cursor)
while True:
try:
cursor = cursor.next
seq.append(cursor)
except SessionBlockTweet.DoesNotExist:
break
self._seq = seq
return self.get_sequence()
def first_in_sequence(self):
seq = self.get_sequence()
return seq[0]
def get_session_ids(self):
try:
return [int(id) for id in self.session_ids.split(',')]
except:
return []
def add_session(self, session):
if self.length < 140:
assigned = [id for tweet in self.get_sequence() for id in tweet.get_session_ids()]
if session.id in assigned:
raise AlreadyAssignedError()
locally_assigned = self.get_session_ids()
locally_assigned.append(session.id)
self.session_ids = ','.join([str(id) for id in locally_assigned])
self.touch()
if self.length > 140:
if self.sessions.count() > 1:
self.remove_session(session)
raise TweetTooLongError()
else:
raise TweetTooLongError()
def remove_session(self, session):
self.session_ids = ','.join([str(id) for
id in self.get_session_ids() if
id != session.id])
self.touch()
@property
def sessions(self):
try:
if self._sessions is not None:
return self._sessions
except AttributeError:
pass
try:
self._sessions = Session.objects.filter(id__in=self.get_session_ids())
except ValueError:
self._sessions = Session.objects.none()
return self.sessions
@property
def index(self):
seq = self.get_sequence()
return seq.index(self)
@property
def is_first(self):
return self.previous is None
@property
def is_last(self):
try:
return self.next is None
except SessionBlockTweet.DoesNotExist:
return True
@property
def total(self):
seq = self.get_sequence()
return len(seq)
@property
def text(self):
txt = u''
if self.is_first:
txt += u'Coming up at %s: ' % (self.timeslot
.astimezone(timezone.get_current_timezone())
.strftime('%-I:%M'))
txt += u', '.join(['%s (%s)' % (truncatechars(s.title, 120) if
self.sessions.count() is 1 else
s.title, s.location.name) for
s in self.sessions])
return txt
@property
def length(self):
return len(self.text)
| 30.58427 | 96 | 0.560434 | [
"BSD-3-Clause"
] | drinks/camper | camper/twit/models.py | 5,444 | Python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import re
import logging
from itertools import groupby
from six.moves.cPickle import dumps, loads
from collections import defaultdict
from six.moves.urllib.parse import unquote
import bson
import pymongo
from tg import tmpl_context as c
from ming import collection, Field, Index
from ming import schema as S
from ming.utils import LazyProperty
from ming.orm import session, mapper
from ming.orm import ForeignIdProperty, RelationProperty
from allura.lib import helpers as h
from .session import main_doc_session, main_orm_session
from .project import Project
import six
log = logging.getLogger(__name__)
# Collection definitions
ArtifactReferenceDoc = collection(
str('artifact_reference'), main_doc_session,
Field('_id', str),
Field('artifact_reference', dict(
cls=S.Binary(),
project_id=S.ObjectId(),
app_config_id=S.ObjectId(),
artifact_id=S.Anything(if_missing=None))),
Field('references', [str], index=True),
Index('artifact_reference.project_id'), # used in ReindexCommand
)
ShortlinkDoc = collection(
str('shortlink'), main_doc_session,
Field('_id', S.ObjectId()),
# index needed for from_artifact() and index_tasks.py:del_artifacts
Field('ref_id', str, index=True),
Field('project_id', S.ObjectId()),
Field('app_config_id', S.ObjectId()),
Field('link', str),
Field('url', str),
# used by from_links() More helpful to have project_id first, for other
# queries
Index('project_id', 'link'),
)
# Class definitions
class ArtifactReference(object):
@classmethod
def from_artifact(cls, artifact):
'''Upsert logic to generate an ArtifactReference object from an artifact'''
obj = cls.query.get(_id=artifact.index_id())
if obj is not None:
return obj
try:
obj = cls(
_id=artifact.index_id(),
artifact_reference=dict(
cls=bson.Binary(dumps(artifact.__class__, protocol=2)),
project_id=artifact.app_config.project_id,
app_config_id=artifact.app_config._id,
artifact_id=artifact._id))
session(obj).flush(obj)
return obj
except pymongo.errors.DuplicateKeyError: # pragma no cover
session(obj).expunge(obj)
return cls.query.get(_id=artifact.index_id())
@LazyProperty
def artifact(self):
'''Look up the artifact referenced'''
aref = self.artifact_reference
try:
cls = loads(six.binary_type(aref.cls))
with h.push_context(aref.project_id):
return cls.query.get(_id=aref.artifact_id)
except Exception:
log.exception('Error loading artifact for %s: %r',
self._id, aref)
class Shortlink(object):
'''Collection mapping shorthand_ids for artifacts to ArtifactReferences'''
# Regexes used to find shortlinks
_core_re = r'''(\[
(?:(?P<project_id>.*?):)? # optional project ID
(?:(?P<app_id>.*?):)? # optional tool ID
(?P<artifact_id>.*) # artifact ID
\])'''
re_link_1 = re.compile(r'\s' + _core_re, re.VERBOSE)
re_link_2 = re.compile(r'^' + _core_re, re.VERBOSE)
def __repr__(self):
return '<Shortlink %s %s %s -> %s>' % (
self.project_id,
self.app_config_id,
self.link,
self.ref_id)
@classmethod
def lookup(cls, link):
return cls.from_links(link)[link]
@classmethod
def from_artifact(cls, a):
result = cls.query.get(ref_id=a.index_id())
if result is None:
try:
result = cls(
ref_id=a.index_id(),
project_id=a.app_config.project_id,
app_config_id=a.app_config._id)
session(result).flush(result)
except pymongo.errors.DuplicateKeyError: # pragma no cover
session(result).expunge(result)
result = cls.query.get(ref_id=a.index_id())
result.link = a.shorthand_id()
result.url = a.url()
if result.link is None:
result.delete()
return None
return result
@classmethod
def from_links(cls, *links):
'''Convert a sequence of shortlinks to the matching Shortlink objects'''
if len(links):
result = {}
# Parse all the links
parsed_links = dict((link, cls._parse_link(link))
for link in links)
links_by_artifact = defaultdict(list)
project_ids = set()
for link, d in list(parsed_links.items()):
if d:
project_ids.add(d['project_id'])
links_by_artifact[unquote(d['artifact'])].append(d)
else:
result[link] = parsed_links.pop(link)
q = cls.query.find(
dict(
link={'$in': list(links_by_artifact.keys())},
project_id={'$in': list(project_ids)}
),
validate=False,
sort=[('_id', pymongo.DESCENDING)], # if happen to be multiple (ticket move?) have newest first
)
matches_by_artifact = dict(
(link, list(matches))
for link, matches in groupby(q, key=lambda s: unquote(s.link)))
for link, d in six.iteritems(parsed_links):
matches = matches_by_artifact.get(unquote(d['artifact']), [])
matches = (
m for m in matches
if m.project.shortname == d['project'] and
m.project.neighborhood_id == d['nbhd'] and
m.app_config is not None and
m.project.app_instance(m.app_config.options.mount_point))
if d['app']:
matches = (
m for m in matches
if m.app_config.options.mount_point == d['app'])
result[link] = cls._get_correct_match(link, list(matches))
return result
else:
return {}
@classmethod
def _get_correct_match(cls, link, matches):
result = None
if len(matches) == 1:
result = matches[0]
elif len(matches) > 1 and getattr(c, 'app', None):
# use current app's link
for m in matches:
if m.app_config_id == c.app.config._id:
result = m
break
if not result:
cls.log_ambiguous_link('Can not remove ambiguity for link %s with c.app %s', matches, link, c.app)
result = matches[0]
elif len(matches) > 1 and not getattr(c, 'app', None):
cls.log_ambiguous_link('Ambiguous link to %s and c.app is not present to remove ambiguity', matches, link)
result = matches[0]
return result
@classmethod
def log_ambiguous_link(cls, msg, matches, *args):
log.warn(msg, *args)
for m in matches:
log.warn('... %r', m)
@classmethod
def _parse_link(cls, s):
'''Parse a shortlink into its nbhd/project/app/artifact parts'''
s = s.strip()
if s.startswith('['):
s = s[1:]
if s.endswith(']'):
s = s[:-1]
parts = s.split(':')
p_shortname = None
p_id = None
p_nbhd = None
if getattr(c, 'project', None):
p_shortname = getattr(c.project, 'shortname', None)
p_id = getattr(c.project, '_id', None)
p_nbhd = c.project.neighborhood_id
if len(parts) == 3:
p = Project.query.get(shortname=parts[0], neighborhood_id=p_nbhd)
if p:
p_id = p._id
return dict(
nbhd=p_nbhd,
project=parts[0],
project_id=p_id,
app=parts[1],
artifact=parts[2])
elif len(parts) == 2:
return dict(
nbhd=p_nbhd,
project=p_shortname,
project_id=p_id,
app=parts[0],
artifact=parts[1])
elif len(parts) == 1:
return dict(
nbhd=p_nbhd,
project=p_shortname,
project_id=p_id,
app=None,
artifact=parts[0])
else:
return None
# Mapper definitions
mapper(ArtifactReference, ArtifactReferenceDoc, main_orm_session)
mapper(Shortlink, ShortlinkDoc, main_orm_session, properties=dict(
ref_id=ForeignIdProperty(ArtifactReference),
project_id=ForeignIdProperty('Project'),
app_config_id=ForeignIdProperty('AppConfig'),
project=RelationProperty('Project'),
app_config=RelationProperty('AppConfig'),
ref=RelationProperty(ArtifactReference)))
| 36.225455 | 118 | 0.57639 | [
"Apache-2.0"
] | brondsem/allura | Allura/allura/model/index.py | 9,962 | Python |
import os, sys
sys.path.insert(0, os.path.join("..",".."))
from nodebox.graphics.context import *
from nodebox.graphics import *
from nodebox.graphics.geometry import coordinates
from nodebox.graphics.shader import dropshadow, OffscreenBuffer, transparent, stretch
from time import time
flower = Image("cell.png")
shadow = dropshadow(flower, alpha=1.0) # = image(blur(flower), color=(0,0,0,1))
# Each "flower" is drawn with a shadow underneath to add some depth.
# The global shadow layer is at the bottom of the plant.
# Ideally, each growing root would have its own shadow,
# but it is faster this way using only one offscreen buffer for all global shadows
# and a pre-rendered shadow image for each individual flower.
class Root:
def __init__(self, x, y, angle=90, radius=20, step=60, time=1.0, color=Color(0)):
self.x = x
self.y = y
self.angle = angle
self.radius = radius # Segment length.
self.step = step # Maximum left or right rotation from current angle.
self.time = time
self.color = color
def copy(self):
return Root(
self.x,
self.y,
self.angle,
self.radius,
self.step,
self.time,
self.color.copy())
def update(self):
# The performance trick is that we don't keep a history,
# e.g. no list with all the previous segments in the growing root.
# We simply keep the position and heading of the last segment.
# The previous segments have been rendered in a texture, i.e. they are "frozen".
self.x, self.y = coordinates(self.x, self.y, self.radius, self.angle)
self.angle += random(-self.step, self.step)
self.time *= 0.8 + random(0.2)
def draw(self):
push()
translate(self.x, self.y)
strokewidth(2)
stroke(
self.color.r,
self.color.g,
self.color.b,
self.color.a * self.time) # More transparent over time.
ellipse(0, 0,
width = 0.2+ 0.5 * self.time * self.radius,
height = 0.2+ 0.5 * self.time * self.radius) # Smaller over time.
rotate(self.angle)
line(0, 0, self.radius, 0)
scale(0.2 + self.time)
image(shadow, -15, -15, width=20, height=20, alpha=0.5)
image(flower, -10, -10, width=20, height=20, alpha=0.5,
color=(canvas.mouse.relative_x*0.5+0.5, 1, self.time+0.5, 1))
pop()
CLR = Color(0.27,0.29,0.36)
CLR = lighter(CLR, 0.3)
plant = [Root(200, -50, color=CLR) for i in range(10)]
def grow(plant=[], branch=0.01):
""" Updates each root in the given list to a new position.
Roots can branch and will disappear over time.
Returns the updated list.
"""
new = []
for root in plant:
root.update()
if root.time > 0.05:
new.append(root)
elif len(plant) < 50:
# Replace the disappeared root with a new one.
# Vary the time (=lifespan) so new roots appear at irregular intervals.
x, y, angle = choice((
(200 + random(50), -50, 90+random(-10,10)),
#(-50, random(50), 0)
))
new.append(Root(x, y, angle=angle, color=CLR, time=random(0.5, 3.5, bias=0.3)))
if random() < branch:
new.append(root.copy())
return new
# Roots are drawn into an offscreen buffer instead of directly to the screen.
# This way we get an image with a transparent background, which we can use
# to generate a dropshadow on-the-fly.
# The bigger the size of the buffer, the more pixels and the slower it gets.
# We work at a lower resolution and then scale the buffer up to the size of the screen.
RESOLUTION = 0.5
buffer = OffscreenBuffer(
RESOLUTION * canvas.screen.width,
RESOLUTION * canvas.screen.height)
def draw(canvas):
# It takes some juggling with the contrast of the colors to avoid artefacts.
colorplane(0, 0, canvas.width, canvas.height,
lighter(color(0.14, 0.13, 0.18)),
color(0.07, 0.06, 0.14),
color(0.14, 0.20, 0.18),
color(0.07, 0.06, 0.14))
global plant
plant = grow(plant)
# Draw each root in the offscreen texture.
# The texture already contains whatever was drawn in it previous frame.
buffer.push()
for root in plant:
root.draw()
root.step = canvas.mouse.relative_x * 60
root.radius = canvas.mouse.relative_y * 30
buffer.pop()
# Every few frames, make the buffered image more transparent,
# so that old content fades away.
if canvas.frame % 2 == 0 and not canvas.mouse.pressed:
buffer.texture = transparent(buffer.texture, 0.9).texture
# Scale up the buffered image to the screen size.
# Draw the image with a dropshadow effect.
# Since the offscreen buffer is scaled, the edges will look rough.
# Apply a small blur effect to smoothen them.
img = buffer.texture
#img = mirror(img, vertical=True, dx=0.35, dy=0) # Interesting patterns.
image(dropshadow(img, alpha=1.0, amount=1), 0, -50,
width = canvas.width,
height = canvas.height+50)
# Hypnotizing breathing effect:
img = stretch(img, 0.2, 0.1, radius=0.75, zoom=0.4-cos(canvas.frame*0.01)*0.4)
image(img, 0, 0,
width = canvas.width,
height = canvas.height,
)#filter = blurred(scale=0.75))
canvas.fps = 20
canvas.size = 800, 600
canvas.fullscreen = True
canvas.run(draw) | 37.304636 | 91 | 0.606071 | [
"BSD-3-Clause"
] | pepsipepsi/nodebox_opengl_python3 | examples/07-filter/09-buffer.py | 5,633 | Python |
import tkinter as tk
from PIL import Image, ImageTk
# The Custom Variable Widgets
class MyBar(tk.Canvas) :
def __init__(self, master:object, shape:object, value=0, maximum=100,
bg="#231303", trough_color='#8a7852', bar_color='#f7f4bf'):
"""Creating the alpha mask and creating a custom widget of the given shape and dimensions."""
# open shape mask with PIL
im_shape_alpha = Image.open(shape).convert('L')
# create bar shape image with the choosen backgroound color
im_shape = Image.new('RGBA', im_shape_alpha.size, bg)
# apply shape as alpha mask to "cut out" the bar shape
im_shape.putalpha(im_shape_alpha)
width, height = im_shape_alpha.size
# create the canvas
tk.Canvas.__init__(self, master, bg=trough_color, width=width, height=height, highlightthickness=0)
self._value = value # bar value
self.maximum = maximum # maximum value
# bar width and height
self.height = height
self.width = width
# create tkinter image for the shape from the PIL Image
self.img_trough = ImageTk.PhotoImage(im_shape, master=self)
# create bar to display the value
self.create_rectangle(0, height, width, height * (1 - value/self.maximum), width=0, fill=bar_color, tags='pbar')
# display shape on top
self.create_image(0, 0, anchor='nw', image=self.img_trough)
@property
def value(self):
"""Return bar's value."""
return self._value
@value.setter
def value(self, value:int):
"""Set bar's value."""
self._value = value
# adjust bar height to value
self.coords('pbar', 0, self.height, self.width, self.height*(1 - value/self.maximum)) | 40.409091 | 120 | 0.644544 | [
"MIT"
] | 1337-inc/Captain | Scripts/mybar.py | 1,778 | Python |
from typing import List, Generator
def n_gons(partial: List[int], size: int, sums: int=None) -> \
Generator[List[int], None, None]:
length = len(partial)
if length == size * 2:
yield partial
for i in range(1, size * 2 + 1):
if i in partial:
continue
partial.append(i)
if length == 2:
sums = sum(partial[0: 3])
elif (length > 2 and length % 2 == 0 and
sums != sum(partial[-1: -4: -1]))\
or \
(length == size * 2 - 1 and sums != partial[1] + partial[-1] +
partial[-2]):
partial.pop()
continue
yield from n_gons(list(partial), size, sums)
partial.pop()
def n_gon_to_representation(n_gon: List[int]) -> int:
n_gon_str = [str(n) for n in n_gon]
size = len(n_gon_str) // 2
result = ''
minimal = min(n_gon[0], *n_gon[3::2])
index = n_gon.index(minimal)
start = n_gon.index(minimal) // 2 if index >= 3 else 0
for i in range(start, start + size):
current = i % size
if current == 0:
result += ''.join(n_gon_str[0:3])
elif current == size - 1:
result += ''.join([n_gon_str[-1], n_gon_str[-2], n_gon_str[1]])
else:
result += ''.join([n_gon_str[current * 2 + 1],
n_gon_str[current * 2],
n_gon_str[current * 2 + 2]])
return int(result)
def solve() -> int:
return max([n_gon_to_representation(n_gon)
for n_gon in n_gons([], 5)
if n_gon_to_representation(n_gon) < 10 ** 16])
| 27.147541 | 75 | 0.504227 | [
"MIT"
] | cryvate/project-euler | project_euler/solutions/problem_68.py | 1,656 | Python |
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
'''btDown.py - Download resource for HTTP/HTTPS/FTP/Thunder/Magnet/BT
Usage: python3 btDown.py <url> [path]
Required:
url HTTP/HTTPS/FTP/Thunder/MagNet/BT downloading URL
Optionals:
path The store path for the downloaded file
Notice: Python3 required for btDown.py
Author: zanran ([email protected])
CreatedAt: Mon Oct 8 21:27:28 CST 2018
'''
import os
import sys
import requests
import time
import re
import enum
import base64
from urllib import parse, request
def usage(err=None):
if err:
print(err)
print(__doc__)
sys.exit(0)
@enum.unique
class PROTROL_TYEP(enum.Enum):
UNKNOWN = 0
HTTP = 1 # HTTP/HTTPS下载
FTP = 2 # FTP下载
BT = 3 # BT下载
MAGNET = 4 # 磁力下载
THUNDER = 5 # 讯雷下载
class BtDown(object):
def __init__(self, url, path = None):
self.origin_url = url
self.dest_path = path
def detectProtrolType(self, url):
bt_type = PROTROL_TYEP.UNKNOWN
if (re.match('^ftp://', url, re.IGNORECASE)):
bt_type = PROTROL_TYEP.FTP
elif (re.match('^thunder://', url, re.IGNORECASE)):
bt_type = PROTROL_TYEP.THUNDER
elif (re.match('^magnet:?', url, re.IGNORECASE)):
bt_type = PROTROL_TYEP.MAGNET
elif (re.search(r'\.torrent$', url, re.IGNORECASE)):
bt_type = PROTROL_TYEP.BT
# http/https detect must be after torrent
elif (re.match('^https?://', url, re.IGNORECASE)):
bt_type = PROTROL_TYEP.HTTP
return bt_type
def _parserThunderUrl(self, url):
thunder_url = re.sub('^thunder://', '', url, re.IGNORECASE)
normalize_url = base64.b64decode(thunder_url).decode()
normalize_url = re.sub('^AA', '', normalize_url)
normalize_url = re.sub('ZZ$', '', normalize_url)
return normalize_url
def _parserMagnetUrl(self, url):
return ''
def parseUrlProtrol(self, url):
normalize_url = url
bt_type = self.detectProtrolType(url)
if bt_type in [PROTROL_TYEP.THUNDER]:
normalize_url = self._parserThunderUrl(url)
elif bt_type in [PROTROL_TYEP.MAGNET]:
normalize_url = self._parserMagnetUrl(url)
elif bt_type in [PROTROL_TYEP.BT]:
raise Exception('BT (torrent) is unsupported by now !')
return normalize_url
def getTitle(self, url):
title = 'unnamed_file'
bt_type = self.detectProtrolType(url)
if bt_type in [PROTROL_TYEP.HTTP, PROTROL_TYEP.FTP]:
last_slash = url.rfind('/')
if last_slash != -1:
title = url[last_slash + 1:].strip()
if title.count('%') > 1:
title = parse.unquote(title)
return title
def _showDownloadProgress(self, file, percent):
base_file = os.path.basename(file)
if(percent > 100):
percent = 100
message = '\r Downloading %s ...... %2.f%%' % (base_file, percent)
print(message, end='')
return
def _download_http(self, url, dest_file):
res = requests.get(url, stream=True)
max_file_bytes = int(res.headers['Content-Length'])
chunk_size = 1024*1024*4
downloaded_size = 0
f = open(dest_file, 'wb')
for data in res.iter_content(chunk_size):
downloaded_size += len(data)
percent = downloaded_size / max_file_bytes * 100
self._showDownloadProgress(dest_file, percent)
f.write(data)
f.close()
def _download_ftp(self, url, dest_file):
def _report(blocknum, blocksize, totalsize):
if not totalsize:
return
percent = 100.0 * blocknum * blocksize / totalsize
self._showDownloadProgress(dest_file, percent)
url = parse.quote(url, safe=':/@')
request.urlretrieve(url, dest_file, _report)
def download(self):
print('Start downloading %s' % self.origin_url)
normalize_url = self.parseUrlProtrol(self.origin_url)
print('Parse real url %s' % normalize_url)
title = self.getTitle(normalize_url)
dest_file = title
if self.dest_path:
if not os.path.exists(self.dest_path):
os.makedirs(self.dest_path)
dest_file = os.path.join(self.dest_path, title)
if os.path.exists(dest_file):
os.remove(dest_file)
bt_type = self.detectProtrolType(normalize_url)
if bt_type in [PROTROL_TYEP.HTTP]:
self._download_http(normalize_url, dest_file)
elif bt_type in [PROTROL_TYEP.FTP]:
self._download_ftp(normalize_url, dest_file)
else:
raise Exception('Unknown protrol type detected !')
print('\nSaved file: %s' % dest_file)
return
def main():
if len(sys.argv) not in [2, 3]:
usage()
url = sys.argv[1]
path = None
if len(sys.argv) > 2:
path = sys.argv[2]
bt = BtDown(url, path)
bt.download()
print('------------------ Well done ------------------')
if __name__ == '__main__':
main()
| 31.760736 | 75 | 0.596871 | [
"MIT"
] | tianxiaxi/iDown | btDown.py | 5,205 | Python |
"""Commands for starting daemons."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import pprint
import confpy.api
import confpy.core.option
from .. import messages
cfg = confpy.api.Configuration(
transport=confpy.api.Namespace(
description='Message transport options.',
source=confpy.core.option.Option(
description='The transport to fetch new requests from.',
required=True,
),
error=confpy.core.option.Option(
description='The transport to which errors are written.',
required=True,
),
result=confpy.core.option.Option(
description='The transport to which results are written.',
required=True,
),
),
daemon=confpy.api.Namespace(
description='Long running daemon options.',
profiler=confpy.core.option.Option(
description='The profiler implementation to use.',
required=True,
),
process=confpy.core.option.Option(
description='The daemon interface implemention to use.',
required=True,
),
pidfile=confpy.api.StringOption(
description='The location to use as a pidfile.',
required=True,
),
),
)
def _common_args():
"""ArgumentParser setup for all CLI commands."""
parser = argparse.ArgumentParser(
description='Start a new profiler process.'
)
parser.add_argument(
'--config',
required=True,
help='The Python configuration file for the process.',
)
return parser
def profiler_main():
"""Manage a profiler daemon."""
parser = _common_args()
parser.add_argument(
'--action',
required=True,
choices=('start', 'stop', 'restart'),
)
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
proc = cfg.daemon.process(
source_transport=cfg.transport.source,
error_transport=cfg.transport.error,
results_transport=cfg.transport.result,
profiler=cfg.daemon.profiler,
pidfile=cfg.daemon.pidfile,
)
if args.action == 'stop':
proc.stop()
if args.action == 'start':
proc.start()
if args.action == 'restart':
proc.restart()
def send_request():
"""Send a profile request to the daemon."""
parser = _common_args()
parser.add_argument(
'--identifier',
required=True,
help='The unique message identifier.',
)
parser.add_argument(
'--setup',
default='pass',
help='Any setup code if needed for the profile.',
)
parser.add_argument(
'--code',
required=True,
help='The code to profile.',
)
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
cfg.transport.source().send(
messages.ProfileRequest(
identifier=args.identifier,
setup=args.setup,
code=args.code,
),
)
def fetch_result():
"""Fetch a result from the transport."""
parser = _common_args()
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
transport = cfg.transport.result()
msg = transport.fetch()
if msg is not None:
transport.complete(msg)
pprint.pprint(msg.json)
def fetch_error():
"""Fetch an error from the transport."""
parser = _common_args()
args, _ = parser.parse_known_args()
cfg = confpy.api.parse_options(files=(args.config,), env_prefix='PYPERF')
transport = cfg.transport.error()
msg = transport.fetch()
if msg is not None:
transport.complete(msg)
pprint.pprint(msg.json)
| 25.74026 | 77 | 0.619324 | [
"Apache-2.0"
] | kevinconway/PyPerf | pyperf/cmd/daemons.py | 3,964 | Python |
from kat.harness import Query, EDGE_STACK
from abstract_tests import AmbassadorTest, HTTP
from abstract_tests import ServiceType
from selfsigned import TLSCerts
from kat.utils import namespace_manifest
#####
# XXX This file is annoying.
#
# RedirectTestsWithProxyProto and RedirectTestsInvalidSecret used to be subclasses of RedirectTests,
# which makes a certain amount of sense. Problem is that when I wanted to modify just RedirectTests
# to have secrets defined, that ended up affecting the two subclasses in bad ways. There's basically
# no way to subclass an AmbassadorTest without having your base class be run separately, which isn't
# what I wanted here. Sigh.
class RedirectTests(AmbassadorTest):
target: ServiceType
edge_stack_cleartext_host = False
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
# only check https urls since test readiness will only end up barfing on redirect
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def manifests(self):
return namespace_manifest("redirect-namespace") + f"""
---
apiVersion: v1
kind: Secret
metadata:
name: redirect-cert
namespace: redirect-namespace
type: kubernetes.io/tls
data:
tls.crt: {TLSCerts["localhost"].k8s_crt}
tls.key: {TLSCerts["localhost"].k8s_key}
---
apiVersion: v1
kind: Secret
metadata:
name: redirect-cert
type: kubernetes.io/tls
data:
tls.crt: {TLSCerts["localhost"].k8s_crt}
tls.key: {TLSCerts["localhost"].k8s_key}
""" + super().manifests()
def config(self):
# Use self here, not self.target, because we want the TLS module to
# be annotated on the Ambassador itself.
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: tls
ambassador_id: [{self.ambassador_id}]
config:
server:
enabled: True
secret: redirect-cert
redirect_cleartext_from: 8080
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
# [0]
yield Query(self.url("tls-target/", scheme="http"), expected=301)
# [1] -- PHASE 2
yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors",
scheme="https"),
insecure=True,
phase=2)
def check(self):
# For query 0, check the redirection target.
assert len(self.results[0].headers['Location']) > 0
assert self.results[0].headers['Location'][0].find('/tls-target/') > 0
# For query 1, we require no errors.
# XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
errors = self.results[1].json
assert(len(errors) == 0)
class RedirectTestsWithProxyProto(AmbassadorTest):
target: ServiceType
def init(self):
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
# only check https urls since test readiness will only end up barfing on redirect
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: ambassador
config:
use_proxy_proto: true
enable_ipv6: true
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
# TODO (concaf): FWIW, this query only covers one side of the story. This tests that this is the correct
# deviation from the normal behavior (301 response), but does not test a 301 when proxy proto is actually sent.
# This is because net/http does not yet support adding proxy proto to HTTP requests, and hence it's difficult
# to test with kat. We will need to open a raw TCP connection (e.g. telnet/nc) and send the entire HTTP Request
# in plaintext to test this behavior (or use curl with --haproxy-protocol).
yield Query(self.url("tls-target/"), error=[ "EOF", "connection reset by peer" ])
# We can't do the error check until we have the PROXY client mentioned above.
# # [1] -- PHASE 2
# yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), phase=2)
#
# def check(self):
# # We don't have to check anything about query 0, the "expected" clause is enough.
#
# # For query 1, we require no errors.
# # XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
# errors = self.results[1].json
# assert(len(errors) == 0)
class RedirectTestsInvalidSecret(AmbassadorTest):
"""
This test tests that even if the specified secret is invalid, the rest of TLS Context should
go through. In this case, even though the secret does not exist, redirect_cleartext_from
should still take effect.
"""
target: ServiceType
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.xfail = "FIXME: IHA"
self.target = HTTP()
def requirements(self):
# only check https urls since test readiness will only end up barfing on redirect
yield from (r for r in super().requirements() if r[0] == "url" and r[1].url.startswith("https"))
def config(self):
yield self, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Module
name: tls
ambassador_id: [{self.ambassador_id}]
config:
server:
enabled: True
secret: does-not-exist-secret
redirect_cleartext_from: 8080
""")
yield self.target, self.format("""
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: tls_target_mapping
hostname: "*"
prefix: /tls-target/
service: {self.target.path.fqdn}
""")
def queries(self):
# [0]
yield Query(self.url("tls-target/"), expected=301)
# There's kind of no way to do this. Looks like we need to speak HTTP to the port on which we
# think the server is listening for HTTPS? This is a bad config all the way around, really.
# # [1] -- PHASE 2
# yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors", scheme="https"), phase=2)
#
# def check(self):
# # We don't have to check anything about query 0, the "expected" clause is enough.
#
# # For query 1, we require no errors.
# # XXX Ew. If self.results[1].json is empty, the harness won't convert it to a response.
# errors = self.results[1].json
# assert(len(errors) == 0)
class XFPRedirect(AmbassadorTest):
parent: AmbassadorTest
target: ServiceType
edge_stack_cleartext_host = False
def init(self):
if EDGE_STACK:
self.xfail = "Not yet supported in Edge Stack"
self.target = HTTP()
self.add_default_http_listener = False
self.add_default_https_listener = False
def manifests(self):
return self.format('''
---
apiVersion: getambassador.io/v3alpha1
kind: Listener
metadata:
name: ambassador-listener-8080
spec:
ambassador_id: [{self.ambassador_id}]
port: 8080
protocol: HTTP
securityModel: XFP
l7Depth: 1
hostBinding:
namespace:
from: ALL
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
name: weird-xfp-test-host
spec:
ambassador_id: [{self.ambassador_id}]
requestPolicy:
insecure:
action: Redirect
''') + super().manifests()
def config(self):
yield self.target, self.format("""
kind: Module
name: ambassador
config:
use_remote_address: false
---
apiVersion: getambassador.io/v3alpha1
kind: Mapping
name: {self.name}
hostname: "*"
prefix: /{self.name}/
service: {self.target.path.fqdn}
""")
def queries(self):
# [0]
yield Query(self.url(self.name + "/target/"), headers={ "X-Forwarded-Proto": "http" }, expected=301)
# [1]
yield Query(self.url(self.name + "/target/"), headers={ "X-Forwarded-Proto": "https" }, expected=200)
# [2] -- PHASE 2
yield Query(self.url("ambassador/v0/diag/?json=true&filter=errors"), headers={ "X-Forwarded-Proto": "https" }, phase=2)
def check(self):
# For query 0, check the redirection target.
expected_location = ["https://" + self.path.fqdn + "/" + self.name + "/target/"]
actual_location = self.results[0].headers['Location']
assert actual_location == expected_location, "Expected redirect location to be {}, got {} instead".format(
expected_location,
actual_location
)
# For query 1, we don't have to check anything, the "expected" clause is enough.
# For query 2, we require no errors.
# XXX Ew. If self.results[2].json is empty, the harness won't convert it to a response.
errors = self.results[2].json
assert(len(errors) == 0)
def requirements(self):
# We're replacing super()'s requirements deliberately here: we need the XFP header or they can't work.
yield ("url", Query(self.url("ambassador/v0/check_ready"), headers={"X-Forwarded-Proto": "https"}))
yield ("url", Query(self.url("ambassador/v0/check_alive"), headers={"X-Forwarded-Proto": "https"}))
| 31.398693 | 127 | 0.658097 | [
"Apache-2.0"
] | DoodleScheduling/emissary | python/tests/kat/t_redirect.py | 9,608 | Python |
#!/usr/bin/env python3
# Copyright (C) 2019 - Virtual Open Systems SAS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# author = Teodora Sechkova
# author_email = [email protected]
import bjointsp.api.placement as placement
# Start the placement server
def main():
placement.api.app.run(host='localhost', port=3800, debug=True)
if __name__ == '__main__':
main()
| 31.344828 | 74 | 0.731573 | [
"Apache-2.0"
] | 5GCity/5GCity-resource-placement | src/bjointsp/main.py | 909 | Python |
"""
A simple Line class.
NOTE: This is NOT rosegraphics -- it is your OWN Line class.
Authors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,
Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,
and Jacob Jarski.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import math
import m1t_test_Line as m1t
###############################################################################
# IMPORTANT:
# Your instructor will help you get started on this exercise.
###############################################################################
# -----------------------------------------------------------------------------
# DONE: 2. Right-click on the src folder and
# Mark Directory as ... Sources Root,
# if you have not already done so.
#
# Then, with your instructor, READ THE INSTRUCTIONS in file
# m0_INSTRUCTIONS.txt
# asking questions as needed. Once you understand the instructions,
# mark this _TODO_ as DONE.
# -----------------------------------------------------------------------------
###############################################################################
# NOTE: For ALL of the methods that you implement, the method is allowed
# to have additional side effects as needed by it and/or other methods.
###############################################################################
def main():
"""
Calls the TEST functions in this module, but ONLY if the method
to be tested has at least a partial implementation. That is,
a TEST function will not be called until you begin work
on the code that it is testing.
"""
if m1t.is_implemented('__init__'):
run_test_init()
if m1t.is_implemented('clone'):
run_test_clone()
if m1t.is_implemented('reverse'):
run_test_reverse()
if m1t.is_implemented('slope'):
run_test_slope()
if m1t.is_implemented('length'):
run_test_length()
if m1t.is_implemented('get_number_of_clones'):
run_test_get_number_of_clones()
if m1t.is_implemented('line_plus'):
run_test_line_plus()
if m1t.is_implemented('line_minus'):
run_test_line_minus()
if m1t.is_implemented('midpoint'):
run_test_midpoint()
if m1t.is_implemented('is_parallel'):
run_test_is_parallel()
if m1t.is_implemented('reset'):
run_test_reset()
###############################################################################
# Students:
# Do NOT touch the following Point class - it has no TO DO.
# Do NOT copy code from the methods in this Point class.
#
# DO ** READ ** this Point class,
# asking questions about any of it that you do not understand.
#
# DO ** CALL ** methods in this Point class as needed
# in implementing and testing the methods of the ** Line ** class.
#
# IMPORTANT, IMPORTANT, IMPORTANT:
# *** In your ** Line ** class methods, you should NEVER have code
# *** that a ** Point ** class method could do for you.
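#
#   For example (one concrete illustration of that rule): a Line method
#   that needs the distance between its two endpoints can simply delegate
#        return self.start.distance_from(self.end)
#   rather than re-deriving the square-root arithmetic itself.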
###############################################################################
# The Point class (and its methods) begins here.
###############################################################################
class Point(object):
""" Represents a point in 2-dimensional space. """
def __init__(self, x, y):
""" Sets instance variables x and y to the given coordinates. """
self.x = x
self.y = y
def __repr__(self):
"""
Returns a string representation of this Point.
For each coordinate (x and y), the representation:
- Uses no decimal points if the number is close to an integer,
- Else it uses 2 decimal places after the decimal point.
Examples:
Point(10, 3.14)
Point(3.01, 2.99)
"""
decimal_places = 2 # Use 2 places after the decimal point
formats = []
numbers = []
for coordinate in (self.x, self.y):
if abs(coordinate - round(coordinate)) < (10 ** -decimal_places):
# Treat it as an integer:
formats.append('{}')
numbers.append(round(coordinate))
else:
# Treat it as a float to decimal_places decimal places:
formats.append('{:.' + str(decimal_places) + 'f}')
numbers.append(round(coordinate, decimal_places))
format_string = 'Point(' + formats[0] + ', ' + formats[1] + ')'
return format_string.format(numbers[0], numbers[1])
def __eq__(self, p2):
"""
Defines == for Points: a == b is equivalent to a.__eq__(b).
Treats two numbers as "equal" if they are within 6 decimal
places of each other for both x and y coordinates.
"""
return (round(self.x, 6) == round(p2.x, 6) and
round(self.y, 6) == round(p2.y, 6))
def clone(self):
""" Returns a new Point at the same (x, y) as this Point. """
return Point(self.x, self.y)
def distance_from(self, p2):
""" Returns the distance this Point is from the given Point. """
dx_squared = (self.x - p2.x) ** 2
dy_squared = (self.y - p2.y) ** 2
return math.sqrt(dx_squared + dy_squared)
def halfway_to(self, p2):
"""
Given another Point object p2, returns a new Point
that is half-way between this Point and the given Point (p2).
"""
return Point((self.x + p2.x) / 2,
(self.y + p2.y) / 2)
def plus(self, p2):
"""
Returns a Point whose coordinates are those of this Point
PLUS the given Point. For example:
p1 = Point(500, 20)
p2 = Point(100, 13)
p3 = p1.plus(p2)
print(p3)
would print: Point(600, 33)
"""
return Point(self.x + p2.x, self.y + p2.y)
def minus(self, p2):
"""
Returns a Point whose coordinates are those of this Point
MINUS the given Point. For example:
p1 = Point(500, 20)
p2 = Point(100, 13)
p3 = p1.minus(p2)
print(p3)
would print: Point(400, 7)
"""
return Point(self.x - p2.x, self.y - p2.y)
###############################################################################
# The Line class (and its methods) begins here.
###############################################################################
class Line(object):
""" Represents a line segment in 2-dimensional space. """
def __init__(self, start, end):
        """
        What comes in:
          -- self
          -- a Point object named start
          -- a Point object named end
        where the two Points are to be the initial start and end points,
        respectively, of this Line.
        What goes out: Nothing (i.e., None).
        Side effects: MUTATEs this Line by setting two instance
        variables named:
          -- start
          -- end
        to CLONES of the two Point arguments, respectively.
        Other methods must maintain those instance variables as needed
        so that they always indicate the CURRENT start and end points
        of this Line.
        Also, initializes other instance variables as needed
        by other Line methods.
        Example: This __init__ method runs when one constructs
        a Line.  So the 3rd of the following statements
        invokes the __init__ method of this Line class:
             p1 = Point(30, 17)
             p2 = Point(50, 80)
             line = Line(p1, p2)  # Causes __init__ to run
             print(line.start)  # Should print Point(30, 17)
             print(line.end)  # Should print Point(50, 80)
             print(line.start == p1)  # Should print True
             print(line.start is p1)  # Should print False
        Type hints:
          :type start: Point
          :type end: Point
        """
        # The current endpoints are CLONES, so later changes to the
        # caller's Points cannot mutate this Line.
        self.start = start.clone()
        self.end = end.clone()
        # originalstart/originalend remember the initial endpoints;
        # timescloned counts how many times clone() has been called.
        self.originalstart = start.clone()
        self.originalend = end.clone()
        self.timescloned = 0
# ---------------------------------------------------------------------
# DONE: 3.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def __repr__(self):
"""
What comes in:
-- self
What goes out: Returns a string representation of this Line,
in the form:
Line[(x1, y1), (x2, y2)]
Side effects: None.
Note: print(BLAH) causes BLAH's __repr__ to be called.
BLAH's __repr__ returns a string,
which the print function then prints.
Example: Since the print function calls __repr__ on the
object to be printed:
p1 = Point(30, 17)
p2 = Point(50, 80)
line = Line(p1, p2) # Causes __init__ to run
# The following statement causes __repr__ to run,
# hence should print: Line[(30, 17), (50, 80)]
print(line)
Type hints:
:rtype: str
"""
# ---------------------------------------------------------------------
# We have already implemented this __repr__ function for you.
# Do NOT modify it.
# ---------------------------------------------------------------------
start = repr(self.start).replace('Point', '')
end = repr(self.end).replace('Point', '')
return 'Line[{}, {}]'.format(start, end)
def __eq__(self, line2):
"""
What comes in:
-- self
-- a Line object
What goes out: Returns True if:
this Line's start point is equal to line2's start point AND
this Line's end point is equal to line2's end point.
Returns False otherwise.
Side effects: None.
Note: a == b is equivalent to a.__eq__(b).
Examples:
p1 = Point(30, 17)
p2 = Point(50, 80)
line1 = Line(p1, p2)
line2 = Line(p1, p2)
line3 = Line(p2, p1)
print(line1 == line1) # Should print: True
print(line1 == line2) # Should print: True
print(line1 == line3) # Should print: False
line1.start = Point(0, 0)
print(line1 == line2) # Should now print: False
Type hints:
:type line2: Line
:rtype: bool
"""
# ---------------------------------------------------------------------
# We have already implemented this __eq__ function for you.
# Do NOT modify it.
# ---------------------------------------------------------------------
return (self.start == line2.start) and (self.end == line2.end)
def clone(self):
        """
        What comes in:
          -- self
        What goes out: Returns a new Line whose START is a clone of
          this Line's START and whose END is a clone of this Line's END.
        Side effects: None.
        Example:
            p1 = Point(30, 17)
            p2 = Point(50, 80)
            line1 = Line(p1, p2)
            line2 = line1.clone()
            print(line1)  # Should print: Line[(30, 17), (50, 80)]
            print(line2)  # Should print: Line[(30, 17), (50, 80)]
            print(line1 == line2)  # Should print: True
            print(line1 is line2)  # Should print: False
            print(line1.start is line2.start)  # Should print: False
            print(line1.end is line2.end)  # Should print: False
            line1.start = Point(11, 12)
            print(line1)  # Should print: Line[(11, 12), (50, 80)]
            print(line2)  # Should print: Line[(30, 17), (50, 80)]
            print(line1 == line2)  # Should now print: False
        Type hints:
          :rtype: Line
        """
        # Record the clone for get_number_of_clones, then build the copy.
        # Line's __init__ clones the given Points, so the new Line gets
        # its own start/end Points automatically.
        self.timescloned += 1
        return Line(self.start, self.end)
# ---------------------------------------------------------------------
# DONE: 4.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def reverse(self):
        """
        What comes in:
          -- self
        What goes out: Nothing (i.e., None).
        Side effects: MUTATES this Line so that its direction is reversed
          (that is, its start and end points are swapped).
          ** Must NOT mutate its start and end points -- just SWAP them. **
        Examples:
            p1 = Point(30, 17)
            p2 = Point(50, 80)
            line1 = Line(p1, p2)
            line2 = line1.clone()
            print(line1)  # Should print: Line[(30, 17), (50, 80)]
            line1.reverse()
            print(line1)  # Should print: Line[(50, 80), (30, 17)]
            print(line1 == line2)  # Should print: False
            line1.reverse()
            print(line1 == line2)  # Should now print: True
        """
        # Swap the endpoint references (the Point objects themselves
        # are not mutated).
        self.start, self.end = self.end, self.start
# ---------------------------------------------------------------------
# DONE: 5.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def slope(self):
        """
        What comes in:
          -- self
        What goes out: Returns the slope of this Line, or
          math.inf
          if the line is vertical (i.e., has "infinite" slope).
        Side effects: None.
        Examples:
            p1 = Point(30, 3)
            p2 = Point(50, 8)
            line1 = Line(p1, p2)
            # Since the slope is (8 - 3) / (50 - 30) , which is 0.25:
            print(line1.slope())  # Should print [approximately]: 0.25
            line2 = Line(Point(10, 10), Point(10, 5))
            print(line2.slope())  # Should print: inf
            # math.inf is NOT the STRING 'inf', so:
            print(line2.slope() == 'inf')  # Should print False
        Type hints:
          :rtype: float
        """
        # slope = rise / run; a zero run means a vertical line.
        rise = self.end.y - self.start.y
        run = self.end.x - self.start.x
        if run == 0:
            return math.inf
        return rise / run
# ---------------------------------------------------------------------
# DONE: 6.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def length(self):
        """
        What comes in:
          -- self
        What goes out: Returns the length of this Line.
        Side effects: None.
        Example:
            p1 = Point(166, 10)
            p2 = Point(100, 10)
            line1 = Line(p1, p2)
            # Since the distance from p1 to p2 is 66:
            print(line1.length())  # Should print: 66.0
            p3 = Point(0, 0)
            p4 = Point(3, 4)
            line2 = Line(p3, p4)
            print(line2.length())  # Should print about 5.0
        Type hints:
          :rtype: float
        """
        # Delegate the distance arithmetic to the Point class.
        return self.start.distance_from(self.end)
# ---------------------------------------------------------------------
# DONE: 7.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def get_number_of_clones(self):
        """
        What comes in:
          -- self
        What goes out:
          -- Returns the number of times that this Line has been cloned
             (via the clone method).
        Side effects: None.
        Example:
            line1 = Line(Point(500, 20), Point(100, 8))
            line2 = line1.clone()
            line3 = line1.clone()
            line4 = line3.clone()
            line5 = line1.clone()
            print(line1.get_number_of_clones())
            print(line2.get_number_of_clones())
            print(line3.get_number_of_clones())
            print(line4.get_number_of_clones())
            print(line5.get_number_of_clones())
        would print:
            3  [since there are three line1.clone() statements]
            0  [since there are no line2.clone() statements]
            1  [since there is one line3.clone() statement]
            0  [since there are no line4.clone() statements]
            0  [since there are no line5.clone() statements]
        Type hints:
          :rtype: int:
        """
        # The count is set to 0 in __init__ and incremented in clone.
        return self.timescloned
# ---------------------------------------------------------------------
# DONE: 8.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
def line_plus(self, other_line):
"""
What comes in:
-- self
-- another Line object
What goes out:
-- Returns a Line whose:
-- start is the sum of this Line's start (a Point)
and the other_line's start (another Point).
-- end is the sum of this Line's end (a Point)
and the other_line's end (another Point).
Side effects: None.
Example:
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_plus(line2)
print(line3)
would print: Line[(600, 33), (500, 16)]
Type hints:
:type other_line: Line
:rtype: Line:
"""
# ---------------------------------------------------------------------
# DONE: 9.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
        # Point.plus does the coordinate-wise addition for us.
        return Line(self.start.plus(other_line.start),
                    self.end.plus(other_line.end))
def line_minus(self, other_line):
"""
What comes in:
-- self
-- another Line object
What goes out:
-- Returns a Line whose:
-- start is this Line's start (a Point)
minus the other_line's start (another Point).
-- end is this Line's end (a Point)
minus the other_line's end (another Point).
Side effects: None.
Example:
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_minus(line2)
print(line3)
would print: Line[(400, 7), (-300, 0)]
Type hints:
:type other_line: Line
:rtype: Line:
"""
# ---------------------------------------------------------------------
# DONE: 10.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
start = Point(self.start.x - other_line.start.x, self.start.y - other_line.start.y)
end = Point(self.end.x - other_line.end.x, self.end.y - other_line.end.y)
line_minus = Line(start, end)
return line_minus
def midpoint(self):
"""
What comes in:
-- self
What goes out: returns a Point at the midpoint of this Line.
Side effects: None.
Example:
p1 = Point(3, 10)
p2 = Point(9, 20)
line1 = Line(p1, p2)
print(line1.midpoint()) # Should print: Point(6, 15)
Type hints:
:rtype: Point
"""
# ---------------------------------------------------------------------
# DONE: 11.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
midpoint = Point((self.end.x + self.start.x)/2, (self.end.y + self.start.y)/2)
return midpoint
def is_parallel(self, line2):
"""
What comes in:
-- self
-- another Line object (line2)
What goes out: Returns True if this Line is parallel to the
given Line (line2). Returns False otherwise.
*** SEE THE IMPORTANT NOTE BELOW, re ROUNDING numbers.
Side effects: None.
Examples:
line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0
line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0
          line3 = Line(Point(10, 10), Point(80, 80))  # slope is 1.0
line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf
print(line1.is_parallel(line2)) # Should print: True
print(line2.is_parallel(line1)) # Should print: True
print(line1.is_parallel(line3)) # Should print: False
print(line1.is_parallel(line4)) # Should print: False
print(line1.is_parallel(line1)) # Should print: True
print(line4.is_parallel(line4)) # Should print: True
Type hints:
:type line2: Line
:rtype: bool
"""
        # Vertical lines have an undefined (infinite) slope, so handle them
        # first: in that case the lines are parallel only if BOTH are vertical.
        self_dx = self.end.x - self.start.x
        line2_dx = line2.end.x - line2.start.x
        if self_dx == 0 or line2_dx == 0:
            return self_dx == line2_dx
        # Otherwise compare slopes, rounding to sidestep floating-point error.
        self_slope = (self.end.y - self.start.y) / self_dx
        line2_slope = (line2.end.y - line2.start.y) / line2_dx
        return round(self_slope, 10) == round(line2_slope, 10)
# ---------------------------------------------------------------------
# DONE: 12.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
#######################################################################
#
# IMPORTANT: When you test whether two FLOATING POINT numbers
# are "equal", you must deal with the imprecision
# of floating-point arithmetic. For example, in REAL arithmetic,
# 1 / (24 * math.pi - 20 * math.pi)
# and
# 3 / (72 * math.pi - 60 * math.pi)
# are equal. But in FLOATING point arithmetic, they are:
# 0.07957747154594767
# and
# 0.07957747154594765
# respectively (hence NOT equal).
# Try it out if you don't believe me!
#
#######################################################################
# IMPORTANT BOTTOM-LINE: When you want to test whether two
# FLOATING POINT numbers a and b are the same, as in this method,
# DON'T use: a == b
# INSTEAD use: round(a, 12) == round(b, 12)
########################################################################
#
# The latter compares the numbers rounded to 12 decimal places.
# In the context of this exercise, doing so is adequate to ignore
# floating-point errors while distinguishing numbers that really
# are different from each other.
#######################################################################
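    # For example, a quick doctest-style check of the claim above, using the
    # two numbers quoted in the note (an illustrative aside):
    #     >>> a = 1 / (24 * math.pi - 20 * math.pi)
    #     >>> b = 3 / (72 * math.pi - 60 * math.pi)
    #     >>> a == b
    #     False
    #     >>> round(a, 12) == round(b, 12)
    #     True
    #######################################################################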
def reset(self):
self.start = self.originalstart
self.end = self.originalend
"""
What comes in:
-- self
What goes out: Nothing (i.e., None).
Side effects: MUTATES this Line so that its start and end points
revert to what they were when this Line was constructed.
Examples:
p1 = Point(-3, -4)
p2 = Point(3, 4)
line1 = Line(p1, p2)
line2 = Line(Point(0, 1), Point(10, 20))
... [various actions, including some like these:]
line1.start = Point(100, 300)
line2.end = Point(99, 4)
line1.reverse()
# Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and
# (x2, y2) are the CURRENT coordinates of line1's endpoints.
print(line1)
print(line2) # Similarly for line2
line1.reset()
line2.reset()
print(line1) # Should print: Line[(-3, -4), (3, 4)]
print(line2) # Should print: Line[(0, 1), (10, 20)]
"""
# ---------------------------------------------------------------------
# DONE: 13.
# a. READ the above specification, including the Example.
# ** ASK QUESTIONS AS NEEDED. **
# ** Be sure you understand it, ESPECIALLY the Example.
# b. Implement and test this method.
# The tests are already written (below).
# They include the Example in the above doc-string.
# ---------------------------------------------------------------------
###############################################################################
# The TEST functions for the Line class begin here.
#
# We have already written the TEST functions. They all take the form:
# -- m1t.run_test_BLAH() # This runs OUR tests.
# -- One more test (or set of tests) that came directly from the Example
# in the specification.
###############################################################################
def run_test_init():
""" Tests the __init__ method of the Line class. """
m1t.run_test_init() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(30, 17)
p2 = Point(50, 80)
line = Line(p1, p2) # Causes __init__ to run
print(line.start) # Should print Point(30, 17)
print(line.end) # Should print Point(50, 80)
print(line.start == p1) # Should print True
print(line.start is p1) # Should print False
print('The above should print:')
print(' Point(30, 17)')
print(' Point(50, 80)')
print(' True')
print(' False')
def run_test_clone():
""" Tests the clone method of the Line class. """
m1t.run_test_clone() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(30, 17)
p2 = Point(50, 80)
line1 = Line(p1, p2)
line2 = line1.clone()
print(line1) # Should print: Line[(30, 17), (50, 80)]
print(line2) # Should print: Line[(30, 17), (50, 80)]
print(line1 == line2) # Should print: True
print(line1 is line2) # Should print: False
print(line1.start is line2.start) # Should print: False
print(line1.end is line2.end) # Should print: False
line1.start = Point(11, 12)
print(line1) # Should print: Line[(11, 12), (50, 80)]
print(line2) # Should print: Line[(30, 17), (50, 80)]
print(line1 == line2) # Should now print: False
print('The above should print:')
print(' Line[(30, 17), (50, 80)]')
print(' Line[(30, 17), (50, 80)]')
print(' True')
print(' False')
print(' False')
print(' False')
print(' Line[(11, 12), (50, 80)]')
    print('    Line[(30, 17), (50, 80)]')
print(' False')
def run_test_reverse():
""" Tests the reverse method of the Line class. """
m1t.run_test_reverse() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(30, 17)
p2 = Point(50, 80)
line1 = Line(p1, p2)
line2 = line1.clone()
print(line1) # Should print: Line[(30, 17), (50, 80)]
line1.reverse()
print(line1) # Should print: Line[(50, 80), (30, 17)]
print(line1 == line2) # Should print: False
line1.reverse()
print(line1 == line2) # Should now print: True
print('The above should print:')
print(' Line[(30, 17), (50, 80)]')
    print('    Line[(50, 80), (30, 17)]')
print(' False')
print(' True')
def run_test_slope():
""" Tests the slope method of the Line class. """
m1t.run_test_slope() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(30, 3)
p2 = Point(50, 8)
line1 = Line(p1, p2)
# Since the slope is (8 - 3) / (50 - 30) , which is 0.25:
print(line1.slope()) # Should print [approximately]: 0.25
line2 = Line(Point(10, 10), Point(10, 5))
print(line2.slope()) # Should print: inf
# math.inf is NOT the STRING 'inf', so:
print(line2.slope() == 'inf') # Should print False
print('The above should print:')
print(' 0.25 (approximately)')
print(' inf')
print(' False')
def run_test_length():
""" Tests the length method of the Line class. """
m1t.run_test_length() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(166, 10)
p2 = Point(100, 10)
line1 = Line(p1, p2)
# Since the distance from p1 to p2 is 66:
print(line1.length()) # Should print: 66.0
p3 = Point(0, 0)
p4 = Point(3, 4)
line2 = Line(p3, p4)
print(line2.length()) # Should print about 5.0
print('The above should print:')
print(' 66.0')
print(' 5.0 (approximately)')
def run_test_get_number_of_clones():
""" Tests the get_number_of_clones method of the Line class. """
m1t.run_test_get_number_of_clones() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
line1 = Line(Point(500, 20), Point(100, 8))
line2 = line1.clone()
line3 = line1.clone()
line4 = line3.clone()
line5 = line1.clone()
print(line1.get_number_of_clones())
print(line2.get_number_of_clones())
print(line3.get_number_of_clones())
print(line4.get_number_of_clones())
print(line5.get_number_of_clones())
print('The above should print 3, then 0, then 1, then 0, then 0.')
def run_test_line_plus():
""" Tests the line_plus method of the Line class. """
m1t.run_test_line_plus() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_plus(line2)
print(line3)
print('The above should print: Line[(600, 33), (500, 16)]')
def run_test_line_minus():
""" Tests the line_minus method of the Line class. """
m1t.run_test_line_minus() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
line1 = Line(Point(500, 20), Point(100, 8))
line2 = Line(Point(100, 13), Point(400, 8))
line3 = line1.line_minus(line2)
print(line3)
print('The above should print: Line[(400, 7), (-300, 0)]')
def run_test_midpoint():
""" Tests the midpoint method of the Line class. """
m1t.run_test_midpoint() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(3, 10)
p2 = Point(9, 20)
line1 = Line(p1, p2)
print(line1.midpoint()) # Should print: Point(6, 15)
print('The above should print: Point(6, 15)')
def run_test_is_parallel():
""" Tests the is_parallel method of the Line class. """
m1t.run_test_is_parallel() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
line1 = Line(Point(15, 30), Point(17, 50)) # slope is 10.0
line2 = Line(Point(10, 10), Point(15, 60)) # slope is 10.0
    line3 = Line(Point(10, 10), Point(80, 80))  # slope is 1.0
line4 = Line(Point(10, 10), Point(10, 20)) # slope is inf
print(line1.is_parallel(line2)) # Should print: True
print(line2.is_parallel(line1)) # Should print: True
print(line1.is_parallel(line3)) # Should print: False
print(line1.is_parallel(line4)) # Should print: False
print(line1.is_parallel(line1)) # Should print: True
print(line4.is_parallel(line4)) # Should print: True
print('The above should print:')
print(' True, True, False, False, True, True')
def run_test_reset():
""" Tests the reset method of the Line class. """
m1t.run_test_reset() # This runs OUR tests.
# -------------------------------------------------------------------------
# One ADDITIONAL test (or set of tests).
# -------------------------------------------------------------------------
p1 = Point(-3, -4)
p2 = Point(3, 4)
line1 = Line(p1, p2)
line2 = Line(Point(0, 1), Point(10, 20))
line1.start = Point(100, 300)
line2.end = Point(99, 4)
line1.reverse()
# Should print: Line[(x1, y1), (x2, y2)] where (x1, y1) and
# (x2, y2) are the CURRENT coordinates of line1's endpoints.
print(line1)
print(line2) # Similarly for line2
line1.reset()
line2.reset()
print(line1) # Should print: Line[(-3, -4), (3, 4)]
print(line2) # Should print: Line[(0, 1), (10, 20)]
print('The above should print:')
print(' Line[(3, 4), (100, 300)]')
print(' Line[(0, 1), (99, 4)]')
print(' Line[(-3, -4), (3, 4)]')
print(' Line[(0, 1), (10, 20)]')
# -----------------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# It is necessary here to enable the automatic testing in m1t_test_Line.py.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
main()
| 38.270729 | 98 | 0.479757 | [
"MIT"
] | jarskijr/10-MoreImplementingClasses | src/m1_Line.py | 38,309 | Python |
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='product-name']/h1",
'price' : "//p[@class='special-price']/span[@class='price']|//span[@class='regular-price']/span[@class='price']",
'category' : "//div[@class='breadcrumbs']/ul/li/a",
'description' : "//div[@class='box-collateral box-description']/div[@id='details-area']",
'images' : "//p[@class='product-image']/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : "",
'in_stock' : "",
'guarantee' : "",
'promotion' : ""
}
name = 'azora.vn'
allowed_domains = ['azora.vn']
start_urls = ['http://azora.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = ['']
rules = [
#Rule(LinkExtractor(), 'parse_item'),
#Rule(LinkExtractor(), 'parse'),
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+\.html($|\?p=\d+$)']), 'parse_item_and_links'),
]
| 34.3 | 117 | 0.609329 | [
"MIT"
] | chongiadung/choinho | scraper/storage_spiders/azoravn.py | 1,029 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import re
import base64
import json
import os
import tempfile
import requests
import urllib3
from kubernetes_py.utils.ConvertData import convert
from six.moves.urllib.parse import urlencode
RE_VALID_SSL_IP = re.compile(
r'^https://(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])')
class HttpRequest:
def __init__(self, method='GET', host='localhost:80', url='/', data=None, auth=None,
cert=None, ca_cert=None, ca_cert_data=None, token=None):
self.http_method = method
self.http_host = host
self.url = url
self.data = data
self.auth = auth
self.cert = cert
self.ca_cert = ca_cert
self.ca_cert_data = ca_cert_data
self.token = token
def send(self):
state = dict(success=False, reason=None, status=None, data=None)
http_headers = dict()
http_headers['Accept'] = 'application/json'
if self.http_method in ['PUT', 'POST', 'PATCH']:
http_headers['Content-type'] = 'application/json'
if self.token is not None:
http_headers['Authorization'] = 'Bearer {token}'.format(token=self.token)
if self.data is not None and self.http_method in ['GET']:
url = "{0}?{1}".format(self.url, urlencode(self.data))
self.url = url
self.url = self.http_host + self.url
temp = None
verify = False
if self.ca_cert is not None:
verify = self.ca_cert
if self.ca_cert_data is not None:
temp = tempfile.NamedTemporaryFile(delete=False)
data = base64.b64decode(self.ca_cert_data)
temp.write(data)
temp.close()
verify = temp.name
# TODO: TLS issue with Python 2.7 and urllib3 when hostname is an IP address
# A better fix should be found but I can't think of anything else for now.
search_result = RE_VALID_SSL_IP.search(self.http_host)
if search_result:
verify = False
urllib3.disable_warnings()
try:
response = requests.request(
method=self.http_method,
url=self.url,
auth=self.auth,
cert=self.cert,
headers=http_headers,
data="" if self.data is None else json.dumps(self.data),
verify=verify
)
except Exception as err:
raise err
finally:
if temp is not None:
os.unlink(temp.name)
state['status'] = response.status_code
state['reason'] = response.reason
# There was an issue with "kubectl logs" type requests where returned content is "text/plain" and
# we do have characters of unknown origin.
try:
resp_data = response.content.decode('utf-8')
except UnicodeDecodeError:
resp_data = response.content
if len(resp_data) > 0:
try:
state['data'] = convert(data=json.loads(resp_data))
except Exception:
state['data'] = resp_data
if 200 <= state['status'] <= 299:
state['success'] = True
return state
| 31.327273 | 120 | 0.578642 | [
"Apache-2.0"
] | ThinkIQ/kubernetes-py | kubernetes_py/utils/HttpRequest.py | 3,446 | Python |
"""
The code below crawls the annotations of the MADE 1.0 Train Data and stores them
as Corpus ID, Annotation ID, Type, Length, Offset, Text in the
CSV_Annotations.csv file.
Input Files:
All xml files in the annotations folder in the made_train_data folder
Output Files:
CSV_Annotations.csv
Note: Delete any existing CSV_Annotations.csv in the folder before running,
because this script appends to the existing file.
"""
# Importing required Files
import os
import xml.etree.ElementTree as ET
import csv
final =list()
final.append(["Content ID", "Annotation ID", "Type", "Length", "Offset", "Text"])
# Reading required files
path ="C:\\Project_NLP_Final\\Project Dataset\\made_train_data\\annotations\\"
dirListing = os.listdir(path)
for item in dirListing:
tree = ET.parse(path + '\\' + item)
root = tree.getroot()
annot = dict()
for i in root.findall('./document/passage'):
flag = 0
for doc in i.findall('./annotation'):
annot=list()
annot.append(item[0:-9])
annot.append(doc.get('id'))
for typ in doc:
if typ.tag =='infon':
annot.append(typ.text)
elif typ.tag =='location':
annot.append(typ.get('length'))
annot.append(typ.get('offset'))
elif typ.tag == 'text':
annot.append(typ.text)
final.append(annot)
flag = 1
if flag == 0:
annot = [item[0:-9], None, None, None, None, None]
final.append(annot)
# Writing the required files
with open("C:\\Project_NLP_Final\\Project Dataset\\PreProcessing\\Regex\\CSV_Annotations.csv",'a', encoding = 'utf8', newline='') as outcsv:
writer = csv.writer(outcsv, delimiter=',',quotechar = '"')
for row in final:
        writer.writerow(row)
| 34.890909 | 143 | 0.600313 | [
"MIT"
] | avsharma96/Named-Entity-Recognition | Code/PreProcessing/Regex/annotation_crawling.py | 1,919 | Python |
import matplotlib.pyplot as plt
import numpy as np
from prmlmy.util import cv_, norm2s, calc_range
def plot_decision_boundary(model, X_train, y_train=None, x1_range=None, x2_range=None, points=300,
title=None, pad_ratio=0.2, ax=None):
ax = ax or plt
x1_range = x1_range or calc_range(X_train[:, 0], pad_ratio=pad_ratio)
x2_range = x2_range or calc_range(X_train[:, 1], pad_ratio=pad_ratio)
if y_train is None:
y_train = np.zeros(X_train.shape[0])
x1s = np.linspace(x1_range[0], x1_range[1], num=points)
x2s = np.linspace(x2_range[0], x2_range[1], num=points)
x1, x2 = np.meshgrid(x1s, x2s)
x = np.array([x1, x2]).reshape(2, -1).T
y = model.predict(x).reshape(points, points)
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
ax.contourf(x1, x2, y, alpha=0.2)
if title:
ax.set_title(title)
def plot_decision_proba(model, X_train, y_train=None, x1_range=None, x2_range=None, points=300,
title=None, pad_ratio=0.2, ax=None):
ax = ax or plt
x1_range = x1_range or calc_range(X_train[:, 0], pad_ratio=pad_ratio)
x2_range = x2_range or calc_range(X_train[:, 1], pad_ratio=pad_ratio)
if y_train is None:
y_train = np.zeros(X_train.shape[0])
x1s = np.linspace(x1_range[0], x1_range[1], num=points)
x2s = np.linspace(x2_range[0], x2_range[1], num=points)
x1, x2 = np.meshgrid(x1s, x2s)
x = np.array([x1, x2]).reshape(2, -1).T
y = model.proba(x).reshape(points, points)
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
ax.contourf(x1, x2, y, np.linspace(0, 1, 5), alpha=0.2)
if title:
ax.set_title(title)
def get_figsize_default(ncols, nrows):
width = ncols * 5 + 1
height = nrows * 4 + 1
return width, height
def grid_plot(rows, cols, plot_func, row_names=None, col_names=None, figsize=None, *args, **kwargs):
row_names = row_names or [str(row) for row in rows]
col_names = col_names or [str(col) for col in cols]
figsize = figsize or get_figsize_default(len(cols), len(rows))
fig, axs = plt.subplots(nrows=len(rows), ncols=len(cols), figsize=figsize)
axs = axs.reshape(len(rows), len(cols))
for row_axs, row, row_name in zip(axs, rows, row_names):
for ax, col, col_name in zip(row_axs, cols, col_names):
title = ":".join([row_name, col_name])
plot_func(row, col, title, ax=ax, *args, **kwargs)
| 39.516129 | 100 | 0.647347 | [
"MIT"
] | jms7446/PRML | prmlmy/plot_util.py | 2,450 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
One of the trickier parts of creating a mock btrfs filesystem is tracking
the structures of the write forks, respecting `truncate`, `write`, and
`clone` operations. We achieve this as follows:
- Sequentially apply `btrfs send` operations to create & update:
* `IncompleteInode`s and their `Extent`s,
* the path -> `IncompleteInode` mapping.
- Run `extents_to_chunks_with_clones()` to summarize which files clone
which other files. A quick clarificaiton of the notation:
* `Extent` is actually a tree of extents, which captures the history of
how the file's sequence of extents was created. Refer to `extent.py`.
* `Chunk` more directly corresponds to a filesystem extent. It's either
data or a hole of a given length. A file is just a contiguous sequence
of `Chunk`s. Beyond recording the kind, and the length, each `Chunk`
records precisely how other files clone from it.
So `extents_to_chunks_with_clones()` flattens the history-preserving,
clone-aware tree in `Extent` objects into a test-friendly list of
`Chunk`s.
For testing, it is important to produce a representation that is as
normalized as possible: our output should deterministically and uniquely
capture the information we wish to test, and omit everything else[1].
We do NOT want our output to depend on the order of the operations that
created the filesystem, but only on the final filesystem state.
Specifically:
* For any byte offset[2] in the file, we need to know whether it's a
`HOLE`, or it contains `DATA` (see `Extent.Kind`). An offset -> kind
map is too verbose to use in manual tests, so we merge adjacent
offsets with the same `Extent.Kind` into `Chunk`s.
* For any offset in the file, we need to know whether it is a clone of
any other file locations (i.e. copy-on-write sharing of underlying
storage). For this reason, each `Chunk` has a set of `ChunkClones`,
which form a normalized[3] description of the shared-storage links on
the filesystem.
To give an example -- let's say that columns are byte offsets, and we
have this 10-byte extent, parts of which were cloned to make files
`A`, `B`, and `C`:
0123456789 # offsets on disk
BBBBBAAA # some part of file `B` includes offsets 1-5; `A` -- 6-8
AAACCCCC # `A` ALSO includes 0-2, possibly separated from its 6-8
(Aside: `test_extents_to_chunks_with_clones` also uses such figures)
Reading this figure, we see that:
- A has a 6-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 1 into B at `offset` 0 with length 2, aka `B:0+2@1`
* From `offset` 3 into C at `offset` 3 with length 2, aka `C:3+2@3'
- B has a 5-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 0 into A at `offset` 1 with length 2, aka `A:1+2@0`
* From `offset` 2 into C at `offset` 0 with length 3, aka `C:0+3@2'
- C has a 5-byte DATA `Chunk` with two `ChunkClones`:
* From `offset` 0 into B at `offset` 2 with length 3, aka `B:2+3@0`
* From `offset` 3 into A at `offset` 3 with length 2, aka `A:3+2@3'
You can see that our representation of "a set of `ChunkClone`s for
every `Chunk`" is NOT parsimonious. If the same range of bytes is
cloned into N `Chunk`s, each of those `Chunk`s will refer to every
other `Chunk`, for a total of N*(N-1)/2 references. This is far less
efficient than a spanning tree with `N - 1` references.
E.g. in the above example, N = 4, and we stored 6 `ChunkClones`:
{'A': {'B:0+2@1', 'C:3+2@3'},
'B': {'A:1+2@0', 'C:0+3@2'},
'C': {'B:2+3@0', 'A:3+2@3'}}
The redundancy is obvious, e.g. each of these pairs are mirror images:
- 'A': 'B:0+2@1' versus 'B': 'A:1+2@0'
- 'A': 'C:3+2@3' versus 'C': 'A:3+2@3'
- 'B': 'C:0+3@2' versus 'C': 'B:2+3@0'
Picking one ChunkClone from each line would make a 3-edge spanning tree.
Using an inefficient presentation is an intentional design decision.
In most test filesystems, the copy number of any Chunk will be low, so
the cost of enumerating all references is minimal. The upside of this
quadratic representation is that it is unique and simple.
In contrast, presenting the clone structure via a spanning tree breaks
the symmetry, and then each test author has to understand the process
by which the N-1 spanning tree edges are selected. It's easy to make
such a process deterministic, but it still adds cognitive load.
[1] The current code tracks clones of HOLEs, because it makes no effort to
ignore them. I would guess that btrfs lacks this tracking, since such
clones would save no space. Once this is confirmed, it would be very
easy to either ignore, or leave unpopulated the `chunk_clones` field for
`Chunk` object with `kind == Extent.Kind.HOLE`.
[2] I refer to "bytes" throughout, but in actuality filesystems are
block-oriented. To deal with this, divide all lengths and offsets by
your block size to get the sense of "bytes" used here.
[3] The current code does NOT merge adjacent ChunkClones that were created
by separate `clone` operations. This is easy to fix once it comes up in
real applications. Tested in `test_cannot_merge_adjacent_clones()`.
"""
# Future: frozentypes instead of NamedTuples can permit some cleanups below.
import functools
from collections import defaultdict
from typing import Dict, Iterable, NamedTuple, Sequence, Tuple
from .extent import Extent
from .inode import Chunk, ChunkClone, Clone
from .inode_id import InodeID
class _CloneExtentRef(NamedTuple):
"""
Connects a part of a HOLE/DATA leaf Extent to a location in an Inode.
Although the Extent is shared between many inodes and/or disjoint
locations in the same inode, each _CloneExtentRef object is specific to
one occurrence of this Extent in the `gen_trimmed_leaves` of one inode.
We initially create a _CloneExtentRef for every piece of every inode,
but later we only retain those have some inter-inode overlap within
their `.extent`, thus identifying cloned chunks of inodes.
Aside: Unlike the simplified data model in `inode.py`, the Extent's
object identity captures the original reason that parts of some inodes
became identified via a clone relationship. We mostly use this for
assertions.
Future: With `frozentype`, __new__ could assert that `offset` and
`clone.length` are sane with respect to `extent`.
"""
clone: Clone # `clone.length` trims `extent`
extent: Extent
offset: int # Trims `extent`
# The position in `gen_trimmed_leaves` of the specific trimmed leaf that
# is being connected to another inode.
#
# It is possible for a Inode to have two instances of the same Extent
# with the same offset & length in its `gen_trimmed_leaves` stream, see
# e.g. `test_multi_extent`. In that case, we cannot correctly assign
# `ChunkClone`s to their trimmed leaves solely based on the content of
# the trimmed leaf: `(offset, length, extent)`.
#
# You might ask why the `ChunkClone` lists would differ between
# identical trimmed extents? Here is why: the first has to refer to the
# second, but not to itself, and conversely, the second must refer to
# the first, but not to itself.
#
# We could avoid this denormalization by keying `CloneChunk`s on
# `(inode_offset, offset, length, extent)`, which is unique. And
# `extents_to_chunks_with_clones` does already track `inode_offset`.
# However, the denormalized approach seemed cleaner.
leaf_idx: int
def __repr__(self): # pragma: no cover
return (
f"{self.clone.inode_id}:{self.clone.offset}"
f"+{self.clone.length}:{id(self.extent)}" # Extent is too noisy
)
# If these change, we have to update `_clone_op_compare_key`
assert Clone._fields.index("inode_id") == 0
assert _CloneExtentRef._fields.index("clone") == 0
# Our _CloneOp ordering obeys the following invariants:
# - sort by position first
# - sort by action second, putting POPs before PUSHes (see their def'ns)
# We do not need finer-grained ordering because:
# (1) we only do work on POPs,
# (2) the work done on all the POPs at one position does not depend on the
# order of the _CloneOps -- we symmetrically record the relationship in
# both directions:
# (just-popped op, each unpopped op)
# (each unpopped op, just-popped op)
#
# We could get the desired ordering implicitly by:
# - relying on the order of field declaration in `_CloneOp` (not bad)
# - making `Inode`s comparable (a bit ugly, comparing Extents is pricy,
# comparing InodeIDs would require some comparator boilerplate)
# Luckily, being explicit is not *that* painful.
def _clone_op_compare_key(c: "_CloneOp"):
return (
# The preceding asserts make these [1:] hacks tolerable.
c.pos,
c.action,
c.ref[1:],
c.ref.clone[1:],
c.ref.clone.inode_id.id,
)
def _clone_op_compare(fn):
@functools.wraps(fn)
def cmp(self: "_CloneOp", other: "_CloneOp"):
assert isinstance(other, _CloneOp)
# We only compare ops within one extent. The tests assume this to
# justify focusing on single-extent examples, so check it.
assert self.ref.extent is other.ref.extent
# All our items are distinct, since `clone.offset` is `inode_offset`,
# which is strictly increasing in each inode. We have no business
# comparing a _CloneOp with itself.
assert tuple.__ne__(self, other)
return fn(_clone_op_compare_key(self), _clone_op_compare_key(other))
return cmp
class _CloneOp(NamedTuple):
PUSH = "push"
POP = "pop"
assert POP < PUSH # We want to sort all POPs before any PUSHes
pos: int
action: str
ref: _CloneExtentRef
# NamedTuple confuses functools.total_ordering, so define all 6 comparators
__eq__ = _clone_op_compare(tuple.__eq__)
__ne__ = _clone_op_compare(tuple.__ne__)
__lt__ = _clone_op_compare(tuple.__lt__)
__le__ = _clone_op_compare(tuple.__le__)
__gt__ = _clone_op_compare(tuple.__gt__)
__ge__ = _clone_op_compare(tuple.__ge__)
def _leaf_extent_id_to_clone_ops(
ids_and_extents: Iterable[Tuple[InodeID, Extent]]
):
"""
To collect the parts of a Chunk that are cloned, we will run a variation
on the standard interval-overlap algorithm. We first sort the starts &
ends of each interval, and then do a sequential scan that uses starts to
add, and ends to remove, a tracking object from a "current intervals"
structure.
This function simply prepares the set of interval starts & ends for each
InodeID, the computation is in `_leaf_ref_to_chunk_clones_from_clone_ops`.
"""
leaf_extent_id_to_clone_ops = defaultdict(list)
for ino_id, extent in ids_and_extents:
file_offset = 0
for leaf_idx, (offset, length, leaf_extent) in enumerate(
extent.gen_trimmed_leaves()
):
ref = _CloneExtentRef(
clone=Clone(inode_id=ino_id, offset=file_offset, length=length),
extent=leaf_extent,
offset=offset,
leaf_idx=leaf_idx,
)
leaf_extent_id_to_clone_ops[id(leaf_extent)].extend(
[
_CloneOp(pos=offset, action=_CloneOp.PUSH, ref=ref),
_CloneOp(pos=offset + length, action=_CloneOp.POP, ref=ref),
]
)
file_offset += length
return leaf_extent_id_to_clone_ops
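# Aside: a minimal, generic sketch of the sweep described in the docstring
# above; it is NOT used by this module.  Each interval contributes a +1 event
# at its start and a -1 event at its end; sorting puts ends before starts at
# equal positions, mirroring the POP-before-PUSH ordering of `_CloneOp`, and
# the running sum is the number of concurrently open intervals.
def _example_max_concurrent_intervals(intervals):
    events = sorted(
        (pos, delta)
        for start, end in intervals
        for pos, delta in ((start, 1), (end, -1))
    )
    best = current = 0
    for _, delta in events:
        current += delta
        best = max(best, current)
    return best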
def _leaf_ref_to_chunk_clones_from_clone_ops(
extent_id: int, clone_ops: Iterable[_CloneOp]
):
"As per `_leaf_extent_id_to_clone_ops`, this computes interval overlaps"
active_ops: Dict[_CloneExtentRef, _CloneOp] = {} # Tracks open intervals
leaf_ref_to_chunk_clones = defaultdict(list)
for op in sorted(clone_ops):
# Whenever an interval (aka an Inode's Extent's "trimmed leaf")
# ends, we create `ChunkClone` objects **to** and **from** all the
# concurrently open intervals.
if op.action is _CloneOp.POP:
pushed_op = active_ops.pop(op.ref)
assert pushed_op.ref is op.ref
assert id(op.ref.extent) == extent_id
assert pushed_op.pos == op.ref.offset
assert pushed_op.pos + op.ref.clone.length == op.pos
for clone_op in active_ops.values():
assert op.ref.extent is clone_op.ref.extent
# The cloned portion's extent offset is the larger of the 2
bigger_offset = max(clone_op.ref.offset, op.ref.offset)
# Record that `clone_op` clones part of `op`'s inode.
leaf_ref_to_chunk_clones[op.ref].append(
ChunkClone(
offset=bigger_offset,
clone=Clone(
inode_id=clone_op.ref.clone.inode_id,
offset=clone_op.ref.clone.offset
+ (bigger_offset - clone_op.ref.offset),
length=op.pos - bigger_offset,
),
)
)
# Record that `op` clones part of `clone_op`'s inode.
leaf_ref_to_chunk_clones[clone_op.ref].append(
ChunkClone(
offset=bigger_offset,
clone=Clone(
inode_id=op.ref.clone.inode_id,
offset=op.ref.clone.offset
+ (bigger_offset - op.ref.offset),
length=op.pos - bigger_offset, # Same length
),
)
)
# Sorting guarantees all POPs for `pos` are handled before PUSHes
elif op.action == _CloneOp.PUSH:
assert op.ref not in active_ops
active_ops[op.ref] = op
else:
raise AssertionError(op) # pragma: no cover
return leaf_ref_to_chunk_clones
def _id_to_leaf_idx_to_chunk_clones(
ids_and_extents: Iterable[Tuple[InodeID, Extent]]
):
'Aggregates newly created ChunkClones per InodeID, and per "trimmed leaf"'
id_to_leaf_idx_to_chunk_clones = defaultdict(dict)
for extent_id, clone_ops in _leaf_extent_id_to_clone_ops(
ids_and_extents
).items():
leaf_ref_to_chunk_clones = _leaf_ref_to_chunk_clones_from_clone_ops(
extent_id, clone_ops
)
for leaf_ref, offsets_clones in leaf_ref_to_chunk_clones.items():
d = id_to_leaf_idx_to_chunk_clones[leaf_ref.clone.inode_id]
# A `leaf_idx` from a specific inode ID refers to one extent,
# and each extent is handled in one iteration, so it cannot be
# that two iterations contribute to the same `leaf_idx` key.
assert leaf_ref.leaf_idx not in d
# `leaf_idx` is the position in `gen_trimmed_leaves` of the
# chunk, whose clones we computed. That fully specifies where
# `extents_to_chunks_with_clones` should put the clones.
d[leaf_ref.leaf_idx] = offsets_clones
return id_to_leaf_idx_to_chunk_clones
def extents_to_chunks_with_clones(
ids_and_extents: Sequence[Tuple[InodeID, Extent]]
) -> Iterable[Tuple[InodeID, Sequence[Chunk]]]:
"""
Converts the nested, history-preserving `Extent` structures into flat
sequences of `Chunk`s, while being careful to annotate cloned parts as
described in this file's docblock. The `InodeID`s are needed to ensure
that the `Chunk`s' `Clone` objects refer to the appropriate files.
"""
id_to_leaf_idx_to_chunk_clones = _id_to_leaf_idx_to_chunk_clones(
ids_and_extents
)
for ino_id, extent in ids_and_extents:
leaf_to_chunk_clones = id_to_leaf_idx_to_chunk_clones.get(ino_id, {})
new_chunks = []
for leaf_idx, (offset, length, extent) in enumerate(
extent.gen_trimmed_leaves()
):
chunk_clones = leaf_to_chunk_clones.get(leaf_idx, [])
assert isinstance(extent.content, Extent.Kind)
# If the chunk kind matches, merge into the previous chunk.
if new_chunks and new_chunks[-1].kind == extent.content:
prev_length = new_chunks[-1].length
prev_clones = new_chunks[-1].chunk_clones
else: # Otherwise, make a new one.
prev_length = 0
prev_clones = set()
new_chunks.append(None)
new_chunks[-1] = Chunk(
kind=extent.content,
length=length + prev_length,
chunk_clones=prev_clones,
)
new_chunks[-1].chunk_clones.update(
# Future: when switching to frozentype, __new__ should
# validate that clone offset & length are sane relative
# to the trimmed extent.
ChunkClone(
clone=clone,
# Subtract `offset` because `ChunkClone.offset` is
# Extent-relative, but in the actual file layout, the
# leaf Extent is trimmed further.
offset=clone_offset + prev_length - offset,
)
for clone_offset, clone in chunk_clones
)
# Future: `deepfrozen` was made for this:
yield ino_id, tuple(
Chunk(
kind=c.kind,
length=c.length,
chunk_clones=frozenset(c.chunk_clones),
)
for c in new_chunks
)
| 43.310263 | 80 | 0.648372 | [
"MIT"
] | SaurabhAgarwala/antlir | antlir/btrfs_diff/extents_to_chunks.py | 18,147 | Python |
from sympy.vector.coordsysrect import CoordSysCartesian
from sympy.vector.scalar import BaseScalar
from sympy import sin, cos, pi, ImmutableMatrix as Matrix, \
symbols, simplify, zeros, expand
from sympy.vector.functions import express
from sympy.vector.point import Point
from sympy.vector.vector import Vector
from sympy.vector.orienters import (AxisOrienter, BodyOrienter,
SpaceOrienter, QuaternionOrienter)
a, b, c, q = symbols('a b c q')
q1, q2, q3, q4 = symbols('q1 q2 q3 q4')
def test_func_args():
A = CoordSysCartesian('A')
assert A.x.func(*A.x.args) == A.x
expr = 3*A.x + 4*A.y
assert expr.func(*expr.args) == expr
assert A.i.func(*A.i.args) == A.i
v = A.x*A.i + A.y*A.j + A.z*A.k
assert v.func(*v.args) == v
assert A.origin.func(*A.origin.args) == A.origin
def test_coordsyscartesian_equivalence():
A = CoordSysCartesian('A')
A1 = CoordSysCartesian('A')
assert A1 == A
B = CoordSysCartesian('B')
assert A != B
def test_orienters():
A = CoordSysCartesian('A')
axis_orienter = AxisOrienter(a, A.k)
body_orienter = BodyOrienter(a, b, c, '123')
space_orienter = SpaceOrienter(a, b, c, '123')
q_orienter = QuaternionOrienter(q1, q2, q3, q4)
assert axis_orienter.rotation_matrix(A) == Matrix([
[ cos(a), sin(a), 0],
[-sin(a), cos(a), 0],
[ 0, 0, 1]])
assert body_orienter.rotation_matrix() == Matrix([
[ cos(b)*cos(c), sin(a)*sin(b)*cos(c) + sin(c)*cos(a),
sin(a)*sin(c) - sin(b)*cos(a)*cos(c)],
[-sin(c)*cos(b), -sin(a)*sin(b)*sin(c) + cos(a)*cos(c),
sin(a)*cos(c) + sin(b)*sin(c)*cos(a)],
[ sin(b), -sin(a)*cos(b),
cos(a)*cos(b)]])
assert space_orienter.rotation_matrix() == Matrix([
[cos(b)*cos(c), sin(c)*cos(b), -sin(b)],
[sin(a)*sin(b)*cos(c) - sin(c)*cos(a),
sin(a)*sin(b)*sin(c) + cos(a)*cos(c), sin(a)*cos(b)],
[sin(a)*sin(c) + sin(b)*cos(a)*cos(c), -sin(a)*cos(c) +
sin(b)*sin(c)*cos(a), cos(a)*cos(b)]])
assert q_orienter.rotation_matrix() == Matrix([
[q1**2 + q2**2 - q3**2 - q4**2, 2*q1*q4 + 2*q2*q3,
-2*q1*q3 + 2*q2*q4],
[-2*q1*q4 + 2*q2*q3, q1**2 - q2**2 + q3**2 - q4**2,
2*q1*q2 + 2*q3*q4],
[2*q1*q3 + 2*q2*q4,
-2*q1*q2 + 2*q3*q4, q1**2 - q2**2 - q3**2 + q4**2]])
def test_coordinate_vars():
"""
Tests the coordinate variables functionality with respect to
reorientation of coordinate systems.
"""
A = CoordSysCartesian('A')
# Note that the name given on the lhs is different from A.x._name
assert BaseScalar('A.x', 0, A, 'A_x', r'\mathbf{{x}_{A}}') == A.x
assert BaseScalar('A.y', 1, A, 'A_y', r'\mathbf{{y}_{A}}') == A.y
assert BaseScalar('A.z', 2, A, 'A_z', r'\mathbf{{z}_{A}}') == A.z
assert BaseScalar('A.x', 0, A, 'A_x', r'\mathbf{{x}_{A}}').__hash__() == A.x.__hash__()
assert isinstance(A.x, BaseScalar) and \
isinstance(A.y, BaseScalar) and \
isinstance(A.z, BaseScalar)
assert A.x*A.y == A.y*A.x
assert A.scalar_map(A) == {A.x: A.x, A.y: A.y, A.z: A.z}
assert A.x.system == A
assert A.x.diff(A.x) == 1
B = A.orient_new_axis('B', q, A.k)
assert B.scalar_map(A) == {B.z: A.z, B.y: -A.x*sin(q) + A.y*cos(q),
B.x: A.x*cos(q) + A.y*sin(q)}
assert A.scalar_map(B) == {A.x: B.x*cos(q) - B.y*sin(q),
A.y: B.x*sin(q) + B.y*cos(q), A.z: B.z}
assert express(B.x, A, variables=True) == A.x*cos(q) + A.y*sin(q)
assert express(B.y, A, variables=True) == -A.x*sin(q) + A.y*cos(q)
assert express(B.z, A, variables=True) == A.z
assert expand(express(B.x*B.y*B.z, A, variables=True)) == \
expand(A.z*(-A.x*sin(q) + A.y*cos(q))*(A.x*cos(q) + A.y*sin(q)))
assert express(B.x*B.i + B.y*B.j + B.z*B.k, A) == \
(B.x*cos(q) - B.y*sin(q))*A.i + (B.x*sin(q) + \
B.y*cos(q))*A.j + B.z*A.k
assert simplify(express(B.x*B.i + B.y*B.j + B.z*B.k, A, \
variables=True)) == \
A.x*A.i + A.y*A.j + A.z*A.k
assert express(A.x*A.i + A.y*A.j + A.z*A.k, B) == \
(A.x*cos(q) + A.y*sin(q))*B.i + \
(-A.x*sin(q) + A.y*cos(q))*B.j + A.z*B.k
assert simplify(express(A.x*A.i + A.y*A.j + A.z*A.k, B, \
variables=True)) == \
B.x*B.i + B.y*B.j + B.z*B.k
N = B.orient_new_axis('N', -q, B.k)
assert N.scalar_map(A) == \
{N.x: A.x, N.z: A.z, N.y: A.y}
C = A.orient_new_axis('C', q, A.i + A.j + A.k)
mapping = A.scalar_map(C)
assert mapping[A.x] == (C.x*(2*cos(q) + 1)/3 +
C.y*(-2*sin(q + pi/6) + 1)/3 +
C.z*(-2*cos(q + pi/3) + 1)/3)
assert mapping[A.y] == (C.x*(-2*cos(q + pi/3) + 1)/3 +
C.y*(2*cos(q) + 1)/3 +
C.z*(-2*sin(q + pi/6) + 1)/3)
assert mapping[A.z] == (C.x*(-2*sin(q + pi/6) + 1)/3 +
C.y*(-2*cos(q + pi/3) + 1)/3 +
C.z*(2*cos(q) + 1)/3)
D = A.locate_new('D', a*A.i + b*A.j + c*A.k)
assert D.scalar_map(A) == {D.z: A.z - c, D.x: A.x - a, D.y: A.y - b}
E = A.orient_new_axis('E', a, A.k, a*A.i + b*A.j + c*A.k)
assert A.scalar_map(E) == {A.z: E.z + c,
A.x: E.x*cos(a) - E.y*sin(a) + a,
A.y: E.x*sin(a) + E.y*cos(a) + b}
assert E.scalar_map(A) == {E.x: (A.x - a)*cos(a) + (A.y - b)*sin(a),
E.y: (-A.x + a)*sin(a) + (A.y - b)*cos(a),
E.z: A.z - c}
F = A.locate_new('F', Vector.zero)
assert A.scalar_map(F) == {A.z: F.z, A.x: F.x, A.y: F.y}
def test_rotation_matrix():
N = CoordSysCartesian('N')
A = N.orient_new_axis('A', q1, N.k)
B = A.orient_new_axis('B', q2, A.i)
C = B.orient_new_axis('C', q3, B.j)
D = N.orient_new_axis('D', q4, N.j)
E = N.orient_new_space('E', q1, q2, q3, '123')
F = N.orient_new_quaternion('F', q1, q2, q3, q4)
G = N.orient_new_body('G', q1, q2, q3, '123')
assert N.rotation_matrix(C) == Matrix([
[- sin(q1) * sin(q2) * sin(q3) + cos(q1) * cos(q3), - sin(q1) *
cos(q2), sin(q1) * sin(q2) * cos(q3) + sin(q3) * cos(q1)], \
[sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), \
cos(q1) * cos(q2), sin(q1) * sin(q3) - sin(q2) * cos(q1) * \
cos(q3)], [- sin(q3) * cos(q2), sin(q2), cos(q2) * cos(q3)]])
test_mat = D.rotation_matrix(C) - Matrix(
[[cos(q1) * cos(q3) * cos(q4) - sin(q3) * (- sin(q4) * cos(q2) +
sin(q1) * sin(q2) * cos(q4)), - sin(q2) * sin(q4) - sin(q1) *
cos(q2) * cos(q4), sin(q3) * cos(q1) * cos(q4) + cos(q3) * \
(- sin(q4) * cos(q2) + sin(q1) * sin(q2) * cos(q4))], \
[sin(q1) * cos(q3) + sin(q2) * sin(q3) * cos(q1), cos(q1) * \
cos(q2), sin(q1) * sin(q3) - sin(q2) * cos(q1) * cos(q3)], \
[sin(q4) * cos(q1) * cos(q3) - sin(q3) * (cos(q2) * cos(q4) + \
sin(q1) * sin(q2) * \
sin(q4)), sin(q2) *
cos(q4) - sin(q1) * sin(q4) * cos(q2), sin(q3) * \
sin(q4) * cos(q1) + cos(q3) * (cos(q2) * cos(q4) + \
sin(q1) * sin(q2) * sin(q4))]])
assert test_mat.expand() == zeros(3, 3)
assert E.rotation_matrix(N) == Matrix(
[[cos(q2)*cos(q3), sin(q3)*cos(q2), -sin(q2)],
[sin(q1)*sin(q2)*cos(q3) - sin(q3)*cos(q1), \
sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3), sin(q1)*cos(q2)], \
[sin(q1)*sin(q3) + sin(q2)*cos(q1)*cos(q3), - \
sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1), cos(q1)*cos(q2)]])
assert F.rotation_matrix(N) == Matrix([[
q1**2 + q2**2 - q3**2 - q4**2,
2*q1*q4 + 2*q2*q3, -2*q1*q3 + 2*q2*q4],[ -2*q1*q4 + 2*q2*q3,
q1**2 - q2**2 + q3**2 - q4**2, 2*q1*q2 + 2*q3*q4],
[2*q1*q3 + 2*q2*q4,
-2*q1*q2 + 2*q3*q4,
q1**2 - q2**2 - q3**2 + q4**2]])
assert G.rotation_matrix(N) == Matrix([[
cos(q2)*cos(q3), sin(q1)*sin(q2)*cos(q3) + sin(q3)*cos(q1),
sin(q1)*sin(q3) - sin(q2)*cos(q1)*cos(q3)], [
-sin(q3)*cos(q2), -sin(q1)*sin(q2)*sin(q3) + cos(q1)*cos(q3),
sin(q1)*cos(q3) + sin(q2)*sin(q3)*cos(q1)],[
sin(q2), -sin(q1)*cos(q2), cos(q1)*cos(q2)]])
def test_vector():
"""
Tests the effects of orientation of coordinate systems on
basic vector operations.
"""
N = CoordSysCartesian('N')
A = N.orient_new_axis('A', q1, N.k)
B = A.orient_new_axis('B', q2, A.i)
C = B.orient_new_axis('C', q3, B.j)
#Test to_matrix
v1 = a*N.i + b*N.j + c*N.k
assert v1.to_matrix(A) == Matrix([[ a*cos(q1) + b*sin(q1)],
[-a*sin(q1) + b*cos(q1)],
[ c]])
#Test dot
assert N.i.dot(A.i) == cos(q1)
assert N.i.dot(A.j) == -sin(q1)
assert N.i.dot(A.k) == 0
assert N.j.dot(A.i) == sin(q1)
assert N.j.dot(A.j) == cos(q1)
assert N.j.dot(A.k) == 0
assert N.k.dot(A.i) == 0
assert N.k.dot(A.j) == 0
assert N.k.dot(A.k) == 1
assert N.i.dot(A.i + A.j) == -sin(q1) + cos(q1) == \
(A.i + A.j).dot(N.i)
assert A.i.dot(C.i) == cos(q3)
assert A.i.dot(C.j) == 0
assert A.i.dot(C.k) == sin(q3)
assert A.j.dot(C.i) == sin(q2)*sin(q3)
assert A.j.dot(C.j) == cos(q2)
assert A.j.dot(C.k) == -sin(q2)*cos(q3)
assert A.k.dot(C.i) == -cos(q2)*sin(q3)
assert A.k.dot(C.j) == sin(q2)
assert A.k.dot(C.k) == cos(q2)*cos(q3)
#Test cross
assert N.i.cross(A.i) == sin(q1)*A.k
assert N.i.cross(A.j) == cos(q1)*A.k
assert N.i.cross(A.k) == -sin(q1)*A.i - cos(q1)*A.j
assert N.j.cross(A.i) == -cos(q1)*A.k
assert N.j.cross(A.j) == sin(q1)*A.k
assert N.j.cross(A.k) == cos(q1)*A.i - sin(q1)*A.j
assert N.k.cross(A.i) == A.j
assert N.k.cross(A.j) == -A.i
assert N.k.cross(A.k) == Vector.zero
assert N.i.cross(A.i) == sin(q1)*A.k
assert N.i.cross(A.j) == cos(q1)*A.k
assert N.i.cross(A.i + A.j) == sin(q1)*A.k + cos(q1)*A.k
assert (A.i + A.j).cross(N.i) == (-sin(q1) - cos(q1))*N.k
assert A.i.cross(C.i) == sin(q3)*C.j
assert A.i.cross(C.j) == -sin(q3)*C.i + cos(q3)*C.k
assert A.i.cross(C.k) == -cos(q3)*C.j
assert C.i.cross(A.i) == (-sin(q3)*cos(q2))*A.j + \
(-sin(q2)*sin(q3))*A.k
assert C.j.cross(A.i) == (sin(q2))*A.j + (-cos(q2))*A.k
assert express(C.k.cross(A.i), C).trigsimp() == cos(q3)*C.j
def test_orient_new_methods():
N = CoordSysCartesian('N')
orienter1 = AxisOrienter(q4, N.j)
orienter2 = SpaceOrienter(q1, q2, q3, '123')
orienter3 = QuaternionOrienter(q1, q2, q3, q4)
orienter4 = BodyOrienter(q1, q2, q3, '123')
D = N.orient_new('D', (orienter1, ))
E = N.orient_new('E', (orienter2, ))
F = N.orient_new('F', (orienter3, ))
G = N.orient_new('G', (orienter4, ))
assert D == N.orient_new_axis('D', q4, N.j)
assert E == N.orient_new_space('E', q1, q2, q3, '123')
assert F == N.orient_new_quaternion('F', q1, q2, q3, q4)
assert G == N.orient_new_body('G', q1, q2, q3, '123')
def test_locatenew_point():
"""
Tests Point class, and locate_new method in CoordSysCartesian.
"""
A = CoordSysCartesian('A')
assert isinstance(A.origin, Point)
v = a*A.i + b*A.j + c*A.k
C = A.locate_new('C', v)
assert C.origin.position_wrt(A) == \
C.position_wrt(A) == \
C.origin.position_wrt(A.origin) == v
assert A.origin.position_wrt(C) == \
A.position_wrt(C) == \
A.origin.position_wrt(C.origin) == -v
assert A.origin.express_coordinates(C) == (-a, -b, -c)
p = A.origin.locate_new('p', -v)
assert p.express_coordinates(A) == (-a, -b, -c)
assert p.position_wrt(C.origin) == p.position_wrt(C) == \
-2 * v
p1 = p.locate_new('p1', 2*v)
assert p1.position_wrt(C.origin) == Vector.zero
assert p1.express_coordinates(C) == (0, 0, 0)
p2 = p.locate_new('p2', A.i)
assert p1.position_wrt(p2) == 2*v - A.i
assert p2.express_coordinates(C) == (-2*a + 1, -2*b, -2*c)
def test_evalf():
A = CoordSysCartesian('A')
v = 3*A.i + 4*A.j + a*A.k
assert v.n() == v.evalf()
assert v.evalf(subs={a:1}) == v.subs(a, 1).evalf()
| 43.071186 | 91 | 0.488745 | [
"BSD-3-Clause"
] | Anshnrag02/sympy | sympy/vector/tests/test_coordsysrect.py | 12,706 | Python |
# Webhooks for external integrations.
from django.http import HttpRequest, HttpResponse
from django.utils.translation import ugettext as _
from zerver.models import get_client
from zerver.lib.actions import check_send_stream_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
import ujson
from typing import Any, Dict
@api_key_only_webhook_view('Semaphore')
@has_request_variables
def api_semaphore_webhook(request, user_profile,
payload=REQ(argument_type='body'),
stream=REQ(default='builds')):
# type: (HttpRequest, UserProfile, Dict[str, Any], str) -> HttpResponse
# semaphore only gives the last commit, even if there were multiple commits
# since the last build
branch_name = payload["branch_name"]
project_name = payload["project_name"]
result = payload["result"]
event = payload["event"]
commit_id = payload["commit"]["id"]
commit_url = payload["commit"]["url"]
author_email = payload["commit"]["author_email"]
message = payload["commit"]["message"]
if event == "build":
build_url = payload["build_url"]
build_number = payload["build_number"]
content = u"[build %s](%s): %s\n" % (build_number, build_url, result)
elif event == "deploy":
build_url = payload["build_html_url"]
build_number = payload["build_number"]
deploy_url = payload["html_url"]
deploy_number = payload["number"]
server_name = payload["server_name"]
content = u"[deploy %s](%s) of [build %s](%s) on server %s: %s\n" % \
(deploy_number, deploy_url, build_number, build_url, server_name, result)
else: # should never get here
content = u"%s: %s\n" % (event, result)
content += "!avatar(%s) [`%s`](%s): %s" % (author_email, commit_id[:7],
commit_url, message)
subject = u"%s/%s" % (project_name, branch_name)
check_send_stream_message(user_profile, request.client, stream, subject, content)
return json_success()
| 38.614035 | 91 | 0.664244 | [
"Apache-2.0"
] | Rishabh570/zulip | zerver/webhooks/semaphore/view.py | 2,201 | Python |
#!/usr/bin/env python
#$Id: rotate_molecule.py,v 1.2.10.1 2016/02/11 09:24:08 annao Exp $
import os
from MolKit import Read
from MolKit.pdbWriter import PdbWriter, PdbqsWriter, PdbqWriter, PdbqtWriter
from mglutil.math.rotax import rotax
import numpy
if __name__ == '__main__':
import sys
import getopt
def usage():
"Print helpful, accurate usage statement to stdout."
print("Usage: rotate_molecule.py -f filename")
print()
print(" Description of command...")
print(" [-f] filename")
print(" Optional parameters:")
print(" [-o] alternative output filename")
print(" (default is 'rotated_' +filename)")
print(" [-y] rotate around the y axis")
print(" (default is rotation around the z axis)")
print(" [-x] rotate around the x axis")
print(" (default is rotation around the z axis)")
print(" [-u] user-defined axis of rotation '1.0,2.0,-6.2'")
print(" (default is rotation around the z axis)")
print(" [-a] angle for rotation about axis ")
print(" (default is rotation around the z axis)")
print(" [-v] verbose output")
# process command arguments
try:
opt_list, args = getopt.getopt(sys.argv[1:], 'f:o:xyu:a:v')
except getopt.GetoptError as msg:
print('rotate_molecule.py: %s' %msg)
usage()
sys.exit(2)
# initialize required parameters
#-f: pdb_filename_stem
filename = None
# optional parameters
verbose = None
outputfilename = None
rotation = 'z'
#arbitrary axis angle for rotation
axis = None
angle = None
#'f:o:v'
for o, a in opt_list:
print("o=", o, " a=",a)
if o in ('-f', '--f'):
filename = a
if verbose: print('set filename to ', filename)
outputfilename = 'rotated_' + filename
if o in ('-o', '--o'):
outputfilename = a
if verbose:
print('set output outputfilename to ', a)
if o in ('-x', '--x'):
rotation = 'x'
if verbose: print('set rotation to ', rotation)
if o in ('-y', '--y'):
rotation = 'y'
if verbose: print('set rotation to ', rotation)
if o in ('-u', '--u'):
axis = a
if verbose: print('set user-defined axis to ', axis)
if o in ('-a', '--a'):
angle = a
if verbose: print('set angle for rotation to ', angle)
if o in ('-v', '--v'):
verbose = True
if verbose: print('set verbose to ', True)
if o in ('-h', '--'):
usage()
sys.exit()
if not filename:
print('rotate_molecule: filename must be specified.')
usage()
sys.exit()
mol = Read(filename)[0]
if verbose: print('read ', filename)
filetype = os.path.splitext(os.path.basename(filename))[1]
if verbose: print("filetype=", filetype)
writer = None
if filetype=='.pdbqt':
writer = PdbqtWriter()
elif filetype=='.pdbq':
writer = PdbqWriter()
elif filetype=='.pdbqs':
writer = PdbqsWriter()
elif filetype=='.pdb':
writer = PdbWriter()
else:
print('Sorry! Unable to write this filetype->', filetype)
    crds = numpy.array(mol.allAtoms.coords)
    center = numpy.add.reduce(crds)/len(mol.allAtoms)
crds = crds - center
crds = crds.tolist()
mol.allAtoms.updateCoords(crds)
lenCoords = len(crds)
#rotate the atoms here
if axis is not None and angle is not None:
rot = (float(angle)* 3.14159/180.)%(2 * numpy.pi)
x = numpy.array([0.,0.,0.])
y = numpy.array(list(map(float,axis.split(','))))
matrix = rotax(x,y, rot)
_ones = numpy.ones(lenCoords, 'f')
_ones.shape = (lenCoords,1)
mov_coords = numpy.concatenate((crds, _ones),1)
newcoords = numpy.dot(mov_coords, matrix)
nc = newcoords[:,:3].astype('f')
for i in range(lenCoords):
mol.allAtoms[i]._coords[0] = nc[i].tolist()
else:
if rotation=='z':
#for rotation around z-axis:
for a in mol.allAtoms:
a._coords[0][0] = -1.*a._coords[0][0]
a._coords[0][1] = -1.*a._coords[0][1]
elif rotation=='y':
#for rotation around y-axis:
for a in mol.allAtoms:
a._coords[0][0] = -1.*a._coords[0][0]
a._coords[0][2] = -1.*a._coords[0][2]
elif rotation=='x':
#for rotation around x-axis:
for a in mol.allAtoms:
a._coords[0][1] = -1.*a._coords[0][1]
a._coords[0][2] = -1.*a._coords[0][2]
ncrds = numpy.array(mol.allAtoms.coords)
ncrds = ncrds + center
ncrds = ncrds.tolist()
mol.allAtoms.updateCoords(ncrds)
if writer:
outptr = open(outputfilename, 'w')
liglines = mol.parser.allLines
ctr = 0
for l in liglines:
if l.find("ATOM")!=0 and l.find("HETATM")!=0:
outptr.write(l)
else:
writer.write_atom(outptr, mol.allAtoms[ctr])
ctr += 1
outptr.close()
# To execute this command type:
# rotate_molecule.py -f filename [-o outputfilename -u axis -a angle to rotate] -v
| 33.6 | 82 | 0.537879 | [
"BSD-3-Clause"
] | e-mayo/autodocktools-prepare-py3k | AutoDockTools/Utilities24/rotate_molecule.py | 5,544 | Python |
import subprocess
import socket
import tempfile
import redis
import time
import os
import itertools
import sys
# Environment variable pointing to the redis executable
REDIS_PATH_ENVVAR = 'REDIS_PATH'
def get_random_port():
sock = socket.socket()
sock.listen(0)
_, port = sock.getsockname()
sock.close()
return port
class DisposableRedis(object):
def __init__(self, port=None, path='redis-server', **extra_args):
"""
        :param port: port number to start the redis server on. Specify None to have a free port chosen automatically.
        :type port: int|None
        :param extra_args: any extra keyword arguments are passed to the redis server as ``--key value`` pairs
"""
self._port = port
# this will hold the actual port the redis is listening on. It's equal to `_port` unless `_port` is None
# in that case `port` is randomly generated
self.port = None
self.extra_args = list(itertools.chain(
*(('--%s'%k, v) for k, v in extra_args.items())
))
self.path = os.getenv(REDIS_PATH_ENVVAR, path)
def __enter__(self):
if self._port is None:
self.port = get_random_port()
else:
self.port = self._port
args = [self.path,
'--port', str(self.port),
'--dir', tempfile.gettempdir(),
'--save', ''] + self.extra_args
self.process = subprocess.Popen(
args,
#cwd=os.getcwd(),
stdin=subprocess.PIPE,
stdout=open(os.devnull, 'w')
# stdout=sys.stdout,
# env=os.environ.copy()
)
while True:
try:
self.client().ping()
break
except redis.ConnectionError:
self.process.poll()
if self.process.returncode is not None:
raise RuntimeError("Process has exited")
time.sleep(0.1)
return self.client()
def __exit__(self, exc_type, exc_val, exc_tb):
self.process.terminate()
def client(self):
"""
:rtype: redis.StrictRedis
"""
return redis.StrictRedis(port=self.port, decode_responses=True)
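# Example usage (a sketch; assumes a `redis-server` binary is on PATH or that
# the REDIS_PATH environment variable points to one):
#
#     with DisposableRedis() as r:
#         r.set('foo', 'bar')
#         assert r.get('foo') == 'bar'
#
# A throwaway server is started on a free port for the duration of the `with`
# block and terminated on exit.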
| 27.439024 | 112 | 0.569333 | [
"BSD-2-Clause"
] | MPalarya/RAMP | RAMP/disposableredis/__init__.py | 2,250 | Python |
# Copyright (c) 2020, RTE (https://www.rte-france.com)
# See AUTHORS.txt
# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0.
# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file,
# you can obtain one at http://mozilla.org/MPL/2.0/.
# SPDX-License-Identifier: MPL-2.0
# This file is part of LightSim2grid, LightSim2grid implements a c++ backend targeting the Grid2Op platform.
import unittest
import numpy as np
import pdb
import pandapower.networks as pn
from lightsim2grid_cpp import PandaPowerConverter
class MakeTests(unittest.TestCase):
def setUp(self):
self.converter = PandaPowerConverter()
self.tol = 1e-8
def assert_equal(self, tmp, ref):
assert np.max(np.abs(tmp - ref)) <= self.tol
assert np.sum(np.abs(tmp - ref)) <= tmp.shape[0] * self.tol
def test_case6_data(self):
net = pn.case6ww()
self.converter.set_sn_mva(net.sn_mva) # TODO raise an error if not set !
self.converter.set_f_hz(net.f_hz)
line_r, line_x, line_h = self.converter.get_line_param(
net.line["r_ohm_per_km"].values * net.line["length_km"].values,
net.line["x_ohm_per_km"].values * net.line["length_km"].values,
net.line["c_nf_per_km"].values * net.line["length_km"].values,
net.line["g_us_per_km"].values * net.line["length_km"].values,
net.bus.loc[net.line["from_bus"]]["vn_kv"],
net.bus.loc[net.line["to_bus"]]["vn_kv"]
)
res_r = np.array([0.001, 0.0005, 0.001, 0.0008, 0.0005, 0.0005, 0.001, 0.0007, 0.0012, 0.0002, 0.002])
res_x = np.array([0.002, 0.002, 0.003, 0.003, 0.0025, 0.001, 0.003, 0.002, 0.0026, 0.001, 0.004])
res_h = np.array([4.+0.j, 4.+0.j, 6.+0.j, 6.+0.j, 6.+0.j, 2.+0.j, 4.+0.j, 5.+0.j, 5.+0.j, 2.+0.j, 8.+0.j])
self.assert_equal(line_r, res_r)
self.assert_equal(line_x, res_x)
self.assert_equal(line_h, res_h)
def test_case30_data(self):
net = pn.case30()
self.converter.set_sn_mva(net.sn_mva) # TODO raise an error if not set !
self.converter.set_f_hz(net.f_hz)
line_r, line_x, line_h = self.converter.get_line_param(
net.line["r_ohm_per_km"].values * net.line["length_km"].values,
net.line["x_ohm_per_km"].values * net.line["length_km"].values,
net.line["c_nf_per_km"].values * net.line["length_km"].values,
net.line["g_us_per_km"].values * net.line["length_km"].values,
net.bus.loc[net.line["from_bus"]]["vn_kv"],
net.bus.loc[net.line["to_bus"]]["vn_kv"]
)
res_r = np.array([0.0002, 0.0005, 0. , 0. , 0. , 0. , 0. , 0. ,
0.0012, 0.0007, 0.0009, 0.0022, 0.0006, 0.0008, 0.0011, 0.0006,
0.0003, 0.0009, 0.0003, 0.0003, 0.0007, 0.0001, 0.001 , 0.0001,
0.0012, 0.0013, 0.0019, 0.0025, 0.0011, 0. , 0.0022, 0.0032,
0.0024, 0.0006, 0.0005, 0.0002, 0.0006, 0.0001, 0.0005, 0.0003,
0.0001])
res_x = np.array([0.0006, 0.0019, 0.0021, 0.0056, 0.0021, 0.0011, 0.0026, 0.0014,
0.0026, 0.0013, 0.002 , 0.002 , 0.0017, 0.0019, 0.0022, 0.0013,
0.0007, 0.0021, 0.0008, 0.0007, 0.0015, 0.0002, 0.002 , 0.0004,
0.0018, 0.0027, 0.0033, 0.0038, 0.0021, 0.004 , 0.0042, 0.006 ,
0.0045, 0.002 , 0.002 , 0.0006, 0.0018, 0.0004, 0.0012, 0.0008,
0.0004])
res_h = np.array([3.+0.j, 2.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 2.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j,
0.+0.j, 2.+0.j, 2.+0.j, 1.+0.j, 2.+0.j, 0.+0.j, 1.+0.j, 1.+0.j,
0.+0.j])
self.assert_equal(line_r, res_r)
self.assert_equal(line_x, res_x)
self.assert_equal(line_h, res_h)
def test_case118_data(self):
net = pn.case118()
self.converter.set_sn_mva(net.sn_mva) # TODO raise an error if not set !
self.converter.set_f_hz(net.f_hz)
line_r, line_x, line_h = self.converter.get_line_param(
net.line["r_ohm_per_km"].values * net.line["length_km"].values,
net.line["x_ohm_per_km"].values * net.line["length_km"].values,
net.line["c_nf_per_km"].values * net.line["length_km"].values,
net.line["g_us_per_km"].values * net.line["length_km"].values,
net.bus.loc[net.line["from_bus"]]["vn_kv"],
net.bus.loc[net.line["to_bus"]]["vn_kv"]
)
res_r = np.array([3.030e-04, 1.290e-04, 5.950e-05, 8.820e-05, 4.880e-04, 4.460e-04,
8.660e-05, 4.010e-04, 4.280e-04, 4.050e-04, 1.230e-04, 4.440e-04,
3.090e-04, 1.870e-04, 6.010e-04, 3.760e-05, 5.460e-05, 1.700e-04,
2.940e-04, 1.560e-04, 2.980e-04, 1.120e-04, 6.250e-04, 4.300e-04,
4.840e-04, 3.020e-04, 3.500e-04, 2.000e-04, 2.390e-04, 1.390e-04,
5.180e-04, 2.380e-04, 2.540e-04, 9.900e-05, 3.930e-04, 8.620e-05,
3.870e-04, 2.580e-04, 4.810e-04, 2.230e-04, 1.320e-04, 3.560e-04,
1.620e-04, 2.690e-04, 1.830e-04, 2.380e-04, 2.225e-04, 4.540e-04,
6.480e-04, 1.780e-04, 1.710e-04, 1.730e-04, 3.970e-04, 1.800e-04,
2.770e-04, 1.230e-04, 2.460e-04, 2.150e-04, 1.600e-04, 4.510e-04,
4.660e-04, 5.350e-04, 6.050e-04, 9.940e-05, 1.400e-04, 5.300e-04,
2.610e-04, 5.300e-04, 7.440e-04, 1.050e-04, 3.906e-04, 2.780e-04,
2.200e-04, 2.470e-04, 9.130e-05, 6.150e-04, 1.350e-04, 1.640e-04,
2.300e-05, 5.950e-04, 3.290e-04, 1.450e-04, 1.640e-04, 2.120e-04,
1.320e-04, 1.760e-05, 4.540e-04, 1.230e-04, 1.119e-04, 2.520e-04,
1.200e-04, 1.830e-04, 2.090e-04, 3.420e-04, 1.350e-04, 1.560e-04,
2.410e-04, 3.180e-04, 1.913e-04, 2.370e-04, 4.310e-05, 7.990e-05,
4.740e-04, 1.080e-04, 3.170e-04, 2.980e-04, 2.290e-04, 1.190e-04,
3.800e-04, 7.520e-04, 2.240e-05, 1.100e-04, 4.150e-04, 8.710e-05,
2.560e-05, 3.210e-04, 5.930e-04, 4.640e-05, 4.590e-05, 1.840e-04,
1.450e-04, 5.550e-04, 4.100e-04, 6.080e-04, 4.130e-04, 2.240e-04,
4.000e-04, 3.800e-04, 6.010e-04, 2.440e-05, 1.910e-04, 7.150e-04,
7.150e-04, 6.840e-04, 1.790e-04, 2.670e-04, 4.860e-04, 2.030e-04,
4.050e-04, 2.630e-04, 2.580e-05, 7.300e-04, 8.690e-04, 1.690e-04,
2.750e-05, 4.880e-05, 3.430e-04, 4.740e-04, 3.430e-04, 2.550e-04,
5.030e-04, 2.090e-04, 8.250e-04, 8.030e-04, 4.739e-04, 3.170e-04,
3.280e-04, 2.640e-05, 1.230e-04, 8.240e-05, 1.720e-05, 9.010e-05,
2.030e-04, 2.690e-05, 1.800e-04, 1.800e-04, 4.820e-04, 2.580e-04,
2.240e-04, 8.440e-04, 9.850e-04, 3.000e-04, 2.210e-05])
res_x = np.array([9.990e-04, 4.240e-04, 1.960e-04, 3.550e-04, 1.960e-03, 1.800e-03,
4.540e-04, 1.323e-03, 1.410e-03, 1.220e-03, 4.060e-04, 1.480e-03,
1.010e-03, 6.160e-04, 1.999e-03, 1.240e-04, 2.440e-04, 4.850e-04,
1.050e-03, 7.040e-04, 8.530e-04, 3.665e-04, 1.320e-03, 1.480e-03,
1.600e-03, 6.410e-04, 1.230e-03, 1.020e-03, 1.730e-03, 7.120e-04,
1.880e-03, 9.970e-04, 8.360e-04, 5.050e-04, 1.581e-03, 3.400e-04,
1.272e-03, 8.480e-04, 1.580e-03, 7.320e-04, 4.340e-04, 1.820e-03,
5.300e-04, 8.690e-04, 9.340e-04, 1.080e-03, 7.310e-04, 2.060e-03,
2.950e-03, 5.800e-04, 5.470e-04, 8.850e-04, 1.790e-03, 8.130e-04,
1.262e-03, 5.590e-04, 1.120e-03, 7.070e-04, 5.250e-04, 2.040e-03,
1.584e-03, 1.625e-03, 2.290e-03, 3.780e-04, 5.470e-04, 1.830e-03,
7.030e-04, 1.830e-03, 2.444e-03, 2.880e-04, 1.813e-03, 7.620e-04,
7.550e-04, 6.400e-04, 3.010e-04, 2.030e-03, 6.120e-04, 7.410e-04,
1.040e-04, 1.950e-03, 1.400e-03, 4.810e-04, 5.440e-04, 8.340e-04,
4.370e-04, 7.980e-05, 1.801e-03, 5.050e-04, 4.930e-04, 1.170e-03,
3.940e-04, 8.490e-04, 9.700e-04, 1.590e-03, 4.920e-04, 8.000e-04,
1.080e-03, 1.630e-03, 8.550e-04, 9.430e-04, 5.040e-04, 8.600e-04,
1.563e-03, 3.310e-04, 1.153e-03, 9.850e-04, 7.550e-04, 5.400e-04,
1.244e-03, 2.470e-03, 1.020e-04, 4.970e-04, 1.420e-03, 2.680e-04,
9.400e-05, 1.060e-03, 1.680e-03, 5.400e-04, 2.080e-04, 6.050e-04,
4.870e-04, 1.830e-03, 1.350e-03, 2.454e-03, 1.681e-03, 9.010e-04,
1.356e-03, 1.270e-03, 1.890e-03, 3.050e-04, 6.250e-04, 3.230e-03,
3.230e-03, 1.860e-03, 5.050e-04, 7.520e-04, 1.370e-03, 5.880e-04,
1.635e-03, 1.220e-03, 3.220e-04, 2.890e-03, 2.910e-03, 7.070e-04,
9.550e-05, 1.510e-04, 9.660e-04, 1.340e-03, 9.660e-04, 7.190e-04,
2.293e-03, 6.880e-04, 2.510e-03, 2.390e-03, 2.158e-03, 1.450e-03,
1.500e-03, 1.350e-04, 5.610e-04, 3.760e-04, 2.000e-04, 9.860e-04,
6.820e-04, 3.020e-04, 9.190e-04, 9.190e-04, 2.180e-03, 1.170e-03,
1.015e-03, 2.778e-03, 3.240e-03, 1.270e-03, 4.115e-03])
res_h = np.array([ 2.54 +0.j, 1.082+0.j, 0.502+0.j, 0.878+0.j, 4.88 +0.j,
4.444+0.j, 1.178+0.j, 3.368+0.j, 3.6 +0.j, 12.4 +0.j,
1.034+0.j, 3.68 +0.j, 10.38 +0.j, 1.572+0.j, 4.978+0.j,
1.264+0.j, 0.648+0.j, 4.72 +0.j, 2.28 +0.j, 1.87 +0.j,
8.174+0.j, 3.796+0.j, 2.58 +0.j, 3.48 +0.j, 4.06 +0.j,
1.234+0.j, 2.76 +0.j, 2.76 +0.j, 4.7 +0.j, 1.934+0.j,
5.28 +0.j, 10.6 +0.j, 2.14 +0.j, 5.48 +0.j, 4.14 +0.j,
0.874+0.j, 3.268+0.j, 2.18 +0.j, 4.06 +0.j, 1.876+0.j,
1.11 +0.j, 4.94 +0.j, 5.44 +0.j, 2.3 +0.j, 2.54 +0.j,
2.86 +0.j, 1.876+0.j, 5.46 +0.j, 4.72 +0.j, 6.04 +0.j,
1.474+0.j, 2.4 +0.j, 4.76 +0.j, 2.16 +0.j, 3.28 +0.j,
1.464+0.j, 2.94 +0.j, 1.816+0.j, 5.36 +0.j, 5.41 +0.j,
4.07 +0.j, 4.08 +0.j, 6.2 +0.j, 0.986+0.j, 1.434+0.j,
4.72 +0.j, 1.844+0.j, 4.72 +0.j, 6.268+0.j, 0.76 +0.j,
4.61 +0.j, 2.02 +0.j, 2. +0.j, 6.2 +0.j, 0.768+0.j,
5.18 +0.j, 1.628+0.j, 1.972+0.j, 0.276+0.j, 5.02 +0.j,
3.58 +0.j, 1.198+0.j, 1.356+0.j, 2.14 +0.j, 4.44 +0.j,
0.21 +0.j, 4.66 +0.j, 1.298+0.j, 1.142+0.j, 2.98 +0.j,
1.01 +0.j, 2.16 +0.j, 2.46 +0.j, 4.04 +0.j, 4.98 +0.j,
8.64 +0.j, 2.84 +0.j, 17.64 +0.j, 2.16 +0.j, 2.38 +0.j,
51.4 +0.j, 90.8 +0.j, 3.99 +0.j, 0.83 +0.j, 11.73 +0.j,
2.51 +0.j, 1.926+0.j, 1.426+0.j, 3.194+0.j, 6.32 +0.j,
0.268+0.j, 1.318+0.j, 3.66 +0.j, 0.568+0.j, 0.984+0.j,
2.7 +0.j, 4.2 +0.j, 42.2 +0.j, 0.55 +0.j, 1.552+0.j,
1.222+0.j, 4.66 +0.j, 3.44 +0.j, 6.068+0.j, 4.226+0.j,
2.24 +0.j, 3.32 +0.j, 3.16 +0.j, 4.72 +0.j, 116.2 +0.j,
1.604+0.j, 8.6 +0.j, 8.6 +0.j, 4.44 +0.j, 1.258+0.j,
1.874+0.j, 3.42 +0.j, 1.396+0.j, 4.058+0.j, 3.1 +0.j,
123. +0.j, 7.38 +0.j, 7.3 +0.j, 2.02 +0.j, 0.732+0.j,
0.374+0.j, 2.42 +0.j, 3.32 +0.j, 2.42 +0.j, 1.788+0.j,
5.98 +0.j, 1.748+0.j, 5.69 +0.j, 5.36 +0.j, 5.646+0.j,
3.76 +0.j, 3.88 +0.j, 1.456+0.j, 1.468+0.j, 0.98 +0.j,
21.6 +0.j, 104.6 +0.j, 1.738+0.j, 38. +0.j, 2.48 +0.j,
2.48 +0.j, 5.78 +0.j, 3.1 +0.j, 2.682+0.j, 7.092+0.j,
8.28 +0.j, 12.2 +0.j, 10.198+0.j])
self.assert_equal(line_r, res_r)
self.assert_equal(line_x, res_x)
self.assert_equal(line_h, res_h)
pp_net = net
# fix the missing values
tap_step_pct = 1.0 * pp_net.trafo["tap_step_percent"].values
tap_step_pct[~np.isfinite(tap_step_pct)] = 0.
tap_pos = 1.0 * pp_net.trafo["tap_pos"].values
tap_pos[~np.isfinite(tap_pos)] = 0.
is_tap_hv_side = pp_net.trafo["tap_side"].values == "hv"
is_tap_hv_side[~np.isfinite(is_tap_hv_side)] = True
tap_angles_ = 1.0 * pp_net.trafo["tap_step_degree"].values
tap_angles_[~np.isfinite(tap_angles_)] = 0.
tap_angles_ = np.deg2rad(tap_angles_)
trafo_r, trafo_x, trafo_b = self.converter.get_trafo_param(tap_step_pct,
tap_pos,
tap_angles_, # in radian !
is_tap_hv_side,
pp_net.bus.loc[pp_net.trafo["hv_bus"]]["vn_kv"],
pp_net.bus.loc[pp_net.trafo["lv_bus"]]["vn_kv"],
pp_net.trafo["vk_percent"].values,
pp_net.trafo["vkr_percent"].values,
pp_net.trafo["sn_mva"].values,
pp_net.trafo["pfe_kw"].values,
pp_net.trafo["i0_percent"].values,
)
trafo_r_res = np.array([0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 2.81494977e-04,
3.39887086e-06, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 1.37295648e-05, 0.00000000e+00,
1.73571860e-05])
trafo_x_res = np.array([2.67000000e-04, 3.82000000e-04, 3.70000000e-04, 2.06930358e-03,
4.04933224e-05, 3.88000000e-04, 3.75000000e-04, 3.86000000e-04,
2.68000000e-04, 3.70000000e-04, 1.59594718e-04, 3.70000000e-04,
2.01181945e-04])
trafo_h_res = np.array([ 0. -0.j , 0. -0.j ,
0. -0.j , 4.4602909 -0.00140652j,
16.40272367-0.00022869j, 0. -0.j ,
0. -0.j , 0. -0.j ,
0. -0.j , 0. -0.j ,
63.96323106-0.01411497j, 0. -0.j ,
81.1310369 -0.02879733j])
self.assert_equal(trafo_r, trafo_r_res)
self.assert_equal(trafo_x, trafo_x_res)
self.assert_equal(trafo_b, trafo_h_res)
if __name__ == "__main__":
unittest.main() | 71.242553 | 115 | 0.443256 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | BDonnot/lightsim2grid | lightsim2grid/tests/test_DataConverter.py | 16,742 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .video_record import VideoRecord
from datetime import timedelta
import time
def timestamp_to_sec(timestamp):
x = time.strptime(timestamp, '%H:%M:%S.%f')
sec = float(timedelta(hours=x.tm_hour,
minutes=x.tm_min,
seconds=x.tm_sec).total_seconds()) + float(
timestamp.split('.')[-1]) / 100
return sec
class EpicKitchensVideoRecord(VideoRecord):
def __init__(self, tup):
self._index = str(tup[0])
self._series = tup[1]
@property
def participant(self):
return self._series['participant_id']
@property
def untrimmed_video_name(self):
return self._series['video_id']
@property
def start_frame(self):
return int(round(timestamp_to_sec(self._series['start_timestamp']) * self.fps))
@property
def end_frame(self):
return int(round(timestamp_to_sec(self._series['stop_timestamp']) * self.fps))
@property
def fps(self):
is_100 = len(self.untrimmed_video_name.split('_')[1]) == 3
return 50 if is_100 else 60
@property
def num_frames(self):
return self.end_frame - self.start_frame
@property
def label(self):
return {'verb': self._series['verb_class'] if 'verb_class' in self._series else -1,
'noun': self._series['noun_class'] if 'noun_class' in self._series else -1}
@property
def metadata(self):
return {'narration_id': self._index} | 28.672727 | 91 | 0.637286 | [
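# Example (a sketch; assumes a dict-like row with the EPIC-Kitchens columns):
#
#     row = {'participant_id': 'P01', 'video_id': 'P01_101',
#            'start_timestamp': '00:00:01.00', 'stop_timestamp': '00:00:03.00',
#            'verb_class': 0, 'noun_class': 3}
#     record = EpicKitchensVideoRecord(('P01_101_0', row))
#     record.fps          # 50 (ids like 'P01_101' are from the 100-hours set)
#     record.start_frame  # 50
#     record.num_frames   # 100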
"Apache-2.0"
] | Lucaweihs/Motionformer | slowfast/datasets/epickitchens_record.py | 1,577 | Python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import unittest
import torch
import torch.nn as nn
from opacus import PerSampleGradientClipper
from opacus.dp_model_inspector import DPModelInspector
from opacus.layers import DPLSTM, DPMultiheadAttention, SequenceBias
from opacus.utils.clipping import ConstantFlatClipper
class LayersGradTest(unittest.TestCase):
def setUp(self):
self.validator = DPModelInspector()
def _reset_seeds(self):
torch.manual_seed(1337)
torch.cuda.manual_seed(1337)
def _run_once(self, layer, criterion, *args):
self._reset_seeds()
layer.zero_grad()
output = layer(*args)
if isinstance(output, tuple):
output = output[0]
output = output.squeeze()
y = torch.zeros_like(output)
loss = criterion(output, y)
loss.backward()
def _check_one_layer(self, layer, *args, **kwargs):
self._check_one_layer_with_criterion(
layer, nn.L1Loss(reduction="mean"), *args, **kwargs
)
self._check_one_layer_with_criterion(
layer, nn.L1Loss(reduction="sum"), *args, **kwargs
)
def _check_one_layer_with_criterion(self, layer, criterion, *args, **kwargs):
self.validator.validate(layer)
for name, param in layer.named_parameters():
if ("weight" in name) or ("bias" in name):
nn.init.uniform_(param, -1.0, 1.0)
# run without DP
self._run_once(layer, criterion, *args)
vanilla_run_grads = [
(name, p.grad.detach())
for (name, p) in layer.named_parameters()
if p.requires_grad
]
# run with DP
clipper = PerSampleGradientClipper(
layer,
ConstantFlatClipper(1e9),
batch_first=kwargs.get("batch_first", True),
loss_reduction=criterion.reduction,
)
self._run_once(layer, criterion, *args)
for param_name, param in layer.named_parameters():
if param.requires_grad:
self.assertTrue(
hasattr(param, "grad_sample"),
f"Per-sample gradients haven't been computed for {param_name}",
)
clipper.clip_and_accumulate()
clipper.pre_step()
private_run_grads = [
(name, p.grad.detach())
for (name, p) in layer.named_parameters()
if p.requires_grad
]
# compare
for (vanilla_name, vanilla_grad), (private_name, private_grad) in zip(
vanilla_run_grads, private_run_grads
):
assert vanilla_name == private_name
self.assertTrue(
torch.allclose(vanilla_grad, private_grad, atol=10e-5, rtol=10e-3),
f"Gradient mismatch. Parameter: {layer}.{vanilla_name}, loss: {criterion.reduction}",
)
clipper.close()
def test_conv1d(self):
x = torch.randn(64, 16, 24)
layer = nn.Conv1d(16, 32, 3, 1)
self._check_one_layer(layer, x)
def test_conv2d(self):
x = torch.randn(64, 16, 24, 24)
layer = nn.Conv2d(16, 32, 3, 1)
self._check_one_layer(layer, x)
def test_linear(self):
self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8))
self._check_one_layer(nn.Linear(8, 4), torch.randn(16, 8, 8))
def test_layernorm(self):
x = torch.randn(64, 16, 24, 24)
self._check_one_layer(nn.LayerNorm(24), x)
self._check_one_layer(nn.LayerNorm((24, 24)), x)
self._check_one_layer(nn.LayerNorm((16, 24, 24)), x)
def test_groupnorm(self):
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10))
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9))
self._check_one_layer(nn.GroupNorm(4, 16), torch.randn(64, 16, 10, 9, 8))
def test_instancenorm(self):
self._check_one_layer(
nn.InstanceNorm1d(16, affine=True), torch.randn(64, 16, 10)
)
self._check_one_layer(
nn.InstanceNorm2d(16, affine=True), torch.randn(64, 16, 10, 9)
)
self._check_one_layer(
nn.InstanceNorm3d(16, affine=True), torch.randn(64, 16, 10, 9, 8)
)
def test_sequence_bias(self):
x = torch.randn(4, 3, 2)
layer = SequenceBias(2)
self._check_one_layer(layer, x, batch_first=False)
def test_multihead_attention(self):
x = torch.randn(16, 24, 32)
layer = DPMultiheadAttention(32, 1)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True, dropout=0.05)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(32, 1, bias=True, add_bias_kv=True)
self._check_one_layer(layer, x, x, x, batch_first=False)
layer = DPMultiheadAttention(
32, 1, bias=True, add_bias_kv=True, add_zero_attn=True
)
self._check_one_layer(layer, x, x, x, batch_first=False)
q = torch.randn(16, 24, 32)
k = torch.randn(20, 24, 28)
v = torch.randn(20, 24, 28)
layer = DPMultiheadAttention(
32, 1, bias=True, add_bias_kv=True, add_zero_attn=True, kdim=28, vdim=28
)
self._check_one_layer(layer, q, k, v, batch_first=False)
def test_embedding(self):
layer = nn.Embedding(256, 100)
x1 = torch.randint(0, 255, (128, 42)).long()
x2 = torch.randint(0, 255, (64,)).long()
self._check_one_layer(layer, x1)
self._check_one_layer(layer, x2)
def test_lstm_batch_first(self):
# input size : 25 output size : 12 minibatch : 30 sequence length : 20
# Test batch_first=True case
layer = DPLSTM(25, 12, 1, batch_first=True)
x = torch.randn(30, 20, 25)
self._check_one_layer(layer, x, batch_first=True)
def test_lstm_batch_second(self):
# input size : 25 output size : 12 minibatch : 30 sequence length : 20
# Test batch_first=False case
layer = DPLSTM(25, 12, 1, batch_first=False)
x = torch.randn(20, 30, 25)
self._check_one_layer(layer, x, batch_first=False)
| 34.326087 | 101 | 0.611146 | [
"Apache-2.0"
] | RosaYen/DP_FL_recreation | DP_FL_recreate/opacus/tests/layers_grad_test.py | 6,316 | Python |
import yaml
class HdcpSource:
def __init__(self, conf_yaml):
        # safe_load avoids the unsafe/removed default-Loader form of yaml.load
        with open(conf_yaml, "r") as f:
            self.conf = yaml.safe_load(f)
def process_request(req):
msg_type, msg = req
if __name__ == "__main__":
HdcpSource("yaml/rx1.yaml")
| 14.157895 | 34 | 0.583643 | [
"MIT"
] | imamotts/hdcp_test | src/hdcp_source.py | 269 | Python |
import os
import shutil
import yaml
from mock import patch
from dsl_parser.exceptions import DSLParsingLogicException
from .. import cfy
from ... import env
from ...config import config
from ...commands import init
from .test_base import CliCommandTest
from .constants import BLUEPRINTS_DIR, SAMPLE_INPUTS_PATH, \
DEFAULT_BLUEPRINT_FILE_NAME, SAMPLE_CUSTOM_NAME_ARCHIVE
class InitTest(CliCommandTest):
def test_init_initialized_directory(self):
self.use_manager()
self.invoke(
'cfy init',
err_str_segment='Environment is already initialized')
def test_init_overwrite(self):
# Config values shouldn't change between init resets
with open(config.CLOUDIFY_CONFIG_PATH) as f:
conf = yaml.safe_load(f.read())
self.assertFalse(conf['colors'])
with open(config.CLOUDIFY_CONFIG_PATH, 'w') as f:
conf['colors'] = True
f.write(yaml.safe_dump(conf))
cfy.invoke('init -r')
with open(config.CLOUDIFY_CONFIG_PATH) as f:
conf = yaml.safe_load(f.read())
self.assertTrue(conf['colors'])
def test_init_overwrite_hard(self):
# Config values should change between hard init resets
with open(config.CLOUDIFY_CONFIG_PATH) as f:
conf = yaml.safe_load(f.read())
self.assertFalse(conf['colors'])
with open(config.CLOUDIFY_CONFIG_PATH, 'w') as f:
conf['colors'] = True
f.write(yaml.safe_dump(conf))
self.invoke('cfy init -r --hard')
with open(config.CLOUDIFY_CONFIG_PATH) as f:
conf = yaml.safe_load(f.read())
self.assertFalse(conf['colors'])
def test_init_overwrite_on_initial_init(self):
# Simply verifying the overwrite flag doesn't break the first init
cfy.purge_dot_cloudify()
self.invoke('cfy init -r')
def test_init_invalid_blueprint_path(self):
self.invoke(
'cfy init idonotexist.yaml',
err_str_segment='You must provide either a path to a local file',
)
def test_init_default_outputs(self):
blueprint_path = os.path.join(
BLUEPRINTS_DIR,
'local',
DEFAULT_BLUEPRINT_FILE_NAME
)
self.invoke('cfy init {0}'.format(blueprint_path))
cfy.register_commands()
output = self.invoke(
'cfy deployments outputs -b local').logs.split('\n')
self.assertIn(' "key1": "default_val1", ', output)
self.assertIn(' "key2": "default_val2", ', output)
self.assertIn(' "key3": "default_val3", ', output)
self.assertIn(' "param": null, ', output)
self.assertIn(' "custom_param": null, ', output)
self.assertIn(' "provider_context": null', output)
def test_init_default_inputs(self):
blueprint_path = os.path.join(
BLUEPRINTS_DIR,
'local',
DEFAULT_BLUEPRINT_FILE_NAME
)
command = 'cfy init {0}'.format(blueprint_path)
self.invoke(command)
cfy.register_commands()
output = self.invoke(
'cfy deployments inputs -b local').logs.split('\n')
self.assertIn(' "key1": "default_val1", ', output)
self.assertIn(' "key2": "default_val2", ', output)
self.assertIn(' "key3": "default_val3"', output)
def test_init_with_inputs(self):
blueprint_path = os.path.join(
BLUEPRINTS_DIR,
'local',
DEFAULT_BLUEPRINT_FILE_NAME
)
command = 'cfy init {0} -i {1} -i key3=val3'.format(
blueprint_path,
SAMPLE_INPUTS_PATH
)
self.invoke(command)
cfy.register_commands()
output = self.invoke(
'cfy deployments inputs -b local').logs.split('\n')
self.assertIn(' "key1": "val1", ', output)
self.assertIn(' "key2": "val2", ', output)
self.assertIn(' "key3": "val3"', output)
def test_init_validate_definitions_version_false(self):
with open(config.CLOUDIFY_CONFIG_PATH) as f:
conf = yaml.safe_load(f.read())
with open(config.CLOUDIFY_CONFIG_PATH, 'w') as f:
conf['validate_definitions_version'] = False
f.write(yaml.safe_dump(conf))
self.invoke(
'cfy init {0}/local/blueprint_validate_definitions_version.yaml'
.format(BLUEPRINTS_DIR)
)
def test_init_validate_definitions_version_true(self):
self.invoke(
'cfy init {0}/local/blueprint_validate_definitions_version.yaml'
.format(BLUEPRINTS_DIR),
err_str_segment='description not supported in version',
exception=DSLParsingLogicException
)
@patch('cloudify.workflows.local.init_env')
@patch('cloudify_cli.local._install_plugins')
def test_init_install_plugins(self, install_plugins_mock, *_):
blueprint_path = os.path.join(
BLUEPRINTS_DIR,
'local',
'blueprint_with_plugins.yaml'
)
command = 'cfy init {0} --install-plugins'.format(blueprint_path)
self.invoke(command)
install_plugins_mock.assert_called_with(blueprint_path=blueprint_path)
@patch('cloudify.workflows.local.init_env')
def test_init_with_empty_requirements(self, *_):
blueprint_path = os.path.join(
BLUEPRINTS_DIR,
'local',
'blueprint_without_plugins.yaml'
)
command = 'cfy init {0} --install-plugins'.format(blueprint_path)
self.invoke(command)
def test_init_missing_plugins(self):
# TODO: put back possible solutions
blueprint_path = os.path.join(
BLUEPRINTS_DIR,
'local',
'blueprint_with_plugins.yaml'
)
self.invoke(
'cfy init {0}'.format(blueprint_path),
err_str_segment='mapping error: No module named tasks',
exception=ImportError
)
def test_no_init(self):
# make sure no error is thrown
cfy.purge_dot_cloudify()
self.invoke('cfy profiles list')
def test_init_blueprint_archive_default_name(self):
self.invoke(
'cfy init {0}'.format(SAMPLE_CUSTOM_NAME_ARCHIVE),
err_str_segment='Could not find `blueprint.yaml`'
)
def test_init_blueprint_archive(self):
self.invoke(
'cfy init {0} -b local -n simple_blueprint.yaml'
.format(SAMPLE_CUSTOM_NAME_ARCHIVE)
)
cfy.register_commands()
output = self.invoke(
'cfy deployments inputs -b local').logs.split('\n')
self.assertIn(' "key1": "default_val1", ', output)
self.assertIn(' "key2": "default_val2", ', output)
self.assertIn(' "key3": "default_val3"', output)
def test_set_config(self):
shutil.rmtree(env.CLOUDIFY_WORKDIR)
os.makedirs(env.CLOUDIFY_WORKDIR)
self.assertFalse(os.path.isfile(
os.path.join(env.CLOUDIFY_WORKDIR, 'config.yaml')))
init.set_config()
self.assertTrue(os.path.isfile(
os.path.join(env.CLOUDIFY_WORKDIR, 'config.yaml')))
| 33.882629 | 78 | 0.618678 | [
"Apache-2.0"
] | TS-at-WS/cloudify-cli | cloudify_cli/tests/commands/test_init.py | 7,217 | Python |
from django.db import migrations, models
import django_simple_file_handler.models
class Migration(migrations.Migration):
dependencies = [
('django_simple_file_handler', '0002_auto_20180521_1545'),
]
operations = [
migrations.AlterField(
model_name='privatedocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='privatepdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='processedimage',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='publicdocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='publicpdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='temporarydocument',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='temporarypdf',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
migrations.AlterField(
model_name='unprocessedimage',
name='saved_file',
field=models.FileField(max_length=254, upload_to=django_simple_file_handler.models.create_file_path, verbose_name='uploaded file'),
),
]
| 43.45283 | 143 | 0.671733 | [
"MIT"
] | jonathanrickard/django-simple-file-handler | django_simple_file_handler/migrations/0003_auto_20180525_1035.py | 2,303 | Python |
import atexit
import logging
import multiprocessing
import gc
import os
from sys import exit
import warnings
import click
import dask
from distributed import Nanny, Worker
from distributed.security import Security
from distributed.cli.utils import check_python_3, install_signal_handlers
from distributed.comm import get_address_host_port
from distributed.preloading import validate_preload_argv
from distributed.proctitle import (
enable_proctitle_on_children,
enable_proctitle_on_current,
)
from toolz import valmap
from tornado.ioloop import IOLoop, TimeoutError
from tornado import gen
logger = logging.getLogger("distributed.dask_worker")
pem_file_option_type = click.Path(exists=True, resolve_path=True)
@click.command(context_settings=dict(ignore_unknown_options=True))
@click.argument("scheduler", type=str, required=False)
@click.option(
"--tls-ca-file",
type=pem_file_option_type,
default=None,
help="CA cert(s) file for TLS (in PEM format)",
)
@click.option(
"--tls-cert",
type=pem_file_option_type,
default=None,
help="certificate file for TLS (in PEM format)",
)
@click.option(
"--tls-key",
type=pem_file_option_type,
default=None,
help="private key file for TLS (in PEM format)",
)
@click.option(
"--worker-port",
type=int,
default=0,
help="Serving computation port, defaults to random",
)
@click.option(
"--nanny-port", type=int, default=0, help="Serving nanny port, defaults to random"
)
@click.option(
"--bokeh-port", type=int, default=None, help="Deprecated. See --dashboard-address"
)
@click.option(
"--dashboard-address",
type=str,
default=":0",
help="Address on which to listen for diagnostics dashboard",
)
@click.option(
"--dashboard/--no-dashboard",
"dashboard",
default=True,
required=False,
help="Launch the Dashboard [default: --dashboard]",
)
@click.option(
"--bokeh/--no-bokeh",
"bokeh",
default=None,
help="Deprecated. See --dashboard/--no-dashboard.",
required=False,
)
@click.option(
"--listen-address",
type=str,
default=None,
help="The address to which the worker binds. Example: tcp://0.0.0.0:9000",
)
@click.option(
"--contact-address",
type=str,
default=None,
help="The address the worker advertises to the scheduler for "
"communication with it and other workers. "
"Example: tcp://127.0.0.1:9000",
)
@click.option(
"--host",
type=str,
default=None,
help="Serving host. Should be an ip address that is"
" visible to the scheduler and other workers. "
"See --listen-address and --contact-address if you "
"need different listen and contact addresses. "
"See --interface.",
)
@click.option(
"--interface", type=str, default=None, help="Network interface like 'eth0' or 'ib0'"
)
@click.option(
"--protocol", type=str, default=None, help="Protocol like tcp, tls, or ucx"
)
@click.option("--nthreads", type=int, default=0, help="Number of threads per process.")
@click.option(
"--nprocs",
type=int,
default=1,
show_default=True,
help="Number of worker processes to launch.",
)
@click.option(
"--name",
type=str,
default=None,
help="A unique name for this worker like 'worker-1'. "
"If used with --nprocs then the process number "
"will be appended like name-0, name-1, name-2, ...",
)
@click.option(
"--memory-limit",
default="auto",
show_default=True,
help="Bytes of memory per process that the worker can use. "
"This can be an integer (bytes), "
"float (fraction of total system memory), "
"string (like 5GB or 5000M), "
"'auto', or zero for no memory management",
)
@click.option(
"--reconnect/--no-reconnect",
default=True,
help="Reconnect to scheduler if disconnected [default: --reconnect]",
)
@click.option(
"--nanny/--no-nanny",
default=True,
help="Start workers in nanny process for management [default: --nanny]",
)
@click.option("--pid-file", type=str, default="", help="File to write the process PID")
@click.option(
"--local-directory", default="", type=str, help="Directory to place worker files"
)
@click.option(
"--resources",
type=str,
default="",
help='Resources for task constraints like "GPU=2 MEM=10e9". '
"Resources are applied separately to each worker process "
"(only relevant when starting multiple worker processes with '--nprocs').",
)
@click.option(
"--scheduler-file",
type=str,
default="",
help="Filename to JSON encoded scheduler information. "
"Use with dask-scheduler --scheduler-file",
)
@click.option(
"--death-timeout",
type=str,
default=None,
help="Seconds to wait for a scheduler before closing",
)
@click.option(
"--dashboard-prefix", type=str, default="", help="Prefix for the dashboard"
)
@click.option(
"--lifetime",
type=str,
default="",
help="If provided, shut down the worker after this duration.",
)
@click.option(
"--lifetime-stagger",
type=str,
default="0 seconds",
show_default=True,
help="Random amount by which to stagger lifetime values",
)
@click.option(
"--lifetime-restart/--no-lifetime-restart",
"lifetime_restart",
default=False,
show_default=True,
required=False,
help="Whether or not to restart the worker after the lifetime lapses. "
"This assumes that you are using the --lifetime and --nanny keywords",
)
@click.option(
"--preload",
type=str,
multiple=True,
is_eager=True,
help="Module that should be loaded by each worker process "
'like "foo.bar" or "/path/to/foo.py"',
)
@click.argument(
"preload_argv", nargs=-1, type=click.UNPROCESSED, callback=validate_preload_argv
)
@click.version_option()
def main(
scheduler,
host,
worker_port,
listen_address,
contact_address,
nanny_port,
nthreads,
nprocs,
nanny,
name,
pid_file,
resources,
dashboard,
bokeh,
bokeh_port,
scheduler_file,
dashboard_prefix,
tls_ca_file,
tls_cert,
tls_key,
dashboard_address,
**kwargs
):
g0, g1, g2 = gc.get_threshold() # https://github.com/dask/distributed/issues/1653
gc.set_threshold(g0 * 3, g1 * 3, g2 * 3)
enable_proctitle_on_current()
enable_proctitle_on_children()
if bokeh_port is not None:
warnings.warn(
"The --bokeh-port flag has been renamed to --dashboard-address. "
"Consider adding ``--dashboard-address :%d`` " % bokeh_port
)
dashboard_address = bokeh_port
if bokeh is not None:
warnings.warn(
"The --bokeh/--no-bokeh flag has been renamed to --dashboard/--no-dashboard. "
)
dashboard = bokeh
sec = Security(
**{
k: v
for k, v in [
("tls_ca_file", tls_ca_file),
("tls_worker_cert", tls_cert),
("tls_worker_key", tls_key),
]
if v is not None
}
)
if nprocs > 1 and worker_port != 0:
logger.error(
"Failed to launch worker. You cannot use the --port argument when nprocs > 1."
)
exit(1)
if nprocs > 1 and not nanny:
logger.error(
"Failed to launch worker. You cannot use the --no-nanny argument when nprocs > 1."
)
exit(1)
if contact_address and not listen_address:
logger.error(
"Failed to launch worker. "
"Must specify --listen-address when --contact-address is given"
)
exit(1)
if nprocs > 1 and listen_address:
logger.error(
"Failed to launch worker. "
"You cannot specify --listen-address when nprocs > 1."
)
exit(1)
if (worker_port or host) and listen_address:
logger.error(
"Failed to launch worker. "
"You cannot specify --listen-address when --worker-port or --host is given."
)
exit(1)
try:
if listen_address:
(host, worker_port) = get_address_host_port(listen_address, strict=True)
if contact_address:
# we only need this to verify it is getting parsed
(_, _) = get_address_host_port(contact_address, strict=True)
else:
# if contact address is not present we use the listen_address for contact
contact_address = listen_address
except ValueError as e:
logger.error("Failed to launch worker. " + str(e))
exit(1)
if nanny:
port = nanny_port
else:
port = worker_port
if not nthreads:
nthreads = multiprocessing.cpu_count() // nprocs
if pid_file:
with open(pid_file, "w") as f:
f.write(str(os.getpid()))
def del_pid_file():
if os.path.exists(pid_file):
os.remove(pid_file)
atexit.register(del_pid_file)
if resources:
resources = resources.replace(",", " ").split()
resources = dict(pair.split("=") for pair in resources)
resources = valmap(float, resources)
else:
resources = None
loop = IOLoop.current()
if nanny:
kwargs.update({"worker_port": worker_port, "listen_address": listen_address})
t = Nanny
else:
if nanny_port:
kwargs["service_ports"] = {"nanny": nanny_port}
t = Worker
if (
not scheduler
and not scheduler_file
and dask.config.get("scheduler-address", None) is None
):
raise ValueError(
"Need to provide scheduler address like\n"
"dask-worker SCHEDULER_ADDRESS:8786"
)
nannies = [
t(
scheduler,
scheduler_file=scheduler_file,
nthreads=nthreads,
loop=loop,
resources=resources,
security=sec,
contact_address=contact_address,
host=host,
port=port,
dashboard_address=dashboard_address if dashboard else None,
service_kwargs={"dashboard": {"prefix": dashboard_prefix}},
name=name if nprocs == 1 or not name else name + "-" + str(i),
**kwargs
)
for i in range(nprocs)
]
@gen.coroutine
def close_all():
# Unregister all workers from scheduler
if nanny:
yield [n.close(timeout=2) for n in nannies]
def on_signal(signum):
logger.info("Exiting on signal %d", signum)
close_all()
@gen.coroutine
def run():
yield nannies
yield [n.finished() for n in nannies]
install_signal_handlers(loop, cleanup=on_signal)
try:
loop.run_sync(run)
except TimeoutError:
# We already log the exception in nanny / worker. Don't do it again.
raise TimeoutError("Timed out starting worker.") from None
except KeyboardInterrupt:
pass
finally:
logger.info("End worker")
def go():
check_python_3()
main()
if __name__ == "__main__":
go()
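# Example invocation (a sketch; assumes a scheduler is already listening at the
# given address):
#
#     dask-worker tcp://127.0.0.1:8786 --nthreads 2 --nprocs 2 --memory-limit 4GB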
| 26.997567 | 95 | 0.62518 | [
"BSD-3-Clause"
] | MdSalih/distributed | distributed/cli/dask_worker.py | 11,096 | Python |
# --- Imports --- #
import torch
import torch.nn.functional as F
# --- Perceptual loss network --- #
class LossNetwork(torch.nn.Module):
def __init__(self, vgg_model):
super(LossNetwork, self).__init__()
self.vgg_layers = vgg_model
self.layer_name_mapping = {
'3': "relu1_2",
'8': "relu2_2",
'15': "relu3_3"
}
def output_features(self, x):
output = {}
for name, module in self.vgg_layers._modules.items():
x = module(x)
if name in self.layer_name_mapping:
output[self.layer_name_mapping[name]] = x
return list(output.values())
def forward(self, dehaze, gt):
loss = []
dehaze_features = self.output_features(dehaze)
gt_features = self.output_features(gt)
for dehaze_feature, gt_feature in zip(dehaze_features, gt_features):
loss.append(F.mse_loss(dehaze_feature, gt_feature))
return sum(loss)/len(loss) | 33.366667 | 76 | 0.598402 | [
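# Example usage (a sketch; assumes torchvision is available and that the first
# 16 layers of a pretrained VGG16, i.e. up to relu3_3, serve as the frozen
# feature extractor):
#
#     from torchvision import models
#     vgg = models.vgg16(pretrained=True).features[:16]
#     vgg.eval()
#     for param in vgg.parameters():
#         param.requires_grad = False
#     loss_network = LossNetwork(vgg)
#     perceptual_loss = loss_network(dehaze_batch, gt_batch)  # scalar tensor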
"MIT"
] | liuh127/NTIRE-2021-Dehazing-Two-branch | perceptual.py | 1,001 | Python |
import heapq
import math
import random as rnd
from functools import partial
from .core import Bag
def sample(population, k):
"""Chooses k unique random elements from a bag.
Returns a new bag containing elements from the population while
leaving the original population unchanged.
Parameters
----------
population: Bag
Elements to sample.
    k: integer
Number of elements to sample.
Examples
--------
>>> import dask.bag as db # doctest: +SKIP
... from dask.bag import random
...
... b = db.from_sequence(range(5), npartitions=2)
... list(random.sample(b, 3).compute())
[1, 3, 5]
"""
return _sample(population=population, k=k, replace=False)
def choices(population, k=1):
"""
Return a k sized list of elements chosen with replacement.
Parameters
----------
population: Bag
Elements to sample.
k: integer, optional
Number of elements to sample.
Examples
--------
>>> import dask.bag as db # doctest: +SKIP
... from dask.bag import random
...
... b = db.from_sequence(range(5), npartitions=2)
... list(random.choices(b, 3).compute())
[1, 1, 5]
"""
return _sample(population=population, k=k, replace=True)
def _sample(population, k, replace=False):
return population.reduction(
partial(_sample_map_partitions, k=k, replace=replace),
partial(_sample_reduce, k=k, replace=replace),
out_type=Bag,
)
def _sample_map_partitions(population, k, replace):
"""
Map function used on the sample and choices functions.
Parameters
----------
population : list
List of elements to sample.
    k : int
        Number of elements to sample.
Returns
-------
sample: list
List of sampled elements from the partition.
lx: int
Number of elements on the partition.
k: int
Number of elements to sample.
"""
lx = len(population)
real_k = k if k <= lx else lx
sample_func = rnd.choices if replace else rnd.sample
# because otherwise it raises IndexError:
sampled = [] if real_k == 0 else sample_func(population=population, k=real_k)
return sampled, lx
def _sample_reduce(reduce_iter, k, replace):
"""
Reduce function used on the sample and choice functions.
Parameters
----------
reduce_iter : iterable
Each element is a tuple coming generated by the _sample_map_partitions function.
Returns a sequence of uniformly distributed samples;
"""
ns_ks = []
s = []
n = 0
# unfolding reduce outputs
for i in reduce_iter:
(s_i, n_i) = i
s.extend(s_i)
n += n_i
k_i = len(s_i)
ns_ks.append((n_i, k_i))
if k < 0 or (k > n and not replace):
raise ValueError("Sample larger than population or is negative")
# creating the probability array
p = []
for n_i, k_i in ns_ks:
if k_i > 0:
p_i = n_i / (k_i * n)
p += [p_i] * k_i
sample_func = rnd.choices if replace else _weighted_sampling_without_replacement
return sample_func(population=s, weights=p, k=k)
def _weighted_sampling_without_replacement(population, weights, k):
"""
Source:
Weighted random sampling with a reservoir, Pavlos S. Efraimidis, Paul G. Spirakis
"""
elt = [(math.log(rnd.random()) / weights[i], i) for i in range(len(weights))]
return [population[x[1]] for x in heapq.nlargest(k, elt)]
| 26.117647 | 89 | 0.623592 | [
"MIT"
] | CDU55/FakeNews | ServerComponent/venv/Lib/site-packages/dask/bag/random.py | 3,552 | Python |
from django.db.models.fields import Field
from django.urls import NoReverseMatch
from django.urls import reverse
from polymorphic.managers import PolymorphicManager
from common.business_rules import NoBlankDescription
from common.business_rules import UpdateValidity
from common.models.mixins.validity import ValidityStartMixin
from common.models.mixins.validity import ValidityStartQueryset
from common.models.records import TrackedModelQuerySet
from common.util import classproperty
class DescriptionQueryset(ValidityStartQueryset, TrackedModelQuerySet):
pass
class DescriptionMixin(ValidityStartMixin):
objects = PolymorphicManager.from_queryset(DescriptionQueryset)()
business_rules = (
NoBlankDescription,
UpdateValidity,
)
@classproperty
def described_object_field(cls) -> Field:
for rel in cls.relations.keys():
if rel.name.startswith("described_"):
return rel
raise TypeError(f"{cls} should have a described field.")
@classproperty
def validity_over(cls):
return cls.described_object_field.name
def get_described_object(self):
return getattr(self, self.described_object_field.name)
def get_url(self, action="detail"):
kwargs = {}
if action != "list":
kwargs = self.get_identifying_fields()
described_object = self.get_described_object()
for field, value in described_object.get_identifying_fields().items():
kwargs[f"{self.described_object_field.name}__{field}"] = value
try:
return reverse(
f"{self.get_url_pattern_name_prefix()}-ui-{action}",
kwargs=kwargs,
)
except NoReverseMatch:
return
def __str__(self):
return self.identifying_fields_to_string(
identifying_fields=(
self.described_object_field.name,
"validity_start",
),
)
class Meta:
abstract = True
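# Example (a hypothetical concrete model, for illustration only; the field names
# are assumptions): a description model points back at the object it describes
# through a relation whose name starts with "described_", which is how
# `described_object_field` locates it:
#
#     class FootnoteDescription(DescriptionMixin, TrackedModel):
#         described_footnote = models.ForeignKey(
#             "footnotes.Footnote",
#             on_delete=models.PROTECT,
#             related_name="descriptions",
#         )
#         description = models.TextField()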
| 31.307692 | 82 | 0.679607 | [
"MIT"
] | uktrade/tamato | common/models/mixins/description.py | 2,035 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pip install nvidia-ml-py3 --user
import pynvml
try:
pynvml.nvmlInit()
except pynvml.NVMLError as error:
print(error)
    # Driver Not Loaded: the driver failed to load (not installed or broken),
    # raising pynvml.NVMLError_DriverNotLoaded: Driver Not Loaded
    # Insufficient Permission: not running with administrator privileges
exit()
try:
print(pynvml.nvmlDeviceGetCount())
except pynvml.NVMLError as error:
print(error)
print(pynvml.nvmlDeviceGetCount())# total gpu count = 1
print(pynvml.nvmlSystemGetDriverVersion()) # 396.54
GPU_ID = 0
handle = pynvml.nvmlDeviceGetHandleByIndex(GPU_ID)
print(pynvml.nvmlDeviceGetName(handle)) # GeForce GTX 1060
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
MB_SIZE = 1024*1024
print(meminfo.total/MB_SIZE) # 6078 MB
print(meminfo.used/MB_SIZE) # 531 MB
print(meminfo.free/MB_SIZE) # 5546 MB
pynvml.nvmlShutdown()
| 25.441176 | 92 | 0.746821 | [
"MIT"
] | forwardmeasure/kubeflow | setup/nvidia/nvml-test.py | 919 | Python |
# Time: O(n * n!/(c_a!*...*c_z!), n is the length of A, B,
# c_a...c_z is the count of each alphabet,
# n = sum(c_a...c_z)
# Space: O(n * n!/(c_a!*...*c_z!)
# 854
# Strings A and B are K-similar (for some non-negative integer K)
# if we can swap the positions of two letters
# in A exactly K times so that the resulting string equals B.
#
# Given two anagrams A and B, return the smallest K for which A and B are
# K-similar.
#
# Example 1:
#
# Input: A = "ab", B = "ba"
# Output: 1
# Example 2:
#
# Input: A = "abc", B = "bca"
# Output: 2
# Example 3:
#
# Input: A = "abac", B = "baca"
# Output: 2
# Example 4:
#
# Input: A = "aabc", B = "abca"
# Output: 2
# Note:
# - 1 <= A.length == B.length <= 20
# - A and B contain only lowercase letters from
# the set {'a', 'b', 'c', 'd', 'e', 'f'}
# Solution Framework:
# The underlying graph of the problem is a graph with 6 nodes 'a', 'b', ..., 'f' and the edges A[i] -> B[i].
# Our goal is for this graph to have only self-edges (edges of the form a -> a.)
# If A = 'ca...' and B = 'ab...', then the first two edges of the underlying graph are c -> a and a -> b;
# and a swap between A[1] and A[0] changes these two edges to the single edge c -> b. Let's call this type
# of operation 'cutting corners'. Intuitively, our optimal swap schedule always increases the # of matches
# (A[i] == B[i]s) for each swap, so cutting corners is the only type of operation we need to consider.
# (This is essentially the happy swap assumption, proved in 765 - Couples Holding Hands)
#
# Now consider 'cycle decomposition' of the underlying graph. [This decomposition (or the # of cycles),
# is not necessarily unique.] Through operations of cutting corners, we'll delete all the (non-self) edges.
# Each cycle of length k requires k-1 operations to delete. Thus, the answer is just the minimum possible
# value of sum(C_k - 1), where C_1,... C_k are the lengths of the cycles in some cycle decomposition of
# the underlying graph. This can be re-written as (# of non-self edges) - (# of cycles).
# Hence, we want to maximize the # of cycles in a cycle decomposition of the underlying graph.
import collections
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class Solution(object):
def kSimilarity(self, A, B):
"""
:type A: str
:type B: str
:rtype: int
"""
# Perform a regular breadth-first search: the neighbors to each node string S are all the strings
# reachable with 1 swap to get the first unmatched character in S matched.
        # We can prove that an optimal solution swaps the left-most unmatched character A[i] with an
        # appropriate match A[j] that equals B[i] (j > i), as this increases the # of self-edges.
# Time complexity: This reduces the # of "neighbors" of a node (string state) from O(N^2) to O(N):
# O(N^2): swap any pair of chars in the string,
# O(N): only swap the first unmatched char.
def neighbors(s):
for i, c in enumerate(s):
if c != B[i]:
break
t = list(s)
for j in xrange(i+1, len(s)):
if t[j] == B[i]:
t[i], t[j] = t[j], t[i]
yield "".join(t)
t[j], t[i] = t[i], t[j]
q = collections.deque([A])
steps = {A:0} # we need a set to remove repeatedness anyway, so put 'steps' together
while q:
s = q.popleft()
if s == B:
return steps[s]
for t in neighbors(s):
if t not in steps:
steps[t] = steps[s] + 1
q.append(t)
print(Solution().kSimilarity('abac', 'baca')) | 39.309278 | 108 | 0.586415 | [
"MIT"
] | RideGreg/LeetCode | Python/k-similar-strings.py | 3,813 | Python |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import subprocess
from ..utils.scripts import make_path
from ..utils.testing import Checker
from .obs_table import ObservationTable
from .hdu_index_table import HDUIndexTable
from .obs_table import ObservationTableChecker
from .observations import DataStoreObservation, Observations, ObservationChecker
__all__ = ["DataStore"]
log = logging.getLogger(__name__)
class DataStore(object):
"""IACT data store.
The data selection and access happens using an observation
and an HDU index file as described at :ref:`gadf:iact-storage`.
See :gp-extra-notebook:`cta_1dc_introduction` for usage examples.
Parameters
----------
hdu_table : `~gammapy.data.HDUIndexTable`
HDU index table
obs_table : `~gammapy.data.ObservationTable`
Observation index table
Examples
--------
Here's an example how to create a `DataStore` to access H.E.S.S. data:
>>> from gammapy.data import DataStore
>>> data_store = DataStore.from_dir('$GAMMAPY_DATA/hess-dl3-dr1')
>>> data_store.info()
"""
DEFAULT_HDU_TABLE = "hdu-index.fits.gz"
"""Default HDU table filename."""
DEFAULT_OBS_TABLE = "obs-index.fits.gz"
"""Default observation table filename."""
def __init__(self, hdu_table=None, obs_table=None):
self.hdu_table = hdu_table
self.obs_table = obs_table
def __str__(self):
return self.info(show=False)
@classmethod
def from_file(cls, filename, hdu_hdu="HDU_INDEX", hdu_obs="OBS_INDEX"):
"""Create from a FITS file.
The FITS file must contain both index files.
Parameters
----------
filename : str, Path
FITS filename
hdu_hdu : str or int
FITS HDU name or number for the HDU index table
hdu_obs : str or int
FITS HDU name or number for the observation index table
"""
filename = make_path(filename)
hdu_table = HDUIndexTable.read(filename, hdu=hdu_hdu, format="fits")
obs_table = ObservationTable.read(filename, hdu=hdu_obs, format="fits")
return cls(hdu_table=hdu_table, obs_table=obs_table)
@classmethod
def from_dir(cls, base_dir, hdu_table_filename=None, obs_table_filename=None):
"""Create from a directory.
Parameters
----------
base_dir : str, Path
Base directory of the data files.
hdu_table_filename : str, Path
Filename of the HDU index file. May be specified either relative
to `base_dir` or as an absolute path. If None, the default filename
will be looked for.
obs_table_filename : str, Path
Filename of the observation index file. May be specified either relative
to `base_dir` or as an absolute path. If None, the default filename
will be looked for.
"""
base_dir = make_path(base_dir)
if hdu_table_filename:
hdu_table_filename = make_path(hdu_table_filename)
if (base_dir / hdu_table_filename).exists():
hdu_table_filename = base_dir / hdu_table_filename
else:
hdu_table_filename = base_dir / cls.DEFAULT_HDU_TABLE
if obs_table_filename:
obs_table_filename = make_path(obs_table_filename)
if (base_dir / obs_table_filename).exists():
obs_table_filename = base_dir / obs_table_filename
else:
obs_table_filename = base_dir / cls.DEFAULT_OBS_TABLE
if not hdu_table_filename.exists():
raise IOError("File not found: {}".format(hdu_table_filename))
log.debug("Reading {}".format(hdu_table_filename))
hdu_table = HDUIndexTable.read(str(hdu_table_filename), format="fits")
hdu_table.meta["BASE_DIR"] = str(base_dir)
if not obs_table_filename.exists():
raise IOError("File not found: {}".format(obs_table_filename))
log.debug("Reading {}".format(str(obs_table_filename)))
obs_table = ObservationTable.read(str(obs_table_filename), format="fits")
return cls(hdu_table=hdu_table, obs_table=obs_table)
@classmethod
def from_config(cls, config):
"""Create from a config dict."""
base_dir = config["base_dir"]
hdu_table_filename = config.get("hduindx", cls.DEFAULT_HDU_TABLE)
obs_table_filename = config.get("obsindx", cls.DEFAULT_OBS_TABLE)
hdu_table_filename = cls._find_file(hdu_table_filename, base_dir)
obs_table_filename = cls._find_file(obs_table_filename, base_dir)
        return cls.from_dir(
            base_dir=base_dir,
            hdu_table_filename=hdu_table_filename,
            obs_table_filename=obs_table_filename,
        )
@staticmethod
def _find_file(filename, dir):
"""Find a file at an absolute or relative location.
- First tries ``Path(filename)``
- Second tries ``Path(dir) / filename``
- Raises ``OSError`` if both don't exist.
"""
path1 = make_path(filename)
path2 = make_path(dir) / filename
if path1.is_file():
filename = path1
elif path2.is_file():
filename = path2
else:
raise OSError("File not found at {} or {}".format(path1, path2))
return filename
def info(self, show=True):
"""Print some info."""
s = "Data store:\n"
s += self.hdu_table.summary()
s += "\n\n"
s += self.obs_table.summary()
if show:
print(s)
else:
return s
def obs(self, obs_id):
"""Access a given `~gammapy.data.DataStoreObservation`.
Parameters
----------
obs_id : int
Observation ID.
Returns
-------
observation : `~gammapy.data.DataStoreObservation`
Observation container
"""
return DataStoreObservation(obs_id=int(obs_id), data_store=self)
def get_observations(self, obs_id, skip_missing=False):
"""Generate a `~gammapy.data.Observations`.
Parameters
----------
obs_id : list
Observation IDs.
skip_missing : bool, optional
Skip missing observations, default: False
Returns
-------
observations : `~gammapy.data.Observations`
Container holding a list of `~gammapy.data.DataStoreObservation`
"""
obs_list = []
for _ in obs_id:
try:
obs = self.obs(_)
except ValueError as err:
if skip_missing:
log.warning("Skipping missing obs_id: {!r}".format(_))
continue
else:
raise err
else:
obs_list.append(obs)
return Observations(obs_list)
def copy_obs(self, obs_id, outdir, hdu_class=None, verbose=False, overwrite=False):
"""Create a new `~gammapy.data.DataStore` containing a subset of observations.
Parameters
----------
obs_id : array-like, `~gammapy.data.ObservationTable`
List of observations to copy
outdir : str, Path
Directory for the new store
hdu_class : list of str
see :attr:`gammapy.data.HDUIndexTable.VALID_HDU_CLASS`
verbose : bool
Print copied files
overwrite : bool
Overwrite
"""
# TODO : Does rsync give any benefits here?
outdir = make_path(outdir)
if isinstance(obs_id, ObservationTable):
obs_id = obs_id["OBS_ID"].data
hdutable = self.hdu_table
hdutable.add_index("OBS_ID")
with hdutable.index_mode("discard_on_copy"):
subhdutable = hdutable.loc[obs_id]
if hdu_class is not None:
subhdutable.add_index("HDU_CLASS")
with subhdutable.index_mode("discard_on_copy"):
subhdutable = subhdutable.loc[hdu_class]
subobstable = self.obs_table.select_obs_id(obs_id)
for idx in range(len(subhdutable)):
# Changes to the file structure could be made here
loc = subhdutable.location_info(idx)
targetdir = outdir / loc.file_dir
targetdir.mkdir(exist_ok=True, parents=True)
cmd = ["cp", "-v"] if verbose else ["cp"]
if not overwrite:
cmd += ["-n"]
cmd += [str(loc.path()), str(targetdir)]
subprocess.call(cmd)
filename = str(outdir / self.DEFAULT_HDU_TABLE)
subhdutable.write(filename, format="fits", overwrite=overwrite)
filename = str(outdir / self.DEFAULT_OBS_TABLE)
subobstable.write(filename, format="fits", overwrite=overwrite)
def check(self, checks="all"):
"""Check index tables and data files.
This is a generator that yields a list of dicts.
"""
checker = DataStoreChecker(self)
return checker.run(checks=checks)
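    # Example (a sketch): print any problems reported by the checker:
    #
    #     for record in data_store.check():
    #         print(record.get("level", ""), record.get("msg", ""))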
class DataStoreChecker(Checker):
"""Check data store.
Checks data format and a bit about the content.
"""
CHECKS = {
"obs_table": "check_obs_table",
"hdu_table": "check_hdu_table",
"observations": "check_observations",
"consistency": "check_consistency",
}
def __init__(self, data_store):
self.data_store = data_store
def check_obs_table(self):
"""Checks for the observation index table."""
checker = ObservationTableChecker(self.data_store.obs_table)
for record in checker.run():
yield record
def check_hdu_table(self):
"""Checks for the HDU index table."""
t = self.data_store.hdu_table
m = t.meta
if m.get("HDUCLAS1", "") != "INDEX":
yield {
"level": "error",
"hdu": "hdu-index",
"msg": "Invalid header key. Must have HDUCLAS1=INDEX",
}
if m.get("HDUCLAS2", "") != "HDU":
yield {
"level": "error",
"hdu": "hdu-index",
"msg": "Invalid header key. Must have HDUCLAS2=HDU",
}
# Check that all HDU in the data files exist
for idx in range(len(t)):
location_info = t.location_info(idx)
try:
location_info.get_hdu()
except KeyError:
yield {
"level": "error",
"msg": "HDU not found: {!r}".format(location_info.__dict__),
}
def check_consistency(self):
"""Consistency checks between multiple HDUs"""
# obs and HDU index should have the same OBS_ID
obs_table_obs_id = set(self.data_store.obs_table["OBS_ID"])
hdu_table_obs_id = set(self.data_store.hdu_table["OBS_ID"])
if not obs_table_obs_id == hdu_table_obs_id:
yield {
"level": "error",
"msg": "Inconsistent OBS_ID in obs and HDU index tables",
}
# TODO: obs table and events header should have the same times
def check_observations(self):
"""Perform some sanity checks for all observations."""
for obs_id in self.data_store.obs_table["OBS_ID"]:
obs = self.data_store.obs(obs_id)
for record in ObservationChecker(obs).run():
yield record
| 33.871345 | 87 | 0.601865 | [
"BSD-3-Clause"
] | qpiel/gammapy | gammapy/data/data_store.py | 11,584 | Python |
from typing import Optional
import django_admin_relation_links
from adminutils import options
from authtools import admin as authtools_admin
from django.contrib import admin
from enumfields.admin import EnumFieldListFilter
from rangefilter.filter import DateRangeFilter
from solo.admin import SingletonModelAdmin
from eahub.base import models
from eahub.base.models import User
from eahub.profiles.models import Profile
@admin.register(models.User)
class UserAdmin(
django_admin_relation_links.AdminChangeLinksMixin, authtools_admin.UserAdmin
):
list_select_related = ["profile"]
list_display = [
"is_active",
"email",
"profile_link",
"is_profile_approved",
"date_joined",
"last_login",
"is_superuser",
"is_staff",
"get_visibility",
]
change_links = ["profile"]
list_filter = [
"is_superuser",
"is_staff",
"is_active",
"profile__is_approved",
("profile__visibility", EnumFieldListFilter),
("date_joined", DateRangeFilter),
("last_login", DateRangeFilter),
]
search_fields = ["email", "profile__first_name", "profile__last_name"]
@options(desc="Approved", boolean=True)
def is_profile_approved(self, user) -> Optional[bool]:
profile = get_profile(user)
if profile is None:
return None
return profile.is_approved
@options(desc="Visibility")
def get_visibility(self, user) -> str:
profile = get_profile(user)
if profile is None:
return ""
return profile.visibility.value
def get_profile(user: User) -> Optional[Profile]:
try:
return user.profile
except Profile.DoesNotExist:
return None
@admin.register(models.MessagingLog)
class MessagingLogAdmin(admin.ModelAdmin):
list_display = [
"sender_email",
"recipient_email",
"recipient_type",
"send_action_uuid",
"time",
]
list_filter = [
"recipient_type",
("time", DateRangeFilter),
]
search_fields = ["sender", "recipient"]
admin.site.register(models.FeedbackURLConfig, SingletonModelAdmin)
| 26.433735 | 80 | 0.667274 | [
"MIT"
] | rtcharity/eahub.org | eahub/base/admin.py | 2,194 | Python |
class Pessoa():
    def criar_lista(self):
        lista = []
        qunt = int(input('how many people are there: '))
        for c in range(qunt):
            n = input('enter your name: ')
            lista.append(n)
            idade = int(input('enter your age: '))
            while idade < 0:
                print('invalid age, enter it again:')
                idade = int(input('enter your age: '))
            sexo = input('enter your sex: F or M ').upper()
            while sexo != 'F' and sexo != 'M':
                print('invalid sex, enter it again: ')
                sexo = input('enter your sex: F or M ').upper()
            saude = input('say how your health is: GOOD or BAD ').upper()
            while saude != 'GOOD' and saude != 'BAD':
                print('invalid health option. Enter it again')
                saude = input('say how your health is: GOOD or BAD ').upper()
            if idade < 18:
                print('you are NOT eligible for the mandatory military service!')
                continue
            elif sexo == 'F':
                print('you are NOT eligible for the mandatory military service!')
                continue
            elif saude == 'BAD':
                print('you are NOT eligible for the mandatory military service! Take care of your health first.')
                continue
            else:
                print('congratulations, you ARE eligible for the mandatory military service!')
pessoa = Pessoa()
pessoa.criar_lista()
| 39.875 | 113 | 0.523511 | [
"MIT"
] | Felipe-Gs/Dupla-2-F | ex3.py | 1,614 | Python |
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from tcms.core.utils import string_to_list
from tcms.core.forms.fields import UserField
from tcms.management.models import Product, Version, Build
from tcms.testplans.models import TestPlan
from tcms.testcases.models import TestCase
# =========== Forms for search/filter ==============
class SearchProductForm(forms.Form):
"""
Includes *only* fields used in search.html b/c
the actual search is now done via JSON RPC.
"""
    name_product = forms.CharField(label='Product', max_length=100, required=False)
| 34.25 | 83 | 0.734307 | [
"MIT"
] | YangKaiting/kiwitcms-telemetry-failed-test-cases | telemetryPlugin/forms.py | 685 | Python |
# pylint: disable=no-member
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
from playthrough_bot.models import ModelBase, get_engine
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
config.set_main_option("sqlalchemy.url", str(get_engine().url))
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = ModelBase.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
render_as_batch=config.get_main_option("sqlalchemy.url").startswith(
"sqlite:"
),
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
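# Typical invocations that exercise this env.py (illustrative; run from the
# directory containing alembic.ini):
#
#   alembic revision --autogenerate -m "create initial tables"   # diffs models against target_metadata
#   alembic upgrade head                                          # online mode: applies migrations
#   alembic upgrade head --sql                                    # offline mode: emits SQL instead of executing it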
| 26.885057 | 80 | 0.70714 | [
"MIT"
] | Rain288/playthrough-bot | src/playthrough-bot/models/alembic/env.py | 2,339 | Python |
"""
Experimental code for trimming primers & polyA tails from high error rate long reads
"""
import os, sys, pdb
from csv import DictWriter
from collections import namedtuple
from multiprocessing import Process
from Bio.Seq import Seq
from Bio import SeqIO
import parasail
ScoreTuple = namedtuple('ScoreTuple', ['score5', 'end5', 'score3', 'end3', 'endA'])
# for ONT using Clontech
#SEQ_5P = 'AAGCAGTGGTATCAACGCAGAGTACATGGGG'
#SEQ_3P_REV = 'GTATCAACGCAGAGTAC'
ISOSEQ_5P = 'GCAATGAAGTCGCAGGGTTGGG'
ISOSEQ_3P = 'GTACTCTGCGTTGATACCACTGCTT'
#SEQ_5P = 'GCAATGAAGTCGCAGGGTTGGGG'
#SEQ_5P = 'CAGGAAACAGCTATGACC'
#SEQ_3P_REV = 'AAGCAGTGGTATCAACGCAGAGTAC'
#SEQ_3P_REV = 'ACTGGCCGTCGTTTTAC'
MINSCORE_5P = 20
MINSCORE_3P = 20
MIN_A_LEN = 20
SCOREMAT = parasail.matrix_create("ACGT", 2, -5)
def trim5p3p_helper(r, seq_5p, seq_3p_rev):
"""
Search for 5' and 3' in the first and last 100 bp window
"""
s1 = str(r.seq[:100])
s2 = str(r.reverse_complement().seq[:100])
o1 = parasail.sg_qx_trace(s1, seq_5p, 3, 1, SCOREMAT)
o2 = parasail.sg_qe_db_trace(s2, seq_3p_rev, 3, 1, SCOREMAT)
lenA = None
if o2.score >= MINSCORE_3P:
lenA = trimA(s2[o2.end_query + 1:])
if MIN_A_LEN == 0:
end3 = len(r.seq) - o2.end_query - 1
return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=end3)
elif lenA is not None:
end3 = len(r.seq) - o2.end_query - 1
endA = end3 - lenA + 1
return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=endA)
else:
end3 = len(r.seq) - o2.end_query - 1
return ScoreTuple(score5=o1.score, end5=o1.end_query, score3=o2.score, end3=end3, endA=end3)
def trimA(rev_seq):
if len(rev_seq) == 0:
return None
n_rev_seq = len(rev_seq)
mismatch = 0
i = 0
while mismatch < 2 and i < n_rev_seq:
if rev_seq[i]!='T':
mismatch += 1
i += 1
i -= 1
if i >= MIN_A_LEN:
return i
else:
return None
def trim5p3p_multithreaded(fastq_filename, output_prefix, seq_5p, seq_3p_rev, chunks):
# first figure out how many records there are and record positions
num_lines = 0
for line in open(fastq_filename, 'r'): num_lines += 1
num_records = num_lines // 4
chunk_size = (num_records//chunks) + (num_records%chunks>0)
print("{0} has {1} records, {2} per chunk".format(fastq_filename, num_records, chunk_size))
pools = []
records = []
count = 0
i = 1
for r in SeqIO.parse(open(fastq_filename), 'fastq'):
count += 1
records.append(r)
if count >= chunk_size:
p = Process(target=trim5p3p, args=(records, output_prefix+'.'+str(i), seq_5p, seq_3p_rev))
p.start()
print("Starting worker {0}...".format(i))
pools.append(p)
records = []
count = 0
i += 1
p = Process(target=trim5p3p, args=(records, output_prefix + '.' + str(i), seq_5p, seq_3p_rev))
p.start()
print("Starting worker {0}...".format(i))
pools.append(p)
for p in pools:
p.join()
# now combine all the files
f_FL = open(output_prefix+'.fl.fasta', 'w')
f_FL_clips = open(output_prefix+'.fl.clips', 'w')
f_nFL = open(output_prefix+'.nfl.fasta', 'w')
f_csv = open(output_prefix+'.csv', 'w')
for j in range(1, i+1):
p = output_prefix + '.' + str(j)
with open(p + '.fl.fasta') as h:
f_FL.write(h.read())
print("writing {0} into {1}...".format(h.name, f_FL.name))
with open(p + '.fl.clips') as h:
f_FL_clips.write(h.read())
print("writing {0} into {1}...".format(h.name, f_FL_clips.name))
with open(p + '.nfl.fasta') as h:
f_nFL.write(h.read())
print("writing {0} into {1}...".format(h.name, f_nFL.name))
with open(p + '.csv') as h:
f_csv.write(h.read())
print("writing {0} into {1}...".format(h.name, f_csv.name))
os.remove(p + '.fl.fasta')
os.remove(p + '.fl.clips')
os.remove(p + '.nfl.fasta')
os.remove(p + '.csv')
f_csv.close()
f_FL.close()
f_FL_clips.close()
f_nFL.close()
def trim5p3p(records, output_prefix, seq_5p, seq_3p_rev):
f_FL = open(output_prefix+'.fl.fasta', 'w')
f_FL_clips = open(output_prefix+'.fl.clips', 'w')
f_nFL = open(output_prefix+'.nfl.fasta', 'w')
f_csv = open(output_prefix+'.csv', 'w')
writer = DictWriter(f_csv, fieldnames=['id', 'end5', 'end3', 'endA', 'strand'])
writer.writeheader()
for r in records:
r2 = r.reverse_complement()
r2.id = r.id
t1 = trim5p3p_helper(r, seq_5p, seq_3p_rev)
t2 = trim5p3p_helper(r2, seq_5p, seq_3p_rev)
is_fl_flag1 = t1.score5 >= MINSCORE_5P and t1.score3 >= MINSCORE_3P and (MIN_A_LEN == 0 or t1.endA!=t1.end3)
is_fl_flag2 = t2.score5 >= MINSCORE_5P and t2.score3 >= MINSCORE_3P and (MIN_A_LEN == 0 or t2.endA!=t2.end3)
if is_fl_flag1:
if is_fl_flag2:
if t1.score5+t1.score3 > t2.score5+t2.score3:
strand = '+'
else:
strand = '-'
else: # pick t1
strand = '+'
elif is_fl_flag2:
strand = '-'
else:
strand = 'NA'
info = {'id': r.id,
'end5': 'NA',
'end3': 'NA',
'endA': 'NA',
'strand': 'NA'}
if strand == '+':
info['strand'] = '+'
info['end5'] = t1.end5
info['end3'] = t1.end3
info['endA'] = t1.endA
f_FL.write(">{0}\n{1}\n".format(r.id, r.seq[t1.end5:t1.endA]))
f_FL_clips.write(">{0}_5p strand:+ score:{1}\n{2}\n".format(r.id, t1.score5, r.seq[:t1.end5]))
f_FL_clips.write(">{0}_3p strand:+ score:{1}\n{2}\n".format(r.id, t1.score3, r.seq[t1.endA:]))
elif strand == '-':
info['strand'] = '-'
info['end5'] = t2.end5
info['end3'] = t2.end3
info['endA'] = t2.endA
f_FL.write(">{0}\n{1}\n".format(r2.id, r2.seq[t2.end5:t2.endA]))
f_FL_clips.write(">{0}_5p strand:- score:{1}\n{2}\n".format(r.id, t2.score5, r2.seq[:t2.end5]))
f_FL_clips.write(">{0}_3p strand:- score:{1}\n{2}\n".format(r.id, t2.score3, r2.seq[t2.endA:]))
else:
# non-fL, but we still wanna trim away the stuff
if t1.score5+t1.score3 > t2.score5+t2.score3:
f_nFL.write(">{0} strand:+?\n{1}\n".format(r.id, r.seq[t1.end5:t1.endA]))
else:
f_nFL.write(">{0} strand:-?\n{1}\n".format(r2.id, r2.seq[t2.end5:t2.endA]))
writer.writerow(info)
f_csv.close()
f_FL.close()
f_FL_clips.close()
f_nFL.close()
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("fastq_filename")
parser.add_argument("output_prefix")
parser.add_argument("-p", "--primer_fasta", default=None, help="Primer fasta file (if not given, use IsoSeq defaults)")
parser.add_argument("-n", "--chunks", default=10, type=int, help="Number of chunks (CPUs) to use, default 10")
args = parser.parse_args()
if args.primer_fasta is None:
seq_5p = ISOSEQ_5P
seq_3p = ISOSEQ_3P
print(f"Using Iso-Seq default 5' primer sequence: {seq_5p}")
print(f"Using Iso-Seq default 3' primer sequence: {seq_3p}")
else:
reader = SeqIO.parse(open(args.primer_fasta), 'fasta')
r = next(reader)
        if r.id != '5p':
print("ERROR: the first entry in {0} should be >5p! Abort!".format(args.primer_fasta))
sys.exit(-1)
seq_5p = str(r.seq)
r = next(reader)
        if r.id != '3p':
print("ERROR: the second entry in {0} should be >3p! Abort!".format(args.primer_fasta))
sys.exit(-1)
seq_3p = str(r.seq)
print(f"Reading in 5' primer sequence: {seq_5p}")
print(f"Reading in 3' primer sequence: {seq_3p}")
seq_3p_rev = str(Seq(seq_3p).reverse_complement())
trim5p3p_multithreaded(args.fastq_filename, args.output_prefix, seq_5p, seq_3p_rev, args.chunks)
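# Minimal usage sketch (file names are illustrative):
#
#   python trim_primers.py reads.fastq trimmed -n 10
#
# writes trimmed.fl.fasta, trimmed.fl.clips, trimmed.nfl.fasta and trimmed.csv
# using the Iso-Seq default primers; a custom primer pair can be supplied with
# --primer_fasta (first record named 5p, second named 3p).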
| 36.344978 | 123 | 0.58104 | [
"BSD-3-Clause-Clear"
] | ArthurDondi/cDNA_Cupcake | beta/trim_primers.py | 8,323 | Python |
#!/usr/bin/env python
"""
Tests for ohsome client
"""
import os
import pandas as pd
from nose.tools import raises
import geojson
import geopandas as gpd
import ohsome
@raises(ohsome.OhsomeException)
def test_handle_multiple_responses_throw_timeouterror():
"""
    Tests that a too small timeout raises an OhsomeException
:return:
"""
# GIVEN
bboxes = [8.67066,49.41423,8.68177,49.4204]
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
# WHEN
client = ohsome.OhsomeClientParallel()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values, timeout=2)
del client
def test_elements_count():
"""
Tests counting elements within a bounding box for two timestamps
:return:
"""
# GIVEN
bboxes = [8.67066,49.41423,8.68177,49.4204]
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [53.0, 256.0]
expected = pd.DataFrame({"timestamp": timestamps, "value": counts})
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_dataframe()
del client
# THEN
assert expected.equals(result)
def test_elements_count_group_by_key():
"""
Tests counting elements within a bounding box and grouping them by keys
:return:
"""
#GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2011-01-01/P1Y"
groupByKeys = ["building"]
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z", "2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [482.0, 628.0, 53.0, 256.0]
keys = ["remainder", "remainder", "building", "building"]
expected = pd.DataFrame({"key": keys, "timestamp": timestamps, "value": counts})
expected.set_index(["key", "timestamp"], inplace=True)
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.count.groupBy.key.post(bboxes=bboxes, groupByKeys=groupByKeys, time=time)
results = response.as_dataframe()
# THEN
assert expected.equals(results)
def test_elements_count_ratio():
"""
Tests count ratio
:return:
"""
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01"
keys = ["building"]
keys2 = ["addr:city"]
values = [""]
values2 = [""]
expected = 365.0
client = ohsome.OhsomeClient()
response = client.elements.count.ratio.post(bboxes=bboxes, time=time, keys=keys, keys2=keys2,
values=values, values2=values2)
#results = response.as_dataframe()
# Cache is disabled
"""
def test_use_cache_dir():
# GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2018-01-01/P1Y"
keys = ["building"]
values = [""]
cache_dir = "./tmp"
timestamps = ["2010-01-01T00:00:00Z", "2011-01-01T00:00:00Z"]
counts = [53.0, 256.0]
expected = pd.DataFrame({"timestamp": timestamps, "value": counts})
# WHEN
client = ohsome.OhsomeClient(cache_dir=cache_dir)
assert os.path.exists(cache_dir)
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_dataframe()
#del client
"""
@raises(AssertionError)
def test_elements_count_exception():
"""
    Tests whether an AssertionError is raised if the result cannot be converted to a geodataframe object
:return:
"""
# GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01/2011-01-01/P1Y"
keys = ["building"]
values = [""]
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.count.post(bboxes=bboxes, time=time, keys=keys, values=values)
response.as_geodataframe()
def test_elements_geometry():
"""
Tests whether the result of an elements/geometry query can be converted to a geodataframe
:return:
"""
# GIVEN
bboxes = "8.67066,49.41423,8.68177,49.4204"
time = "2010-01-01"
keys = ["landuse"]
values = ["grass"]
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.geometry.post(bboxes=bboxes, time=time, keys=keys, values=values)
result = response.as_geodataframe()
del client
# THEN
assert len(result.geometry) == 9
def test_to_file_assert_filetype():
"""
Asserts whether an error is thrown if the output file is not json or geojson
:return:
"""
output_file = "./out.shp"
def test_format_coordinates():
"""
Asserts that coordinates of a MultiPolygon are concerted correctly
:return:
"""
# GIVEN
bpolys = geojson.FeatureCollection([{"type": "Feature",
"geometry": {"coordinates": [[[[13,51], [13,51.1], [13.1,51.1], [13.1,51], [13,51]],
[[13,51], [14,51.1], [14.1,51.1], [14.1,51], [14,51]]]],
"type": "MultiPolygon"}}])
time = "2018-01-01"
keys = ["landuse"]
values = ["grass"]
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.geometry.post(bpolys=ohsome.format_coordinates(bpolys), time=time, keys=keys, values=values)
result = response.as_geodataframe()
del client
# THEN
assert len(result.geometry) == 74
def test_format_geodataframe():
# GIVEN
bpolys = geojson.FeatureCollection([{"type": "Feature",
"properties": {"id": 0},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}}])
bpolys_df = gpd.GeoDataFrame().from_features(bpolys)
time = "2018-01-01"
keys = ["amenity"]
values = [""]
format = "geojson"
properties = ["tags", "metadata"]
# WHEN
client = ohsome.OhsomeClient()
response = client.elements.count.groupBy.boundary.post(bpolys=bpolys_df, time=time, keys=keys, values=values,
format=format, properties=properties)
result = response.as_geodataframe()
del client
# THEN
assert result["value"][0] == 538
def test_parallel_user():
# GIVEN
bpolys = geojson.FeatureCollection([{"type": "Feature",
"properties": {"id": 0},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}},
{"type": "Feature",
"properties": {"id": 1},
"geometry": {"coordinates": [
[[[13, 51], [13, 51.1], [13.1, 51.1], [13.1, 51], [13, 51]]],
[[[14, 51], [14, 51.1], [14.1, 51.1], [14.1, 51], [14, 51]]]],
"type": "MultiPolygon"}}
])
bpolys_df = gpd.GeoDataFrame().from_features(bpolys)
timeperiod = "2017-01-01,2018-01-01"
keys = ["amenity"]
values = [""]
format = "json"
properties = ["metadata"]
# WHEN
client = ohsome.OhsomeClientParallel(chunksize=1)
response = client.users.count.groupBy.boundary.post(bpolys=bpolys_df, time=timeperiod, keys=keys, values=values,
format=format, properties=properties)
result = response.as_dataframe()
del client
# THEN
assert result["value"][0] == 33.
| 32.628 | 125 | 0.55388 | [
"BSD-3-Clause"
] | redfrexx/osm_association_rules | src/ohsome/tests/test_ohsome_client.py | 8,157 | Python |
import os, sys, glob, argparse
import logging
import types
from collections import OrderedDict
import torch
import torch.nn.functional as F
import utils
import models
import main as entry
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
def export_onnx(args):
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
if utils.check_file(args.old):
print("load pretrained from %s" % args.old)
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else: # force cpu mode
checkpoint = torch.load(args.old, map_location='cpu')
print("load pretrained ==> last epoch: %d" % checkpoint.get('epoch', 0))
print("load pretrained ==> last best_acc: %f" % checkpoint.get('best_acc', 0))
print("load pretrained ==> last learning_rate: %f" % checkpoint.get('learning_rate', 0))
try:
utils.load_state_dict(model, checkpoint.get('state_dict', None))
except RuntimeError:
print("Loading pretrained model failed")
else:
print("no pretrained file exists({}), init model with default initlizer".
format(args.old))
onnx_model = torch.nn.Sequential(OrderedDict([
('network', model),
('softmax', torch.nn.Softmax()),
]))
onnx_path = "onnx/" + model_name
if not os.path.exists(onnx_path):
os.makedirs(onnx_path)
onnx_save = onnx_path + "/" + model_name + '.onnx'
input_names = ["input"]
dummy_input = torch.zeros((1, 3, args.input_size, args.input_size))
output_names = ['prob']
torch.onnx.export(
onnx_model,
dummy_input,
onnx_save,
verbose=True,
input_names=input_names,
output_names=output_names,
opset_version=7,
keep_initializers_as_inputs=True
)
def inference(args):
from models.quant import custom_conv
def init(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,
args=None, force_fp=False, feature_stride=1):
super(custom_conv, self).__init__(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias)
self.args = args
self.force_fp = True
custom_conv.__init__ = init
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
def forward(self, x):
print(x.shape, self.weight.shape, self.kernel_size, self.stride, self.padding, self.dilation, self.groups)
output = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
return output
for m in model.modules():
if isinstance(m, torch.nn.Conv2d):
m.forward = types.MethodType(forward, m)
input = torch.rand(1, 3, args.input_size, args.input_size)
model.forward(input)
def get_parameter():
parser = entry.get_parser()
parser.add_argument('--old', type=str, default='')
parser.add_argument('--new', type=str, default='')
parser.add_argument('--mapping_from', '--mf', type=str, default='')
parser.add_argument('--mapping_to', '--mt', type=str, default='')
parser.add_argument('--verbose_list', default='ratio,sep', type=str)
args = parser.parse_args()
if isinstance(args.verbose_list, str):
args.verbose_list = [x.strip() for x in args.verbose_list.split(',')]
if isinstance(args.keyword, str):
args.keyword = [x.strip() for x in args.keyword.split(',')]
return args
def main():
args = get_parameter()
args.weights_dir = os.path.join(args.weights_dir, args.model)
utils.check_folder(args.weights_dir)
if os.path.exists(args.log_dir):
utils.setup_logging(os.path.join(args.log_dir, 'tools.txt'), resume=True)
config = dict()
for i in args.keyword:
config[i] = True
if 'export_onnx' in config.keys():
export_onnx(args)
if 'inference' in config.keys():
inference(args)
if 'verbose' in config.keys():
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else: # force cpu mode
checkpoint = torch.load(args.old, map_location='cpu')
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
if 'model' in checkpoint:
checkpoint = checkpoint['model']
for name, value in checkpoint.items():
if ('quant_activation' in name or 'quant_weight' in name) and name.split('.')[-1] in args.verbose_list:
print(name, value.shape, value.requires_grad)
print(value.data)
elif "all" in args.verbose_list:
if 'num_batches_tracked' not in name:
if isinstance(value, torch.Tensor):
print(name, value.shape, value.requires_grad)
elif isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
print(name, value, type(value))
else:
print(name, type(value))
if 'load' in config.keys() or 'save' in config.keys():
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
if utils.check_file(args.old):
raw = 'raw' in config.keys()
if torch.cuda.is_available():
checkpoint = torch.load(args.old)
else: # force cpu mode
checkpoint = torch.load(args.old, map_location='cpu')
try:
utils.load_state_dict(model, checkpoint.get('state_dict', None) if not raw else checkpoint, verbose=False)
except RuntimeError:
print("Loading pretrained model failed")
print("Loading pretrained model OK")
if 'save' in config.keys() and args.new != '':
torch.save(model.state_dict(), args.new)
print("Save pretrained model into %s" % args.new)
else:
print("file not exist %s" % args.old)
if 'update' in config.keys():
mapping_from = []
mapping_to = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
if os.path.isfile(args.mapping_to):
with open(args.mapping_to) as f:
mapping_to = f.readlines()
f.close()
mapping_from = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_from]
mapping_from = [ i for i in mapping_from if len(i) > 0 and i[0] != '#']
mapping_to = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_to]
mapping_to = [ i for i in mapping_to if len(i) > 0 and i[0] != '#']
if len(mapping_to) != len(mapping_from) or len(mapping_to) == 0 or len(mapping_from) == 0:
mapping = None
logging.info('no valid mapping')
else:
mapping = dict()
for i, k in enumerate(mapping_from):
if '{' in k and '}' in k and '{' in mapping_to[i] and '}' in mapping_to[i]:
item = k.split('{')
for v in item[1].strip('}').split(","):
v = v.strip()
mapping[item[0] + v] = mapping_to[i].split('{')[0] + v
else:
mapping[k] = mapping_to[i]
raw = 'raw' in config.keys()
if not os.path.isfile(args.old):
args.old = args.pretrained
utils.import_state_dict(args.old, args.new, mapping, raw, raw_prefix=args.case)
if 'det-load' in config.keys():
from third_party.checkpoint import DetectionCheckpointer
model_name = args.model
if model_name in models.model_zoo:
model, args = models.get_model(args)
else:
print("model(%s) not support, available models: %r" % (model_name, models.model_zoo))
return
split = os.path.split(args.old)
checkpointer = DetectionCheckpointer(model, split[0], save_to_disk=True)
checkpointer.resume_or_load(args.old, resume=True)
checkpointer.save(split[1])
if 'swap' in config.keys():
mapping_from = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
mapping_from = [ i.strip().strip('\n').strip('"').strip("'") for i in mapping_from]
mapping_from = [ i for i in mapping_from if len(i) > 0 and i[0] != '#']
lists = args.verbose_list
for i in lists:
item = i.split('/')
interval = (int)(item[0])
index = item[1].split('-')
index = [(int)(x) for x in index]
if len(mapping_from) % interval == 0 and len(index) <= interval:
mapping_to = mapping_from.copy()
for j, k in enumerate(index):
k = k % interval
mapping_to[j::interval] = mapping_from[k::interval]
mapping_to= [ i + '\n' for i in mapping_to]
with open(args.mapping_from + "-swap", 'w') as f:
f.writelines(mapping_to)
f.close()
if 'sort' in config.keys():
mapping_from = []
if os.path.isfile(args.mapping_from):
with open(args.mapping_from) as f:
mapping_from = f.readlines()
f.close()
mapping_from.sort()
with open(args.mapping_from + "-sort", 'w') as f:
f.writelines(mapping_from)
f.close()
if 'verify-data' in config.keys() or 'verify-image' in config.keys():
if 'verify-image' in config.keys():
lists = args.verbose_list
else:
with open(os.path.join(args.root, 'train.txt')) as f:
lists = f.readlines()
f.close()
from PIL import Image
from threading import Thread
print("going to check %d files" % len(lists))
def check(lists, start, end, index):
for i, item in enumerate(lists[start:end]):
try:
items = item.split()
if len(items) >= 1:
path = items[0].strip().strip('\n')
else:
print("skip line %s" % i)
continue
path = os.path.join(args.root, os.path.join("train", path))
imgs = Image.open(path)
imgs.resize((256,256))
if index == 0:
print(i, end ="\r", file=sys.stderr)
except (RuntimeError, IOError):
print("\nError when read image %s" % path)
print("\nFinish checking", index)
#lists = lists[45000:]
num = min(len(lists), 20)
for i in range(num):
start = len(lists) // num * i
end = min(start + len(lists) // num, len(lists))
th = Thread(target=check, args=(lists, start, end, i))
th.start()
if __name__ == '__main__':
main()
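# Usage sketch: --keyword selects which branch of main() runs (export_onnx,
# inference, verbose, load/save, update, swap, sort, verify-data, ...). The
# model and checkpoint names below are illustrative, and the --model/--keyword
# flag spellings are assumed to come from main.get_parser().
#
#   python tools.py --model resnet18 --keyword export_onnx --old weights/best.pth
#   python tools.py --model resnet18 --keyword verbose --old weights/best.pth --verbose_list all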
| 40.234694 | 158 | 0.563108 | [
"BSD-2-Clause"
] | billhhh/model-quantization-1 | tools.py | 11,829 | Python |
from selenium import webdriver
#import itertools
from openpyxl import Workbook, load_workbook
import re
import datetime
driver = webdriver.Firefox()
driver.get("https://www.worldometers.info/coronavirus/")
countries = []
cases = []
newCases = []
data = []
casesInt = []
newCasesInt = []
cells = []
cellsB = []
datez = datetime.datetime.now()
nowDate = datez.strftime("%d%b%y")
for country in range(2,22):
countries.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(country) + "]/td[1]").text)
for case in range(2,22):
cases.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(case) + "]/td[2]").text)
for newCase in range(2,22):
newCases.append(driver.find_element_by_xpath("//table/tbody[1]/tr[" + str(newCase) + "]/td[3]").text)
data = dict(zip(countries, zip(cases, newCases)))
#print(data)
for case in cases:
case = re.sub(r'\D', '', case)
casesInt.append(int(case))
for newCase in newCases:
if newCase:
newCase = re.sub(r'\D', '', newCase)
newCasesInt.append(int(newCase))
else:
newCasesInt.append(1)
percentages = []
for caseInt,newCase in zip(casesInt, newCasesInt):
result = caseInt - newCase
percentage = round((newCase/result)*100, 2)
percentages.append(percentage)
#for country, percentage in zip(countries, percentages):
# print(country, ":", percentage)
wb = Workbook()
wb = load_workbook(filename='corona.xlsx')
ws = wb.active
#for countries column
for i in range(2,22):
i = str(i)
appendValue = 'A' + i
appendValueB = 'B' + i
cells.append(appendValue)
cellsB.append(appendValueB)
for i in range(20):
ws['A' + str(i+2)] = countries[i]
ws['B' + str(i+2)] = percentages[i]
wb.save(filename="corona" + nowDate + ".xlsx")
| 27.672131 | 103 | 0.695498 | [
"MIT"
] | stephengarn/coronavirus | corona.py | 1,688 | Python |
import glob
import os
import librosa
import numpy as np
import tensorflow as tf
import sounddevice
from sklearn.preprocessing import StandardScaler
duration = 0.1 # seconds
sample_rate=44100
'''0 = air_conditioner
1 = car_horn
2 = children_playing
3 = dog_bark
4 = drilling
5 = engine_idling
6 = gun_shot
7 = jackhammer
8 = siren
9 = street_music'''
def extract_features():
X = sounddevice.rec(int(duration * sample_rate), samplerate=sample_rate, channels=1)
sounddevice.wait()
X= np.squeeze(X)
stft = np.abs(librosa.stft(X))
mfccs = np.array(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=8).T)
chroma = np.array(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T)
mel = np.array(librosa.feature.melspectrogram(X, sr=sample_rate).T)
contrast = np.array(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T)
tonnetz = np.array(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T)
ext_features = np.hstack([mfccs,chroma,mel,contrast,tonnetz])
    features = np.vstack([ext_features])  # one row per analysis frame, 161 features per row
return features
model_path = "model"
fit_params = np.load('fit_params.npy')
sc = StandardScaler()
sc.fit(fit_params)
n_dim = 161
n_classes = 10
n_hidden_units_one = 256
n_hidden_units_two = 256
sd = 1 / np.sqrt(n_dim)
learning_rate = 0.01
X = tf.placeholder(tf.float32,[None,n_dim])
Y = tf.placeholder(tf.float32,[None,n_classes])
W_1 = tf.Variable(tf.random_normal([n_dim,n_hidden_units_one], mean = 0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean = 0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X,W_1) + b_1)
W_2 = tf.Variable(tf.random_normal([n_hidden_units_one,n_hidden_units_two], mean = 0, stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean = 0, stddev=sd))
h_2 = tf.nn.sigmoid(tf.matmul(h_1,W_2) + b_2)
W = tf.Variable(tf.random_normal([n_hidden_units_two,n_classes], mean = 0, stddev=sd))
b = tf.Variable(tf.random_normal([n_classes], mean = 0, stddev=sd))
y_ = tf.nn.softmax(tf.matmul(h_2,W) + b)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
y_true, y_pred = None, None
with tf.Session() as sess:
saver.restore(sess, model_path)
print "Model loaded"
sess.run(tf.global_variables())
while 1:
feat = extract_features()
feat = sc.transform(feat)
y_pred = sess.run(tf.argmax(y_, 1), feed_dict={X: feat})
        print(y_pred)
| 28.08046 | 97 | 0.715104 | [
"MIT"
] | GianlucaPaolocci/SMProject | classiPi.py | 2,443 | Python |
import epaper2in13
from machine import Pin,SPI
from time import sleep_ms
# SPI #2 on ESP32
spi = SPI(2,baudrate=2000000, polarity=0, phase=0) # miso=Pin(12), mosi=Pin(23), sck=Pin(18))
cs = Pin(5)
dc = Pin(2)
rst = Pin(15)
busy = Pin(4)
e = epaper2in13.EPD(spi, cs, dc, rst, busy)
e.init(e.FULL_UPDATE)
y_start = 6 # Y addresses start at 6 due to the memory layout
import framebuf
buf = bytearray(e.width * e.height // 8)
fb = framebuf.FrameBuffer(buf, e.height, e.width, framebuf.MONO_VLSB)
# --------------------
fb.fill(0)
fb.text('MicroPython!', 2, y_start + 2, 0xffff)
fb.rect(0, y_start, 250, 122, 0xffff)
e.set_frame_memory(buf,0,0,e.width,e.height)
e.display_frame()
sleep_ms(2000) # wait for 2 seconds before doing a partial update
# --------------------
e.init(e.PART_UPDATE)
fb = framebuf.FrameBuffer(buf, 200, 32, framebuf.MONO_VLSB)
fb.fill(0x0)
for i in range(0, 32//2-1, 2):
fb.rect(i, i, 200-i*2, 32-i*2, 0xffff)
e.set_frame_memory(buf,8,32,32,200) # 8px from bottom, 32px from left
e.display_frame()
| 23 | 93 | 0.671498 | [
"MIT"
] | piratebriggs/micropython-waveshare-epaper | examples/2in13-hello-world/test.py | 1,035 | Python |
import datetime
import os
import typing
from dataclasses import dataclass
import pandas
import pytest
from dataclasses_json import dataclass_json
import flytekit
from flytekit import ContainerTask, SQLTask, dynamic, kwtypes, maptask
from flytekit.common.translator import get_serializable
from flytekit.core import context_manager, launch_plan, promise
from flytekit.core.condition import conditional
from flytekit.core.context_manager import ExecutionState, Image, ImageConfig
from flytekit.core.node import Node
from flytekit.core.promise import NodeOutput, Promise, VoidPromise
from flytekit.core.resources import Resources
from flytekit.core.task import TaskMetadata, task
from flytekit.core.testing import patch, task_mock
from flytekit.core.type_engine import RestrictedTypeError, TypeEngine
from flytekit.core.workflow import workflow
from flytekit.interfaces.data.data_proxy import FileAccessProvider
from flytekit.models.core import types as _core_types
from flytekit.models.interface import Parameter
from flytekit.models.task import Resources as _resource_models
from flytekit.models.types import LiteralType
from flytekit.types.schema import FlyteSchema, SchemaOpenMode
def test_default_wf_params_works():
@task
def my_task(a: int):
wf_params = flytekit.current_context()
assert wf_params.execution_id == "ex:local:local:local"
my_task(a=3)
def test_simple_input_output():
@task
def my_task(a: int) -> typing.NamedTuple("OutputsBC", b=int, c=str):
ctx = flytekit.current_context()
assert ctx.execution_id == "ex:local:local:local"
return a + 2, "hello world"
assert my_task(a=3) == (5, "hello world")
def test_simple_input_no_output():
@task
def my_task(a: int):
pass
assert my_task(a=3) is None
ctx = context_manager.FlyteContext.current_context()
with ctx.new_compilation_context() as ctx:
outputs = my_task(a=3)
assert isinstance(outputs, VoidPromise)
def test_single_output():
@task
def my_task() -> str:
return "Hello world"
assert my_task() == "Hello world"
ctx = context_manager.FlyteContext.current_context()
with ctx.new_compilation_context() as ctx:
outputs = my_task()
assert ctx.compilation_state is not None
nodes = ctx.compilation_state.nodes
assert len(nodes) == 1
assert outputs.is_ready is False
assert outputs.ref.node is nodes[0]
def test_engine_file_output():
basic_blob_type = _core_types.BlobType(format="", dimensionality=_core_types.BlobType.BlobDimensionality.SINGLE,)
fs = FileAccessProvider(local_sandbox_dir="/tmp/flytetesting")
with context_manager.FlyteContext.current_context().new_file_access_context(file_access_provider=fs) as ctx:
# Write some text to a file not in that directory above
test_file_location = "/tmp/sample.txt"
with open(test_file_location, "w") as fh:
fh.write("Hello World\n")
lit = TypeEngine.to_literal(ctx, test_file_location, os.PathLike, LiteralType(blob=basic_blob_type))
# Since we're using local as remote, we should be able to just read the file from the 'remote' location.
with open(lit.scalar.blob.uri, "r") as fh:
assert fh.readline() == "Hello World\n"
# We should also be able to turn the thing back into regular python native thing.
redownloaded_local_file_location = TypeEngine.to_python_value(ctx, lit, os.PathLike)
with open(redownloaded_local_file_location, "r") as fh:
assert fh.readline() == "Hello World\n"
def test_wf1():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: str, b: str) -> str:
return b + a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
d = t2(a=y, b=b)
return x, d
assert len(my_wf._nodes) == 2
assert my_wf._nodes[0].id == "n0"
assert my_wf._nodes[1]._upstream_nodes[0] is my_wf._nodes[0]
assert len(my_wf._output_bindings) == 2
assert my_wf._output_bindings[0].var == "o0"
assert my_wf._output_bindings[0].binding.promise.var == "t1_int_output"
nt = typing.NamedTuple("SingleNT", t1_int_output=float)
@task
def t3(a: int) -> nt:
return (a + 2,)
assert t3.python_interface.output_tuple_name == "SingleNT"
assert t3.interface.outputs["t1_int_output"] is not None
def test_wf1_run():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: str, b: str) -> str:
return b + a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
d = t2(a=y, b=b)
return x, d
x = my_wf(a=5, b="hello ")
assert x == (7, "hello world")
@workflow
def my_wf2(a: int, b: str) -> (int, str):
tup = t1(a=a)
d = t2(a=tup.c, b=b)
return tup.t1_int_output, d
x = my_wf2(a=5, b="hello ")
assert x == (7, "hello world")
def test_wf1_with_overrides():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: str, b: str) -> str:
return b + a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a).with_overrides(name="x")
d = t2(a=y, b=b).with_overrides()
return x, d
x = my_wf(a=5, b="hello ")
assert x == (7, "hello world")
def test_wf1_with_list_of_inputs():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: typing.List[str]) -> str:
return " ".join(a)
@workflow
def my_wf(a: int, b: str) -> (int, str):
xx, yy = t1(a=a)
d = t2(a=[b, yy])
return xx, d
x = my_wf(a=5, b="hello")
assert x == (7, "hello world")
@workflow
def my_wf2(a: int, b: str) -> int:
x, y = t1(a=a)
t2(a=[b, y])
return x
x = my_wf2(a=5, b="hello")
assert x == 7
def test_wf_output_mismatch():
with pytest.raises(AssertionError):
@workflow
def my_wf(a: int, b: str) -> (int, str):
return a
with pytest.raises(AssertionError):
@workflow
def my_wf2(a: int, b: str) -> int:
return a, b
@workflow
def my_wf3(a: int, b: str) -> int:
return (a,)
my_wf3(a=10, b="hello")
def test_promise_return():
"""
    Testing that when a workflow is locally executed while a local wf execution context already exists, Promise objects
are returned wrapping Flyte literals instead of the unpacked dict.
"""
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
a = a + 2
return a, "world-" + str(a)
@workflow
def mimic_sub_wf(a: int) -> (str, str):
x, y = t1(a=a)
u, v = t1(a=x)
return y, v
ctx = context_manager.FlyteContext.current_context()
with ctx.new_execution_context(mode=ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION) as ctx:
a, b = mimic_sub_wf(a=3)
assert isinstance(a, promise.Promise)
assert isinstance(b, promise.Promise)
assert a.val.scalar.value.string_value == "world-5"
assert b.val.scalar.value.string_value == "world-7"
def test_wf1_with_sql():
sql = SQLTask(
"my-query",
query_template="SELECT * FROM hive.city.fact_airport_sessions WHERE ds = '{{ .Inputs.ds }}' LIMIT 10",
inputs=kwtypes(ds=datetime.datetime),
outputs=kwtypes(results=FlyteSchema),
metadata=TaskMetadata(retries=2),
)
@task
def t1() -> datetime.datetime:
return datetime.datetime.now()
@workflow
def my_wf() -> FlyteSchema:
dt = t1()
return sql(ds=dt)
with task_mock(sql) as mock:
mock.return_value = pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})
assert (my_wf().open().all() == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})).all().all()
def test_wf1_with_sql_with_patch():
sql = SQLTask(
"my-query",
query_template="SELECT * FROM hive.city.fact_airport_sessions WHERE ds = '{{ .Inputs.ds }}' LIMIT 10",
inputs=kwtypes(ds=datetime.datetime),
outputs=kwtypes(results=FlyteSchema),
metadata=TaskMetadata(retries=2),
)
@task
def t1() -> datetime.datetime:
return datetime.datetime.now()
@workflow
def my_wf() -> FlyteSchema:
dt = t1()
return sql(ds=dt)
@patch(sql)
def test_user_demo_test(mock_sql):
mock_sql.return_value = pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})
assert (my_wf().open().all() == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]})).all().all()
# Have to call because tests inside tests don't run
test_user_demo_test()
def test_wf1_with_map():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
a = a + 2
return a, "world-" + str(a)
@task
def t2(a: typing.List[int], b: typing.List[str]) -> (int, str):
ra = 0
for x in a:
ra += x
rb = ""
for x in b:
rb += x
return ra, rb
@workflow
def my_wf(a: typing.List[int]) -> (int, str):
x, y = maptask(t1, metadata=TaskMetadata(retries=1))(a=a)
return t2(a=x, b=y)
x = my_wf(a=[5, 6])
assert x == (15, "world-7world-8")
def test_wf1_compile_time_constant_vars():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: str, b: str) -> str:
return b + a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
d = t2(a="This is my way", b=b)
return x, d
x = my_wf(a=5, b="hello ")
assert x == (7, "hello This is my way")
def test_wf1_with_constant_return():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: str, b: str) -> str:
return b + a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
t2(a="This is my way", b=b)
return x, "A constant output"
x = my_wf(a=5, b="hello ")
assert x == (7, "A constant output")
@workflow
def my_wf2(a: int, b: str) -> int:
t1(a=a)
t2(a="This is my way", b=b)
return 10
assert my_wf2(a=5, b="hello ") == 10
def test_wf1_with_dynamic():
@task
def t1(a: int) -> str:
a = a + 2
return "world-" + str(a)
@task
def t2(a: str, b: str) -> str:
return b + a
@dynamic
def my_subwf(a: int) -> typing.List[str]:
s = []
for i in range(a):
s.append(t1(a=i))
return s
@workflow
def my_wf(a: int, b: str) -> (str, typing.List[str]):
x = t2(a=b, b=b)
v = my_subwf(a=a)
return x, v
v = 5
x = my_wf(a=v, b="hello ")
assert x == ("hello hello ", ["world-" + str(i) for i in range(2, v + 2)])
with context_manager.FlyteContext.current_context().new_serialization_settings(
serialization_settings=context_manager.SerializationSettings(
project="test_proj",
domain="test_domain",
version="abc",
image_config=ImageConfig(Image(name="name", fqn="image", tag="name")),
env={},
)
) as ctx:
with ctx.new_execution_context(mode=ExecutionState.Mode.TASK_EXECUTION) as ctx:
dynamic_job_spec = my_subwf.compile_into_workflow(ctx, my_subwf._task_function, a=5)
assert len(dynamic_job_spec._nodes) == 5
def test_list_output():
@task
def t1(a: int) -> str:
a = a + 2
return "world-" + str(a)
@workflow
def lister() -> typing.List[str]:
s = []
# FYI: For users who happen to look at this, keep in mind this is only run once at compile time.
for i in range(10):
s.append(t1(a=i))
return s
assert len(lister.interface.outputs) == 1
binding_data = lister._output_bindings[0].binding # the property should be named binding_data
assert binding_data.collection is not None
assert len(binding_data.collection.bindings) == 10
def test_comparison_refs():
def dummy_node(node_id) -> Node:
n = Node(
node_id,
metadata=None,
bindings=[],
upstream_nodes=[],
flyte_entity=SQLTask(name="x", query_template="x", inputs={}),
)
n._id = node_id
return n
px = Promise("x", NodeOutput(var="x", node=dummy_node("n1")))
py = Promise("y", NodeOutput(var="y", node=dummy_node("n2")))
def print_expr(expr):
print(f"{expr} is type {type(expr)}")
print_expr(px == py)
print_expr(px < py)
print_expr((px == py) & (px < py))
print_expr(((px == py) & (px < py)) | (px > py))
print_expr(px < 5)
print_expr(px >= 5)
def test_comparison_lits():
px = Promise("x", TypeEngine.to_literal(None, 5, int, None))
py = Promise("y", TypeEngine.to_literal(None, 8, int, None))
def eval_expr(expr, expected: bool):
print(f"{expr} evals to {expr.eval()}")
assert expected == expr.eval()
eval_expr(px == py, False)
eval_expr(px < py, True)
eval_expr((px == py) & (px < py), False)
eval_expr(((px == py) & (px < py)) | (px > py), False)
eval_expr(px < 5, False)
eval_expr(px >= 5, True)
eval_expr(py >= 5, True)
def test_wf1_branches():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: str) -> str:
return a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
d = (
conditional("test1")
.if_(x == 4)
.then(t2(a=b))
.elif_(x >= 5)
.then(t2(a=y))
.else_()
.fail("Unable to choose branch")
)
f = conditional("test2").if_(d == "hello ").then(t2(a="It is hello")).else_().then(t2(a="Not Hello!"))
return x, f
x = my_wf(a=5, b="hello ")
assert x == (7, "Not Hello!")
x = my_wf(a=2, b="hello ")
assert x == (4, "It is hello")
def test_wf1_branches_no_else():
with pytest.raises(NotImplementedError):
def foo():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: str) -> str:
return a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
d = conditional("test1").if_(x == 4).then(t2(a=b)).elif_(x >= 5).then(t2(a=y))
conditional("test2").if_(x == 4).then(t2(a=b)).elif_(x >= 5).then(t2(a=y)).else_().fail("blah")
return x, d
foo()
def test_wf1_branches_failing():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: str) -> str:
return a
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
d = (
conditional("test1")
.if_(x == 4)
.then(t2(a=b))
.elif_(x >= 5)
.then(t2(a=y))
.else_()
.fail("All Branches failed")
)
return x, d
with pytest.raises(ValueError):
my_wf(a=1, b="hello ")
def test_cant_use_normal_tuples():
with pytest.raises(RestrictedTypeError):
@task
def t1(a: str) -> tuple:
return (a, 3)
def test_wf1_df():
@task
def t1(a: int) -> pandas.DataFrame:
return pandas.DataFrame(data={"col1": [a, 2], "col2": [a, 4]})
@task
def t2(df: pandas.DataFrame) -> pandas.DataFrame:
return df.append(pandas.DataFrame(data={"col1": [5, 10], "col2": [5, 10]}))
@workflow
def my_wf(a: int) -> pandas.DataFrame:
df = t1(a=a)
return t2(df=df)
x = my_wf(a=20)
assert isinstance(x, pandas.DataFrame)
result_df = x.reset_index(drop=True) == pandas.DataFrame(
data={"col1": [20, 2, 5, 10], "col2": [20, 4, 5, 10]}
).reset_index(drop=True)
assert result_df.all().all()
def test_lp_serialize():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
a = a + 2
return a, "world-" + str(a)
@task
def t2(a: str, b: str) -> str:
return b + a
@workflow
def my_subwf(a: int) -> (str, str):
x, y = t1(a=a)
u, v = t1(a=x)
return y, v
lp = launch_plan.LaunchPlan.create("serialize_test1", my_subwf)
lp_with_defaults = launch_plan.LaunchPlan.create("serialize_test2", my_subwf, default_inputs={"a": 3})
serialization_settings = context_manager.SerializationSettings(
project="proj",
domain="dom",
version="123",
image_config=ImageConfig(Image(name="name", fqn="asdf/fdsa", tag="123")),
env={},
)
sdk_lp = get_serializable(serialization_settings, lp)
assert len(sdk_lp.default_inputs.parameters) == 0
assert len(sdk_lp.fixed_inputs.literals) == 0
sdk_lp = get_serializable(serialization_settings, lp_with_defaults)
assert len(sdk_lp.default_inputs.parameters) == 1
assert len(sdk_lp.fixed_inputs.literals) == 0
# Adding a check to make sure oneof is respected. Tricky with booleans... if a default is specified, the
# required field needs to be None, not False.
parameter_a = sdk_lp.default_inputs.parameters["a"]
parameter_a = Parameter.from_flyte_idl(parameter_a.to_flyte_idl())
assert parameter_a.default is not None
def test_wf_container_task():
@task
def t1(a: int) -> (int, str):
return a + 2, str(a) + "-HELLO"
t2 = ContainerTask(
"raw",
image="alpine",
inputs=kwtypes(a=int, b=str),
input_data_dir="/tmp",
output_data_dir="/tmp",
command=["cat"],
arguments=["/tmp/a"],
)
def wf(a: int):
x, y = t1(a=a)
t2(a=x, b=y)
with task_mock(t2) as mock:
mock.side_effect = lambda a, b: None
assert t2(a=10, b="hello") is None
wf(a=10)
def test_wf_container_task_multiple():
square = ContainerTask(
name="square",
input_data_dir="/var/inputs",
output_data_dir="/var/outputs",
inputs=kwtypes(val=int),
outputs=kwtypes(out=int),
image="alpine",
command=["sh", "-c", "echo $(( {{.Inputs.val}} * {{.Inputs.val}} )) | tee /var/outputs/out"],
)
sum = ContainerTask(
name="sum",
input_data_dir="/var/flyte/inputs",
output_data_dir="/var/flyte/outputs",
inputs=kwtypes(x=int, y=int),
outputs=kwtypes(out=int),
image="alpine",
command=["sh", "-c", "echo $(( {{.Inputs.x}} + {{.Inputs.y}} )) | tee /var/flyte/outputs/out"],
)
@workflow
def raw_container_wf(val1: int, val2: int) -> int:
return sum(x=square(val=val1), y=square(val=val2))
with task_mock(square) as square_mock, task_mock(sum) as sum_mock:
square_mock.side_effect = lambda val: val * val
assert square(val=10) == 100
sum_mock.side_effect = lambda x, y: x + y
assert sum(x=10, y=10) == 20
assert raw_container_wf(val1=10, val2=10) == 200
def test_wf_tuple_fails():
with pytest.raises(RestrictedTypeError):
@task
def t1(a: tuple) -> (int, str):
return a[0] + 2, str(a) + "-HELLO"
def test_wf_typed_schema():
schema1 = FlyteSchema[kwtypes(x=int, y=str)]
@task
def t1() -> schema1:
s = schema1()
s.open().write(pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]}))
return s
@task
def t2(s: FlyteSchema[kwtypes(x=int, y=str)]) -> FlyteSchema[kwtypes(x=int)]:
df = s.open().all()
return df[s.column_names()[:-1]]
@workflow
def wf() -> FlyteSchema[kwtypes(x=int)]:
return t2(s=t1())
w = t1()
assert w is not None
df = w.open(override_mode=SchemaOpenMode.READ).all()
result_df = df.reset_index(drop=True) == pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]}).reset_index(
drop=True
)
assert result_df.all().all()
df = t2(s=w.as_readonly())
assert df is not None
result_df = df.reset_index(drop=True) == pandas.DataFrame(data={"x": [1, 2]}).reset_index(drop=True)
assert result_df.all().all()
x = wf()
df = x.open().all()
result_df = df.reset_index(drop=True) == pandas.DataFrame(data={"x": [1, 2]}).reset_index(drop=True)
assert result_df.all().all()
def test_wf_schema_to_df():
schema1 = FlyteSchema[kwtypes(x=int, y=str)]
@task
def t1() -> schema1:
s = schema1()
s.open().write(pandas.DataFrame(data={"x": [1, 2], "y": ["3", "4"]}))
return s
@task
def t2(df: pandas.DataFrame) -> int:
return len(df.columns.values)
@workflow
def wf() -> int:
return t2(df=t1())
x = wf()
assert x == 2
def test_dict_wf_with_constants():
@task
def t1(a: int) -> typing.NamedTuple("OutputsBC", t1_int_output=int, c=str):
return a + 2, "world"
@task
def t2(a: typing.Dict[str, str]) -> str:
return " ".join([v for k, v in a.items()])
@workflow
def my_wf(a: int, b: str) -> (int, str):
x, y = t1(a=a)
d = t2(a={"key1": b, "key2": y})
return x, d
x = my_wf(a=5, b="hello")
assert x == (7, "hello world")
def test_dict_wf_with_conversion():
@task
def t1(a: int) -> typing.Dict[str, str]:
return {"a": str(a)}
@task
def t2(a: dict) -> str:
print(f"HAHAH {a}")
return " ".join([v for k, v in a.items()])
@workflow
def my_wf(a: int) -> str:
return t2(a=t1(a=a))
with pytest.raises(TypeError):
my_wf(a=5)
def test_wf_with_empty_dict():
@task
def t1() -> typing.Dict:
return {}
@task
def t2(d: typing.Dict):
assert d == {}
@workflow
def wf():
d = t1()
t2(d=d)
wf()
def test_wf_with_catching_no_return():
@task
def t1() -> typing.Dict:
return {}
@task
def t2(d: typing.Dict):
assert d == {}
@task
def t3(s: str):
pass
with pytest.raises(AssertionError):
@workflow
def wf():
d = t1()
# The following statement is wrong, this should not be allowed to pass to another task
x = t2(d=d)
# Passing x is wrong in this case
t3(s=x)
wf()
def test_wf_custom_types_missing_dataclass_json():
with pytest.raises(AssertionError):
@dataclass
class MyCustomType(object):
pass
@task
def t1(a: int) -> MyCustomType:
return MyCustomType()
def test_wf_custom_types():
@dataclass_json
@dataclass
class MyCustomType(object):
x: int
y: str
@task
def t1(a: int) -> MyCustomType:
return MyCustomType(x=a, y="t1")
@task
def t2(a: MyCustomType, b: str) -> (MyCustomType, int):
return MyCustomType(x=a.x, y=f"{a.y} {b}"), 5
@workflow
def my_wf(a: int, b: str) -> (MyCustomType, int):
return t2(a=t1(a=a), b=b)
c, v = my_wf(a=10, b="hello")
assert v == 5
assert c.x == 10
assert c.y == "t1 hello"
def test_arbit_class():
class Foo(object):
pass
with pytest.raises(ValueError):
@task
def t1(a: int) -> Foo:
return Foo()
def test_dataclass_more():
@dataclass_json
@dataclass
class Datum(object):
x: int
y: str
z: typing.Dict[int, str]
@task
def stringify(x: int) -> Datum:
return Datum(x=x, y=str(x), z={x: str(x)})
@task
def add(x: Datum, y: Datum) -> Datum:
x.z.update(y.z)
return Datum(x=x.x + y.x, y=x.y + y.y, z=x.z)
@workflow
def wf(x: int, y: int) -> Datum:
return add(x=stringify(x=x), y=stringify(x=y))
wf(x=10, y=20)
def test_environment():
@task(environment={"FOO": "foofoo", "BAZ": "baz"})
def t1(a: int) -> str:
a = a + 2
return "now it's " + str(a)
@workflow
def my_wf(a: int) -> str:
x = t1(a=a)
return x
serialization_settings = context_manager.SerializationSettings(
project="test_proj",
domain="test_domain",
version="abc",
image_config=ImageConfig(Image(name="name", fqn="image", tag="name")),
env={"FOO": "foo", "BAR": "bar"},
)
with context_manager.FlyteContext.current_context().new_compilation_context():
sdk_task = get_serializable(serialization_settings, t1)
assert sdk_task.container.env == {"FOO": "foofoo", "BAR": "bar", "BAZ": "baz"}
def test_resources():
@task(requests=Resources(cpu="1"), limits=Resources(cpu="2", mem="400M"))
def t1(a: int) -> str:
a = a + 2
return "now it's " + str(a)
@task(requests=Resources(cpu="3"))
def t2(a: int) -> str:
a = a + 200
return "now it's " + str(a)
@workflow
def my_wf(a: int) -> str:
x = t1(a=a)
return x
serialization_settings = context_manager.SerializationSettings(
project="test_proj",
domain="test_domain",
version="abc",
image_config=ImageConfig(Image(name="name", fqn="image", tag="name")),
env={},
)
with context_manager.FlyteContext.current_context().new_compilation_context():
sdk_task = get_serializable(serialization_settings, t1)
assert sdk_task.container.resources.requests == [
_resource_models.ResourceEntry(_resource_models.ResourceName.CPU, "1")
]
assert sdk_task.container.resources.limits == [
_resource_models.ResourceEntry(_resource_models.ResourceName.CPU, "2"),
_resource_models.ResourceEntry(_resource_models.ResourceName.MEMORY, "400M"),
]
sdk_task2 = get_serializable(serialization_settings, t2)
assert sdk_task2.container.resources.requests == [
_resource_models.ResourceEntry(_resource_models.ResourceName.CPU, "3")
]
assert sdk_task2.container.resources.limits == []
def test_wf_explicitly_returning_empty_task():
@task
def t1():
...
@workflow
def my_subwf():
return t1() # This forces the wf _local_execute to handle VoidPromises
assert my_subwf() is None
| 27.333333 | 117 | 0.5814 | [
"Apache-2.0"
] | ThomVett/flytek | tests/flytekit/unit/core/test_type_hints.py | 26,978 | Python |
# coding=utf-8
# *** WARNING: this file was generated by pulumigen. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ... import meta as _meta
__all__ = ['CSIStorageCapacityArgs', 'CSIStorageCapacity']
@pulumi.input_type
class CSIStorageCapacityArgs:
def __init__(__self__, *,
storage_class_name: pulumi.Input[str],
api_version: Optional[pulumi.Input[str]] = None,
capacity: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
maximum_volume_size: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']] = None,
node_topology: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']] = None):
"""
The set of arguments for constructing a CSIStorageCapacity resource.
:param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
        :param pulumi.Input['_meta.v1.ObjectMetaArgs'] metadata: Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
Objects are namespaced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input['_meta.v1.LabelSelectorArgs'] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
"""
pulumi.set(__self__, "storage_class_name", storage_class_name)
if api_version is not None:
pulumi.set(__self__, "api_version", 'storage.k8s.io/v1beta1')
if capacity is not None:
pulumi.set(__self__, "capacity", capacity)
if kind is not None:
pulumi.set(__self__, "kind", 'CSIStorageCapacity')
if maximum_volume_size is not None:
pulumi.set(__self__, "maximum_volume_size", maximum_volume_size)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if node_topology is not None:
pulumi.set(__self__, "node_topology", node_topology)
@property
@pulumi.getter(name="storageClassName")
def storage_class_name(self) -> pulumi.Input[str]:
"""
The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
"""
return pulumi.get(self, "storage_class_name")
@storage_class_name.setter
def storage_class_name(self, value: pulumi.Input[str]):
pulumi.set(self, "storage_class_name", value)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> Optional[pulumi.Input[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@api_version.setter
def api_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "api_version", value)
@property
@pulumi.getter
def capacity(self) -> Optional[pulumi.Input[str]]:
"""
Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
"""
return pulumi.get(self, "capacity")
@capacity.setter
def capacity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "capacity", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter(name="maximumVolumeSize")
def maximum_volume_size(self) -> Optional[pulumi.Input[str]]:
"""
MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
"""
return pulumi.get(self, "maximum_volume_size")
@maximum_volume_size.setter
def maximum_volume_size(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "maximum_volume_size", value)
@property
@pulumi.getter
def metadata(self) -> Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]:
"""
        Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
Objects are namespaced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@metadata.setter
def metadata(self, value: Optional[pulumi.Input['_meta.v1.ObjectMetaArgs']]):
pulumi.set(self, "metadata", value)
@property
@pulumi.getter(name="nodeTopology")
def node_topology(self) -> Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]:
"""
NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
"""
return pulumi.get(self, "node_topology")
@node_topology.setter
def node_topology(self, value: Optional[pulumi.Input['_meta.v1.LabelSelectorArgs']]):
pulumi.set(self, "node_topology", value)
class CSIStorageCapacity(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
capacity: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
maximum_volume_size: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]] = None,
storage_class_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.
For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero
The producer of these objects can decide which approach is more suitable.
They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_version: APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
:param pulumi.Input[str] capacity: Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
:param pulumi.Input[str] kind: Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
:param pulumi.Input[str] maximum_volume_size: MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
        :param pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']] metadata: Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
Objects are namespaced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
:param pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']] node_topology: NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
:param pulumi.Input[str] storage_class_name: The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: CSIStorageCapacityArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.
For example this can express things like: - StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1" - StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"
The following three cases all imply that no capacity is available for a certain combination: - no object exists with suitable topology and storage class name - such an object exists, but the capacity is unset - such an object exists, but the capacity is zero
The producer of these objects can decide which approach is more suitable.
They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.
:param str resource_name: The name of the resource.
:param CSIStorageCapacityArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(CSIStorageCapacityArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
api_version: Optional[pulumi.Input[str]] = None,
capacity: Optional[pulumi.Input[str]] = None,
kind: Optional[pulumi.Input[str]] = None,
maximum_volume_size: Optional[pulumi.Input[str]] = None,
metadata: Optional[pulumi.Input[pulumi.InputType['_meta.v1.ObjectMetaArgs']]] = None,
node_topology: Optional[pulumi.Input[pulumi.InputType['_meta.v1.LabelSelectorArgs']]] = None,
storage_class_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs)
__props__.__dict__["api_version"] = 'storage.k8s.io/v1beta1'
__props__.__dict__["capacity"] = capacity
__props__.__dict__["kind"] = 'CSIStorageCapacity'
__props__.__dict__["maximum_volume_size"] = maximum_volume_size
__props__.__dict__["metadata"] = metadata
__props__.__dict__["node_topology"] = node_topology
if storage_class_name is None and not opts.urn:
raise TypeError("Missing required property 'storage_class_name'")
__props__.__dict__["storage_class_name"] = storage_class_name
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="kubernetes:storage.k8s.io/v1alpha1:CSIStorageCapacity")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(CSIStorageCapacity, __self__).__init__(
'kubernetes:storage.k8s.io/v1beta1:CSIStorageCapacity',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'CSIStorageCapacity':
"""
Get an existing CSIStorageCapacity resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = CSIStorageCapacityArgs.__new__(CSIStorageCapacityArgs)
__props__.__dict__["api_version"] = None
__props__.__dict__["capacity"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["maximum_volume_size"] = None
__props__.__dict__["metadata"] = None
__props__.__dict__["node_topology"] = None
__props__.__dict__["storage_class_name"] = None
return CSIStorageCapacity(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="apiVersion")
def api_version(self) -> pulumi.Output[Optional[str]]:
"""
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
"""
return pulumi.get(self, "api_version")
@property
@pulumi.getter
def capacity(self) -> pulumi.Output[Optional[str]]:
"""
Capacity is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
The semantic is currently (CSI spec 1.2) defined as: The available capacity, in bytes, of the storage that can be used to provision volumes. If not set, that information is currently unavailable and treated like zero capacity.
"""
return pulumi.get(self, "capacity")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter(name="maximumVolumeSize")
def maximum_volume_size(self) -> pulumi.Output[Optional[str]]:
"""
MaximumVolumeSize is the value reported by the CSI driver in its GetCapacityResponse for a GetCapacityRequest with topology and parameters that match the previous fields.
This is defined since CSI spec 1.4.0 as the largest size that may be used in a CreateVolumeRequest.capacity_range.required_bytes field to create a volume with the same parameters as those in GetCapacityRequest. The corresponding value in the Kubernetes API is ResourceRequirements.Requests in a volume claim.
"""
return pulumi.get(self, "maximum_volume_size")
@property
@pulumi.getter
def metadata(self) -> pulumi.Output[Optional['_meta.v1.outputs.ObjectMeta']]:
"""
        Standard object's metadata. The name has no particular meaning. It must be a DNS subdomain (dots allowed, 253 characters). To ensure that there are no conflicts with other CSI drivers on the cluster, the recommendation is to use csisc-<uuid>, a generated name, or a reverse-domain name which ends with the unique CSI driver name.
Objects are namespaced.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter(name="nodeTopology")
def node_topology(self) -> pulumi.Output[Optional['_meta.v1.outputs.LabelSelector']]:
"""
NodeTopology defines which nodes have access to the storage for which capacity was reported. If not set, the storage is not accessible from any node in the cluster. If empty, the storage is accessible from all nodes. This field is immutable.
"""
return pulumi.get(self, "node_topology")
@property
@pulumi.getter(name="storageClassName")
def storage_class_name(self) -> pulumi.Output[str]:
"""
The name of the StorageClass that the reported capacity applies to. It must meet the same requirements as the name of a StorageClass object (non-empty, DNS subdomain). If that object no longer exists, the CSIStorageCapacity object is obsolete and should be removed by its creator. This field is immutable.
"""
return pulumi.get(self, "storage_class_name")
| 68.014368 | 415 | 0.713634 | [
"Apache-2.0"
] | Teshel/pulumi-kubernetes | sdk/python/pulumi_kubernetes/storage/v1beta1/CSIStorageCapacity.py | 23,669 | Python |
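A hedged usage sketch for the generated resource above, assuming a Pulumi program with `pulumi_kubernetes` installed and a configured Kubernetes provider; the resource name and field values are illustrative placeholders, not SDK defaults.

# Hypothetical Pulumi program using the CSIStorageCapacity resource defined above.
import pulumi_kubernetes as k8s

example = k8s.storage.v1beta1.CSIStorageCapacity(
    "example-capacity",
    storage_class_name="standard",        # required input
    capacity="1234Gi",                    # reported available capacity
    node_topology=k8s.meta.v1.LabelSelectorArgs(
        match_labels={"topology.kubernetes.io/zone": "us-east1"},
    ),
)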
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-10-15 15:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ContestAnalyzerOnline', '0006_auto_20171015_1445'),
]
operations = [
migrations.DeleteModel(
name='Comment',
),
]
| 19.736842 | 61 | 0.64 | [
"MIT"
] | rogercaminal/HamToolsManager | ContestAnalyzerOnline/utils/migrations/0007_delete_comment.py | 375 | Python |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import logging
import struct
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_POST
from desktop.lib.django_util import JsonResponse
from desktop.lib.i18n import force_unicode
from desktop.models import Document2
from libsentry.privilege_checker import MissingSentryPrivilegeException
from notebook.api import _get_statement
from notebook.models import Notebook
from metadata.optimizer_client import OptimizerApi, NavOptException, _get_table_name, _clean_query
from metadata.conf import OPTIMIZER
from desktop.auth.backend import is_admin
LOG = logging.getLogger(__name__)
try:
from beeswax.api import get_table_stats
from beeswax.design import hql_query
from metastore.views import _get_db
except ImportError, e:
LOG.warn("Hive lib not enabled")
def error_handler(view_fn):
def decorator(*args, **kwargs):
try:
return view_fn(*args, **kwargs)
except Http404, e:
raise e
except NavOptException, e:
LOG.exception(e)
response = {
'status': -1,
'message': e.message
}
except MissingSentryPrivilegeException, e:
LOG.exception(e)
response = {
'status': -1,
'message': 'Missing privileges for %s' % force_unicode(str(e))
}
except Exception, e:
LOG.exception(e)
response = {
'status': -1,
'message': force_unicode(str(e))
}
return JsonResponse(response, status=500)
return decorator
@require_POST
@error_handler
def get_tenant(request):
response = {'status': -1}
cluster_id = request.POST.get('cluster_id')
api = OptimizerApi(request.user)
data = api.get_tenant(cluster_id=cluster_id)
if data:
response['status'] = 0
response['data'] = data['tenant']
else:
response['message'] = 'Optimizer: %s' % data['details']
return JsonResponse(response)
@require_POST
@error_handler
def top_tables(request):
response = {'status': -1}
database = request.POST.get('database', 'default')
limit = request.POST.get('len', 1000)
api = OptimizerApi(user=request.user)
data = api.top_tables(database_name=database, page_size=limit)
tables = [{
'eid': table['eid'],
'database': _get_table_name(table['name'])['database'],
'name': _get_table_name(table['name'])['table'],
'popularity': table['workloadPercent'],
'column_count': table['columnCount'],
'patternCount': table['patternCount'],
'total': table['total'],
'is_fact': table['type'] != 'Dimension'
} for table in data['results']
]
response['top_tables'] = tables
response['status'] = 0
return JsonResponse(response)
@require_POST
@error_handler
def table_details(request):
response = {'status': -1}
database_name = request.POST.get('databaseName')
table_name = request.POST.get('tableName')
api = OptimizerApi(request.user)
data = api.table_details(database_name=database_name, table_name=table_name)
if data:
response['status'] = 0
response['details'] = data
else:
response['message'] = 'Optimizer: %s' % data['details']
return JsonResponse(response)
@require_POST
@error_handler
def query_compatibility(request):
response = {'status': -1}
source_platform = request.POST.get('sourcePlatform')
target_platform = request.POST.get('targetPlatform')
query = request.POST.get('query')
api = OptimizerApi(request.user)
data = api.query_compatibility(source_platform=source_platform, target_platform=target_platform, query=query)
if data:
response['status'] = 0
response['query_compatibility'] = data
else:
response['message'] = 'Optimizer: %s' % data
return JsonResponse(response)
@require_POST
@error_handler
def query_risk(request):
response = {'status': -1}
query = json.loads(request.POST.get('query'))
source_platform = request.POST.get('sourcePlatform')
db_name = request.POST.get('dbName')
api = OptimizerApi(request.user)
data = api.query_risk(query=query, source_platform=source_platform, db_name=db_name)
if data:
response['status'] = 0
response['query_risk'] = data
else:
response['message'] = 'Optimizer: %s' % data
return JsonResponse(response)
@require_POST
@error_handler
def similar_queries(request):
response = {'status': -1}
source_platform = request.POST.get('sourcePlatform')
query = json.loads(request.POST.get('query'))
api = OptimizerApi(request.user)
data = api.similar_queries(source_platform=source_platform, query=query)
if data:
response['status'] = 0
response['similar_queries'] = data
else:
response['message'] = 'Optimizer: %s' % data
return JsonResponse(response)
@require_POST
@error_handler
def top_filters(request):
response = {'status': -1}
  db_tables = json.loads(request.POST.get('dbTables', '[]'))
column_name = request.POST.get('columnName') # Unused
api = OptimizerApi(request.user)
data = api.top_filters(db_tables=db_tables)
if data:
response['status'] = 0
response['values'] = data['results']
else:
response['message'] = 'Optimizer: %s' % data
return JsonResponse(response)
@require_POST
@error_handler
def top_joins(request):
response = {'status': -1}
  db_tables = json.loads(request.POST.get('dbTables', '[]'))
api = OptimizerApi(request.user)
data = api.top_joins(db_tables=db_tables)
if data:
response['status'] = 0
response['values'] = data['results']
else:
response['message'] = 'Optimizer: %s' % data
return JsonResponse(response)
@require_POST
@error_handler
def top_aggs(request):
response = {'status': -1}
  db_tables = json.loads(request.POST.get('dbTables', '[]'))
api = OptimizerApi(request.user)
data = api.top_aggs(db_tables=db_tables)
if data:
response['status'] = 0
response['values'] = data['results']
else:
response['message'] = 'Optimizer: %s' % data
return JsonResponse(response)
@require_POST
@error_handler
def top_databases(request):
response = {'status': -1}
api = OptimizerApi(request.user)
data = api.top_databases()
if data:
response['status'] = 0
response['values'] = data['results']
else:
response['message'] = 'Optimizer: %s' % data
return JsonResponse(response)
@require_POST
@error_handler
def top_columns(request):
response = {'status': -1}
  db_tables = json.loads(request.POST.get('dbTables', '[]'))
api = OptimizerApi(request.user)
data = api.top_columns(db_tables=db_tables)
if data:
response['status'] = 0
response['values'] = data
else:
response['message'] = 'Optimizer: %s' % data
return JsonResponse(response)
def _convert_queries(queries_data):
queries = []
for query_data in queries_data:
try:
snippet = query_data['snippets'][0]
if 'guid' in snippet['result']['handle']: # Not failed query
original_query_id = '%s:%s' % struct.unpack(b"QQ", base64.decodestring(snippet['result']['handle']['guid'])) # unpack_guid uses '%016x:%016x' while optmizer api uses '%s:%s'.
execution_time = snippet['result']['executionTime'] * 100 if snippet['status'] in ('available', 'expired') else -1
statement = _clean_query(_get_statement(query_data))
queries.append((original_query_id, execution_time, statement, snippet.get('database', 'default').strip()))
except Exception, e:
LOG.warning('Skipping upload of %s: %s' % (query_data['uuid'], e))
return queries
@require_POST
@error_handler
def upload_history(request):
response = {'status': -1}
if is_admin(request.user):
api = OptimizerApi(request.user)
histories = []
upload_stats = {}
if request.POST.get('sourcePlatform'):
      n = min(int(request.POST.get('n', OPTIMIZER.QUERY_HISTORY_UPLOAD_LIMIT.get())), OPTIMIZER.QUERY_HISTORY_UPLOAD_LIMIT.get())  # cap n at the configured upload limit
source_platform = request.POST.get('sourcePlatform', 'hive')
histories = [(source_platform, Document2.objects.get_history(doc_type='query-%s' % source_platform, user=request.user)[:n])]
elif OPTIMIZER.QUERY_HISTORY_UPLOAD_LIMIT.get() > 0:
histories = [
(source_platform, Document2.objects.filter(type='query-%s' % source_platform, is_history=True, is_managed=False, is_trashed=False).order_by('-last_modified')[:OPTIMIZER.QUERY_HISTORY_UPLOAD_LIMIT.get()])
for source_platform in ['hive', 'impala']
]
for source_platform, history in histories:
queries = _convert_queries([Notebook(document=doc).get_data() for doc in history])
upload_stats[source_platform] = api.upload(data=queries, data_type='queries', source_platform=source_platform)
response['upload_history'] = upload_stats
response['status'] = 0
else:
response['message'] = _('Query history upload requires Admin privileges or feature is disabled.')
return JsonResponse(response)
@require_POST
@error_handler
def upload_query(request):
response = {'status': -1}
source_platform = request.POST.get('sourcePlatform', 'default')
query_id = request.POST.get('query_id')
if OPTIMIZER.AUTO_UPLOAD_QUERIES.get() and source_platform in ('hive', 'impala') and query_id:
try:
doc = Document2.objects.document(request.user, doc_id=query_id)
query_data = Notebook(document=doc).get_data()
queries = _convert_queries([query_data])
source_platform = query_data['snippets'][0]['type']
api = OptimizerApi(request.user)
response['query_upload'] = api.upload(data=queries, data_type='queries', source_platform=source_platform)
except Document2.DoesNotExist:
response['query_upload'] = _('Skipped as task query')
else:
response['query_upload'] = _('Skipped')
response['status'] = 0
return JsonResponse(response)
@require_POST
@error_handler
def upload_table_stats(request):
response = {'status': -1}
  db_tables = json.loads(request.POST.get('db_tables', '[]'))
source_platform = json.loads(request.POST.get('sourcePlatform', '"hive"'))
with_ddl = json.loads(request.POST.get('with_ddl', 'false'))
with_table_stats = json.loads(request.POST.get('with_table', 'false'))
with_columns_stats = json.loads(request.POST.get('with_columns', 'false'))
table_ddls = []
table_stats = []
column_stats = []
if not OPTIMIZER.AUTO_UPLOAD_DDL.get():
with_ddl = False
if not OPTIMIZER.AUTO_UPLOAD_STATS.get():
with_table_stats = with_columns_stats = False
for db_table in db_tables:
path = _get_table_name(db_table)
try:
if with_ddl:
db = _get_db(request.user, source_type=source_platform)
query = hql_query('SHOW CREATE TABLE `%(database)s`.`%(table)s`' % path)
handle = db.execute_and_wait(query, timeout_sec=5.0)
if handle:
result = db.fetch(handle, rows=5000)
db.close(handle)
table_ddls.append((0, 0, ' '.join([row[0] for row in result.rows()]), path['database']))
if with_table_stats:
mock_request = MockRequest(user=request.user, source_platform=source_platform)
full_table_stats = json.loads(get_table_stats(mock_request, database=path['database'], table=path['table']).content)
stats = dict((stat['data_type'], stat['comment']) for stat in full_table_stats['stats'])
table_stats.append({
'table_name': '%(database)s.%(table)s' % path, # DB Prefix
'num_rows': stats.get('numRows', -1),
'last_modified_time': stats.get('transient_lastDdlTime', -1),
'total_size': stats.get('totalSize', -1),
'raw_data_size': stats.get('rawDataSize', -1),
'num_files': stats.get('numFiles', -1),
'num_partitions': stats.get('numPartitions', -1),
# bytes_cached
# cache_replication
# format
})
if with_columns_stats:
if source_platform == 'impala':
colum_stats = json.loads(get_table_stats(mock_request, database=path['database'], table=path['table'], column=-1).content)['stats']
else:
colum_stats = [
json.loads(get_table_stats(mock_request, database=path['database'], table=path['table'], column=col).content)['stats']
for col in full_table_stats['columns'][:25]
]
raw_column_stats = [dict([(key, val if val is not None else '') for col_stat in col for key, val in col_stat.iteritems()]) for col in colum_stats]
for col_stats in raw_column_stats:
column_stats.append({
'table_name': '%(database)s.%(table)s' % path, # DB Prefix
'column_name': col_stats['col_name'],
'data_type': col_stats['data_type'],
"num_distinct": int(col_stats.get('distinct_count')) if col_stats.get('distinct_count') != '' else -1,
"num_nulls": int(col_stats['num_nulls']) if col_stats['num_nulls'] != '' else -1,
"avg_col_len": int(float(col_stats['avg_col_len'])) if col_stats['avg_col_len'] != '' else -1,
"max_size": int(float(col_stats['max_col_len'])) if col_stats['max_col_len'] != '' else -1,
"min": col_stats['min'] if col_stats.get('min', '') != '' else -1,
"max": col_stats['max'] if col_stats.get('max', '') != '' else -1,
"num_trues": col_stats['num_trues'] if col_stats.get('num_trues', '') != '' else -1,
"num_falses": col_stats['num_falses'] if col_stats.get('num_falses', '') != '' else -1,
})
except Exception, e:
LOG.exception('Skipping upload of %s: %s' % (db_table, e))
api = OptimizerApi(request.user)
response['status'] = 0
if table_stats:
response['upload_table_stats'] = api.upload(data=table_stats, data_type='table_stats', source_platform=source_platform)
response['upload_table_stats_status'] = 0 if response['upload_table_stats']['status']['state'] in ('WAITING', 'FINISHED', 'IN_PROGRESS') else -1
response['status'] = response['upload_table_stats_status']
if column_stats:
response['upload_cols_stats'] = api.upload(data=column_stats, data_type='cols_stats', source_platform=source_platform)
response['upload_cols_stats_status'] = response['status'] if response['upload_cols_stats']['status']['state'] in ('WAITING', 'FINISHED', 'IN_PROGRESS') else -1
if response['upload_cols_stats_status'] != 0:
response['status'] = response['upload_cols_stats_status']
if table_ddls:
response['upload_table_ddl'] = api.upload(data=table_ddls, data_type='queries', source_platform=source_platform)
response['upload_table_ddl_status'] = response['status'] if response['upload_table_ddl']['status']['state'] in ('WAITING', 'FINISHED', 'IN_PROGRESS') else -1
if response['upload_table_ddl_status'] != 0:
response['status'] = response['upload_table_ddl_status']
return JsonResponse(response)
@require_POST
@error_handler
def upload_status(request):
response = {'status': -1}
workload_id = request.POST.get('workloadId')
api = OptimizerApi(request.user)
response['upload_status'] = api.upload_status(workload_id=workload_id)
response['status'] = 0
return JsonResponse(response)
class MockRequest():
def __init__(self, user, source_platform):
self.user = user
self.path = '/%s/' % source_platform if source_platform != 'hive' else 'beeswax'
| 31.506876 | 211 | 0.685477 | [
"Apache-2.0"
] | apoorvanand/hue | desktop/libs/metadata/src/metadata/optimizer_api.py | 16,037 | Python |
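The `error_handler` decorator above is the backbone of every endpoint in this module. The following stand-alone sketch (plain Python 3, no Hue/Django imports; `risky` is a made-up view and a plain dict stands in for `JsonResponse`) illustrates the same wrap-and-report pattern:

# Stand-alone illustration of the error_handler pattern used above.
import logging

LOG = logging.getLogger(__name__)

def error_handler(view_fn):
    def decorator(*args, **kwargs):
        try:
            return view_fn(*args, **kwargs)
        except Exception as e:  # the real module special-cases Http404, NavOptException, etc.
            LOG.exception(e)
            return {'status': -1, 'message': str(e)}
    return decorator

@error_handler
def risky(x):
    return {'status': 0, 'value': 10 / x}

print(risky(2))  # {'status': 0, 'value': 5.0}
print(risky(0))  # {'status': -1, 'message': 'division by zero'}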
# -*- coding: utf-8 -*-
"""
walle-web
:copyright: © 2015-2019 walle-web.io
:created time: 2019-02-24 10:47:53
:author: [email protected]
"""
import os
import re
import os.path as osp
import git as PyGit
from git import Repo as PyRepo
class Repo:
path = None
def __init__(self, path=None):
self.path = path
def is_git_dir(self):
'''
        Determine whether self.path is a git working directory.
@return:
'''
d = self.path + '/.git'
if osp.isdir(d):
if osp.isdir(osp.join(d, 'objects')) and osp.isdir(osp.join(d, 'refs')):
headref = osp.join(d, 'HEAD')
return osp.isfile(headref) or \
(osp.islink(headref) and
os.readlink(headref).startswith('refs'))
elif (osp.isfile(osp.join(d, 'gitdir')) and
osp.isfile(osp.join(d, 'commondir')) and
osp.isfile(osp.join(d, 'gitfile'))):
return False
return False
def init(self, url):
        # Create the directory if it does not exist
if not os.path.exists(self.path):
os.makedirs(self.path)
# git clone
if self.is_git_dir():
return self.pull()
else:
return self.clone(url)
def clone(self, url):
'''
        Clone the project from the given remote URL.
        @param url: remote repository URL
@return:
'''
return PyRepo.clone_from(url, self.path)
def pull(self):
'''
        Update the local checkout by pulling from the remote.
@return:
'''
repo = PyRepo(self.path)
return repo.remote().pull()
def checkout_2_branch(self, branch):
PyRepo(self.path).git.checkout(branch)
def checkout_2_commit(self, branch, commit):
'''
        @todo incomplete
@param branch:
@param commit:
@return:
'''
PyRepo(self.path).git.checkout(branch)
# PyRepo(self.path).head.set_reference(branch)
        # This call is problematic: it only resets HEAD, it does not check the commit out
PyRepo(self.path).head.set_commit(commit)
def checkout_2_tag(self, tag):
PyRepo(self.path).git.checkout(tag)
def branches(self):
'''
        Get all remote branches.
@return:
'''
        # Drop the "origin/HEAD -> ..." pointer entry
        # and strip the remote ("origin/") prefix from each branch name
branches = PyRepo(self.path).remote().refs
# fixbug https://github.com/meolu/walle-web/issues/705
return [str(branch).strip().lstrip('origin').lstrip('/') for branch in branches if
not str(branch).strip().startswith('origin/HEAD')]
def tags(self):
'''
        Get all tags.
@return:
'''
return [str(tag) for tag in PyRepo(self.path).tags]
def commits(self, branch):
'''
        Get the latest commits of the given branch.
        @param branch: branch name
@return:
'''
self.checkout_2_branch(branch)
commit_log = PyGit.Git(self.path).log('--pretty=%h #@_@# %an #@_@# %s', max_count=50)
commit_list = commit_log.split('\n')
commits = []
for commit in commit_list:
if not re.search('^.+ #@_@# .+ #@_@# .*$', commit):
continue
commit_dict = commit.split(' #@_@# ')
from flask import current_app
current_app.logger.info(commit_dict)
commits.append({
'id': commit_dict[0],
'name': commit_dict[1],
'message': commit_dict[2],
})
return commits
| 24.744828 | 93 | 0.510033 | [
"Apache-2.0"
] | lgq9220/walle-web | walle/service/git/repo.py | 3,713 | Python |
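A hedged usage sketch of the `Repo` wrapper above, assuming GitPython is installed, the module is importable from a walle-web checkout, and a `master` branch exists; the path and URL are placeholders.

# Hypothetical usage: clone-or-pull a repository and inspect it.
from walle.service.git.repo import Repo

repo = Repo('/tmp/demo-project')
repo.init('https://github.com/meolu/walle-web.git')  # clones on first run, pulls afterwards
print(repo.branches())             # remote branch names without the 'origin/' prefix
print(repo.tags())                 # tag names
print(repo.commits('master')[:3])  # newest commits: [{'id': ..., 'name': ..., 'message': ...}, ...]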
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import logging
import os
import time
import isodate
from testlib import mini_poster
logger = logging.getLogger(__name__)
def utc_now():
return datetime.datetime.now(isodate.UTC)
class CrashVerifier:
def raw_crash_key(self, crash_id):
return 'v2/raw_crash/{entropy}/{date}/{crashid}'.format(
entropy=crash_id[0:3],
date='20' + crash_id[-6:],
crashid=crash_id
)
def dump_names_key(self, crash_id):
return 'v1/dump_names/{crashid}'.format(
crashid=crash_id
)
def dump_key(self, crash_id, name):
if name in (None, '', 'upload_file_minidump'):
name = 'dump'
return 'v1/{name}/{crashid}'.format(
name=name,
crashid=crash_id
)
def verify_stored_data(self, crash_id, raw_crash, dumps, s3conn):
# Verify the raw crash file made it
key = self.raw_crash_key(crash_id)
assert key in s3conn.list_objects(prefix=key)
# Verify the dump_names file made it
key = self.dump_names_key(crash_id)
assert key in s3conn.list_objects(prefix=key)
# Verify the dumps made it
for name, dump in dumps.items():
key = self.dump_key(crash_id, name)
assert key in s3conn.list_objects(prefix=key)
def verify_published_data(self, crash_id, pubsub):
# Verify crash id was published--this might pick up a bunch of stuff,
# so we just verify it's one of the things we picked up
if 'PUBSUB_EMULATOR_HOST' in os.environ:
crash_ids = [crash_id.decode('utf-8') for crash_id in pubsub.list_crashids()]
assert crash_id in crash_ids
else:
print('SKIPPING PUBLISH CHECK--NOT USING EMULATOR')
def content_to_crashid(content):
if not isinstance(content, str):
content = str(content, encoding='utf-8')
crash_id = content.strip()
crash_id = crash_id[len('CrashID=bp-'):]
return crash_id
# Gives Antenna time to save things before we check
SLEEP_TIME = 5
class TestPostCrash:
def test_regular(self, posturl, s3conn, pubsub, crash_generator):
"""Post a valid crash and verify the contents made it to S3."""
raw_crash, dumps = crash_generator.generate()
crash_payload = mini_poster.assemble_crash_payload_dict(raw_crash, dumps)
resp = mini_poster.post_crash(posturl, crash_payload, dumps)
# Sleep to give Antenna time to save things
time.sleep(SLEEP_TIME)
crash_id = content_to_crashid(resp.content)
logger.debug('Crash ID is: %s', crash_id)
logger.debug('S3conn: %s', s3conn.get_config())
# Verify stored and published crash data
verifier = CrashVerifier()
verifier.verify_stored_data(crash_id, raw_crash, dumps, s3conn)
verifier.verify_published_data(crash_id, pubsub)
def test_compressed_crash(self, posturl, s3conn, pubsub, crash_generator):
"""Post a compressed crash and verify contents made it to S3."""
raw_crash, dumps = crash_generator.generate()
crash_payload = mini_poster.assemble_crash_payload_dict(raw_crash, dumps)
resp = mini_poster.post_crash(posturl, crash_payload, compressed=True)
# Sleep to give Antenna time to save things
time.sleep(SLEEP_TIME)
crash_id = content_to_crashid(resp.content)
logger.debug('Crash ID is: %s', crash_id)
logger.debug('S3conn: %s', s3conn.get_config())
# Verify stored and published crash data
verifier = CrashVerifier()
verifier.verify_stored_data(crash_id, raw_crash, dumps, s3conn)
verifier.verify_published_data(crash_id, pubsub)
| 33.649573 | 89 | 0.665481 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Mozilla-GitHub-Standards/ca053cb8c97310481ca4524f115cd80002b8bbd773c6bdc00eb9955dd3d48e83 | tests/systemtest/test_post_crash.py | 3,937 | Python |
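For reference, the S3 key layout produced by `CrashVerifier` above can be reproduced in isolation; the crash id below is made up.

# Illustration only: same formatting logic as CrashVerifier.raw_crash_key / dump_key.
crash_id = '0bba929f-8721-460c-dead-a43c20071025'

raw_crash_key = 'v2/raw_crash/{entropy}/{date}/{crashid}'.format(
    entropy=crash_id[0:3], date='20' + crash_id[-6:], crashid=crash_id)
dump_key = 'v1/dump/{crashid}'.format(crashid=crash_id)

print(raw_crash_key)  # v2/raw_crash/0bb/20071025/0bba929f-8721-460c-dead-a43c20071025
print(dump_key)       # v1/dump/0bba929f-8721-460c-dead-a43c20071025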
import requests
import argparse
import logging
import coloredlogs
import threading
from flask import Flask, request, jsonify
from flask_swagger import swagger
from waitress import serve
import subprocess
import json
from kafka import KafkaConsumer
from threading import Thread
from threading import Timer
from datetime import timedelta
import psycopg2
import time
app = Flask(__name__)
logger = logging.getLogger("DCSRestClient")
signalling_metric_infrastructure = {'expId': 'internal', 'topic': 'signalling.metric.infrastructure'}
signalling_metric_application = {'expId': 'internal', 'topic': 'signalling.metric.application'}
signalling_kpi = {'expId': 'internal', 'topic': 'signalling.kpi'}
dcm_port = "8090"
dcm_subscribe_url = "/dcm/subscribe"
dcm_unsubscribe_url = "/dcm/unsubscribe"
dcs_dashboard_url = "http://127.0.0.1:8080/portal/dcs/dashboard"
signalling_start = False
@app.route('/', methods=['GET'])
def server_status():
"""
Get status.
---
describe: get status
responses:
200:
description: OK
"""
logger.info("GET /")
return '', 200
@app.route("/spec", methods=['GET'])
def spec():
"""
Get swagger specification.
---
describe: get swagger specification
responses:
swagger:
description: swagger specification
"""
swag = swagger(app)
swag['info']['version'] = "1.0"
swag['info']['title'] = "DCS REST API"
return jsonify(swag)
def kafka_consumer_refresh_dashboard_handler(topic, value):
logger.info("Creating Kafka Consumer for %s topic", topic)
consumer = KafkaConsumer(
topic,
bootstrap_servers=[dcm_ip_address + ":9092"],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=None,
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
message_received = False
while not message_received:
message = consumer.poll(timeout_ms=1000)
if message != {}:
logger.info("Message received in %s topic: %s", topic, message)
message_received = True
time.sleep(5)
logger.info("Creating dashboard for topic: %s", topic)
r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
logger.info("Response: Code %s", r)
# This call seems that is not needed as the dashboard is generated when data is present.
#time.sleep(2)
#logger.info("Refreshing dashboard for %s topic", topic)
#subprocess.call(['/bin/bash', '/usr/bin/dcs/refresh_dashboard.sh', topic])
logger.info("Closing Kafka Consumer for %s topic", topic)
consumer.close()
def index_cleaner(topic, value):
logger.info("Time to delete the dashboard for topic %s", topic)
r = requests.delete(dcs_dashboard_url, json={'records': [ { 'value': json.loads(value) }]})
logger.info("Response: Code %s", r)
logger.info("Time to delete the Elasticsearch index for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'yes'])
def kafka_consumer_signalling_topic_handler(signalling_topic_data):
logger.info("Creating Kafka Consumer for %s topic", signalling_topic_data["topic"])
consumer = KafkaConsumer(
signalling_topic_data["topic"],
bootstrap_servers=[dcm_ip_address + ":9092"],
auto_offset_reset='earliest',
enable_auto_commit=True,
group_id=None,
value_deserializer=lambda x: json.loads(x.decode('utf-8')))
while signalling_start:
message = consumer.poll(timeout_ms=1000)
if message != {}:
logger.info("Message received in %s topic: %s", signalling_topic_data["topic"], message)
for tp, messages in message.items():
for msg in messages:
logger.info("Value: %s", msg.value)
topic = json.loads(msg.value)["topic"]
if json.loads(msg.value)["action"] == "subscribe":
logger.info("Create Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/create_logstash_pipeline.sh', topic])
# Dashboard creation is commented because it will be created when data is published in the topic.
#r = requests.post(dcs_dashboard_url, json={'records': [ { 'value': json.loads(msg.value) }]})
#logger.info("Response: Code %s", r)
# Create Kafka consumer to wait for the first message received in the topic and, then, refresh the dashboard.
thread = threading.Thread(target = kafka_consumer_refresh_dashboard_handler, args = [topic, msg.value])
thread.start()
# Finally, save topic in DB
try:
connection = psycopg2.connect(user = "eve", password = eve_db_password, host = "localhost", port = "5432", dbname="pipelines")
logger.info("Inserting %s topic in database", topic)
cursor = connection.cursor()
cursor.execute("INSERT INTO pipeline VALUES ( %s )", (topic,))
connection.commit()
logger.info("Topic %s inserted in database", topic)
cursor.close()
connection.close()
except (Exception, psycopg2.Error) as error:
logger.error("Error while connecting to PostgreSQL: ", error)
elif json.loads(msg.value)["action"] == "unsubscribe":
logger.info("Delete Logstash pipeline for topic %s", topic)
subprocess.call(['/bin/bash', '/usr/bin/dcs/delete_logstash_pipeline.sh', topic, 'no'])
# Schedule the removal of Kibana dashboard and Elasticsearch index (retention time of 14 days)
scheduled_thread = threading.Timer(timedelta(days=14).total_seconds(), index_cleaner, args = [topic, msg.value])
# This call is for testing purposes, to be commented when unused:
#scheduled_thread = threading.Timer(timedelta(seconds=30).total_seconds(), index_cleaner, args = [topic, msg.value])
scheduled_thread.start()
logger.info("Data removal for topic %s scheduled in 14 days", topic)
# Finally, delete topic in DB
try:
connection = psycopg2.connect(user = "eve", password = eve_db_password, host = "localhost", port = "5432", dbname="pipelines")
logger.info("Deleting %s topic in database", topic)
cursor = connection.cursor()
cursor.execute("DELETE FROM pipeline WHERE topic = %s", (topic,))
connection.commit()
logger.info("Topic %s deleted in database", topic)
cursor.close()
connection.close()
except (Exception, psycopg2.Error) as error:
logger.error("Error while connecting to PostgreSQL: ", error)
else:
logger.error("Action not allowed")
logger.info("Closing Kafka Consumer for %s topic", signalling_topic_data["topic"])
consumer.close()
def start_consuming_signalling_topic(signalling_topic_data):
signalling_topic_data = json.loads(signalling_topic_data)
logger.info("Starting %s topic", signalling_topic_data["topic"])
logger.info("Sending POST request to %s", url_subscribe)
# Send the request to the DCM.
r = requests.post(url_subscribe, json=signalling_topic_data)
logger.info("Response: Code %s", r)
# Create Kafka consumer.
global signalling_start
signalling_start = True
thread = threading.Thread(target = kafka_consumer_signalling_topic_handler, args = [signalling_topic_data])
thread.start()
@app.route('/portal/dcs/start_signalling/', methods=['POST'])
def start_dcs():
"""
Start signalling topics.
---
describe: start signalling topics
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - POST /portal/dcs/start_signalling/")
try:
start_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
start_consuming_signalling_topic(json.dumps(signalling_metric_application))
start_consuming_signalling_topic(json.dumps(signalling_kpi))
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def stop_consuming_signalling_topic(signalling_topic_data):
signalling_topic_data = json.loads(signalling_topic_data)
logger.info("Stopping %s topic", signalling_topic_data["topic"])
logger.info("Sending DELETE request to %s", url_unsubscribe)
# Send the request to the DCM.
r = requests.delete(url_unsubscribe, json=signalling_topic_data)
logger.info("Response: Code %s", r)
# Delete Kafka consumer.
global signalling_start
# Put signalling_start to False, and then threads will finish their execution.
signalling_start = False
@app.route('/portal/dcs/stop_signalling/', methods=['DELETE'])
def stop_dcs():
"""
Stop signalling topics.
---
describe: stop signalling topics
responses:
201:
description: accepted request
400:
description: error processing the request
"""
logger.info("Request received - DELETE /portal/dcs/stop_signalling/")
try:
stop_consuming_signalling_topic(json.dumps(signalling_metric_infrastructure))
stop_consuming_signalling_topic(json.dumps(signalling_metric_application))
stop_consuming_signalling_topic(json.dumps(signalling_kpi))
except Exception as e:
logger.error("Error while parsing request")
logger.exception(e)
return str(e), 400
return '', 201
def checkValidPort(value):
ivalue = int(value)
# RFC 793
if ivalue < 0 or ivalue > 65535:
raise argparse.ArgumentTypeError("%s is not a valid port" % value)
return value
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--dcm_ip_address",
help='DCM IP address, default IP is localhost',
default='localhost')
parser.add_argument(
"--eve_db_password",
help='DB password for eve user')
parser.add_argument(
"--port",
type=checkValidPort,
help='The port you want to use as an endpoint, default port is 8091',
default="8091")
parser.add_argument(
"--log",
help='Sets the Log Level output, default level is "info"',
choices=[
"info",
"debug",
"error",
"warning"],
nargs='?',
default='info')
args = parser.parse_args()
numeric_level = getattr(logging, str(args.log).upper(), None)
if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % args.log)
coloredlogs.install(
fmt='%(asctime)s %(levelname)s %(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
level=numeric_level)
logging.basicConfig(filename='/var/log/dcs_rest_client.log')
logging.getLogger("DCSRestClient").setLevel(numeric_level)
logging.getLogger("requests.packages.urllib3").setLevel(logging.ERROR)
args = parser.parse_args()
logger.info("Serving DCSRestClient on port %s", str(args.port))
global dcm_ip_address
dcm_ip_address= str(args.dcm_ip_address)
global url_subscribe
url_subscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_subscribe_url
global url_unsubscribe
url_unsubscribe = "http://" + dcm_ip_address + ":" + dcm_port + dcm_unsubscribe_url
global eve_db_password
eve_db_password= str(args.eve_db_password)
#TODO: advanced feature - connect to the database and make sure that Logstash pipelines are created for the topics saved in the DB.
serve(app, host='0.0.0.0', port=args.port)
| 42.132203 | 154 | 0.629898 | [
"Apache-2.0"
] | 5GEVE/5geve-wp4-dcs-signalling-topic-handler | dcs_rest_client.py | 12,429 | Python |
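A hedged client-side sketch for the Flask service above, assuming it is running locally on its default port 8091; expected status codes follow the route docstrings.

# Hypothetical smoke test of the REST endpoints defined above.
import requests

base = 'http://localhost:8091'
print(requests.get(base + '/').status_code)                                # 200
print(requests.post(base + '/portal/dcs/start_signalling/').status_code)  # 201 on success
print(requests.delete(base + '/portal/dcs/stop_signalling/').status_code) # 201 on success
print(requests.get(base + '/spec').json()['info']['title'])               # 'DCS REST API'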
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 21 19:14:52 2020
@author: prachi
"""
import pickle
import numpy as np
der='swbd_diar/exp_new/callhome/plda_oracle/der.scp'
der_pickle = 'swbd_diar/exp_new/callhome/plda_oracle/derdict'
der=open(der,'r').readlines()
DER={}
for line in der[2:-1]:
fname = line.split()[0]
val = float(line.split()[1])
DER[fname] = val
pickleobj=open(der_pickle,'wb')
pickle.dump(DER,pickleobj)
pickleobj.close()
| 20.875 | 62 | 0.658683 | [
"Apache-2.0"
] | iiscleap/self_supervised_AHC | services/gen_der_dict.py | 501 | Python |
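The pickled dictionary written by the script above can be read back as follows; a small sketch that assumes the pickle file at the path used by the script already exists.

# Load the DER dictionary produced by the script above.
import pickle

with open('swbd_diar/exp_new/callhome/plda_oracle/derdict', 'rb') as f:
    DER = pickle.load(f)

print(len(DER), 'recordings')
print(next(iter(DER.items())))  # e.g. ('<filename>', 12.34)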
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
from six import StringIO
from pants.base.workunit import WorkUnitLabel
from pants.reporting.html_reporter import HtmlReporter
from pants.reporting.invalidation_report import InvalidationReport
from pants.reporting.plaintext_reporter import LabelFormat, PlainTextReporter, ToolOutputFormat
from pants.reporting.quiet_reporter import QuietReporter
from pants.reporting.report import Report
from pants.reporting.reporter import ReporterDestination
from pants.reporting.reporting_server import ReportingServerManager
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import relative_symlink, safe_mkdir
class Reporting(Subsystem):
options_scope = 'reporting'
@classmethod
def register_options(cls, register):
super(Reporting, cls).register_options(register)
register('--invalidation-report', type=bool,
help='Write a formatted report on the invalid objects to the specified path.')
register('--reports-dir', advanced=True, metavar='<dir>',
default=os.path.join(register.bootstrap.pants_workdir, 'reports'),
help='Write reports to this dir.')
register('--template-dir', advanced=True, metavar='<dir>', default=None,
help='Find templates for rendering in this dir.')
register('--console-label-format', advanced=True, type=dict,
default=PlainTextReporter.LABEL_FORMATTING,
help='Controls the printing of workunit labels to the console. Workunit types are '
'{workunits}. Possible formatting values are {formats}'.format(
workunits=WorkUnitLabel.keys(), formats=LabelFormat.keys()))
register('--console-tool-output-format', advanced=True, type=dict,
default=PlainTextReporter.TOOL_OUTPUT_FORMATTING,
help='Controls the printing of workunit tool output to the console. Workunit types are '
'{workunits}. Possible formatting values are {formats}'.format(
workunits=WorkUnitLabel.keys(), formats=ToolOutputFormat.keys()))
def initialize(self, run_tracker, start_time=None):
"""Initialize with the given RunTracker.
TODO: See `RunTracker.start`.
"""
run_id = run_tracker.initialize()
run_dir = os.path.join(self.get_options().reports_dir, run_id)
html_dir = os.path.join(run_dir, 'html')
safe_mkdir(html_dir)
relative_symlink(run_dir, os.path.join(self.get_options().reports_dir, 'latest'))
report = Report()
# Capture initial console reporting into a buffer. We'll do something with it once
# we know what the cmd-line flag settings are.
outfile = StringIO()
errfile = StringIO()
capturing_reporter_settings = PlainTextReporter.Settings(
outfile=outfile, errfile=errfile, log_level=Report.INFO,
color=False, indent=True, timing=False,
cache_stats=False,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
report.add_reporter('capturing', capturing_reporter)
# Set up HTML reporting. We always want that.
html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
html_dir=html_dir,
template_dir=self.get_options().template_dir)
html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
report.add_reporter('html', html_reporter)
# Add some useful RunInfo.
run_tracker.run_info.add_info('default_report', html_reporter.report_path())
port = ReportingServerManager().socket
if port:
run_tracker.run_info.add_info('report_url', 'http://localhost:{}/run/{}'.format(port, run_id))
# And start tracking the run.
run_tracker.start(report, start_time)
def _get_invalidation_report(self):
return InvalidationReport() if self.get_options().invalidation_report else None
@staticmethod
def _consume_stringio(f):
f.flush()
buffered_output = f.getvalue()
f.close()
return buffered_output
def update_reporting(self, global_options, is_quiet, run_tracker):
"""Updates reporting config once we've parsed cmd-line flags."""
# Get any output silently buffered in the old console reporter, and remove it.
removed_reporter = run_tracker.report.remove_reporter('capturing')
buffered_out = self._consume_stringio(removed_reporter.settings.outfile)
buffered_err = self._consume_stringio(removed_reporter.settings.errfile)
log_level = Report.log_level_from_string(global_options.level or 'info')
    # Ideally, we'd use terminfo or somesuch to discover whether a terminal
    # truly supports color, but most terminals that don't support color set TERM=dumb.
color = global_options.colors and (os.getenv('TERM') != 'dumb')
timing = global_options.time
cache_stats = global_options.time # TODO: Separate flag for this?
if is_quiet:
console_reporter = QuietReporter(run_tracker,
QuietReporter.Settings(log_level=log_level, color=color,
timing=timing, cache_stats=cache_stats))
else:
# Set up the new console reporter.
settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, errfile=sys.stderr,
color=color, indent=True, timing=timing, cache_stats=cache_stats,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
console_reporter = PlainTextReporter(run_tracker, settings)
console_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
console_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
console_reporter.flush()
run_tracker.report.add_reporter('console', console_reporter)
if global_options.logdir:
# Also write plaintext logs to a file. This is completely separate from the html reports.
safe_mkdir(global_options.logdir)
run_id = run_tracker.run_info.get_info('id')
outfile = open(os.path.join(global_options.logdir, '{}.log'.format(run_id)), 'w')
errfile = open(os.path.join(global_options.logdir, '{}.err.log'.format(run_id)), 'w')
settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, errfile=errfile,
color=False, indent=True, timing=True, cache_stats=True,
label_format=self.get_options().console_label_format,
tool_output_format=self.get_options().console_tool_output_format)
logfile_reporter = PlainTextReporter(run_tracker, settings)
logfile_reporter.emit(buffered_out, dest=ReporterDestination.OUT)
logfile_reporter.emit(buffered_err, dest=ReporterDestination.ERR)
logfile_reporter.flush()
run_tracker.report.add_reporter('logfile', logfile_reporter)
invalidation_report = self._get_invalidation_report()
if invalidation_report:
run_id = run_tracker.run_info.get_info('id')
outfile = os.path.join(self.get_options().reports_dir, run_id, 'invalidation-report.csv')
invalidation_report.set_filename(outfile)
return invalidation_report
| 49.416667 | 109 | 0.705928 | [
"Apache-2.0"
] | GoingTharn/pants | src/python/pants/reporting/reporting.py | 7,709 | Python |
import numpy as np
import pygame as pg
from numba import njit
def main():
size = np.random.randint(20,60) # size of the map
posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
rot, rot_v = (np.pi/4, 0)
lx, ly, lz = (size*20, size*30, 1000)
mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
enx, eny, seenx, seeny, lock = np.random.uniform(2, size-3 ), np.random.uniform(2, size-3), 0, 0, 0
maph[int(enx)][int(eny)] = 0
shoot, sx, sy, sdir = 1, -1, -1, rot
res, res_o = 5, [96, 112, 160, 192, 224, 260, 300, 340, 400, 480, 540, 600, 800]
width, height, mod, inc, rr, gg, bb = adjust_resol(24)
running = True
pg.init()
font = pg.font.SysFont("Arial", 18)
font2 = pg.font.SysFont("Impact", 48)
screen = pg.display.set_mode((800, 600))
rr, gg, bb = np.linspace(0,0.8, width*height), np.linspace(0.5,.1, width*height), np.linspace(1,0.1, width*height)
pixels = np.dstack((rr,gg,bb))
pixels = np.reshape(pixels, (height,width,3))
surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
surf = pg.transform.scale(surf, (750, 550))
screen.blit(surf, (25, 25))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,95))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,105))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,100))
screen.blit(font2.render(" Loading, please wait... ", 1, pg.Color("black"), pg.Color("grey")),(50,300))
pg.display.update()
clock = pg.time.Clock()
pg.mouse.set_visible(False)
et = 0.1
mplayer = np.zeros([size, size])
enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
sstart, timer, count, autores, smooth = None, 0, -100, 1, 0
pause = 0
pg.mixer.set_num_channels(3)
ambient = pg.mixer.Sound('soundfx/HauntSilentPartner.mp3')
ambient.set_volume(0.5)
runfx = pg.mixer.Sound('soundfx/run.mp3')
shotfx = pg.mixer.Sound('soundfx/slap.mp3')
killfx = pg.mixer.Sound('soundfx/shutdown.mp3')
respawnfx = pg.mixer.Sound('soundfx/respawn.mp3')
successfx = pg.mixer.Sound('soundfx/success.mp3')
failfx = pg.mixer.Sound('soundfx/fail.mp3')
pg.mixer.Channel(0).play(ambient, -1)
pg.mixer.Channel(1).play(respawnfx)
run = 1
score = 0
ticks = pg.time.get_ticks()/100000
while running:
count += 1
for event in pg.event.get():
if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
if not pause:
pause = 1
pg.mixer.Channel(1).play(respawnfx)
endmsg = " Game paused. Current score: " + str(score)
else:
endmsg = " Thanks for playing! Total score: " + str(score)
pg.mixer.Channel(1).play(killfx)
running = False
if sstart == None and(event.type == pg.MOUSEBUTTONDOWN or event.type == pg.MOUSEBUTTONUP):
shoot = 1
if event.type == pg.KEYDOWN:
if event.key == ord('p'): # pause
if not pause:
pause = 1
endmsg = " Game paused. Current score: " + str(score)
elif (int(posx) != exitx or int(posy) != exity):
pause = 0
if pause and event.key == ord('n'): # new game
pause = 0
size = np.random.randint(20,60)
posx, posy, posz = 1.5, np.random.uniform(1, size -1), 0.5
rot, rot_v = (np.pi/4, 0)
mr, mg, mb, maph, mapr, exitx, exity, mapt, maps = maze_generator(int(posx), int(posy), size)
enx, eny, seenx, seeny, lock, run = 0, 0, 0, 0, 0, 1
shoot, sx, sy, sstart = 0, -1, -1, None
mplayer = np.zeros([size, size])
et = 0.1
enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
count = -100
if autores:
width, height, mod, inc, rr, gg, bb = adjust_resol(24)
pg.mixer.Channel(1).play(respawnfx)
if event.key == ord('t'): # toggle auto resolution
autores = not(autores)
if event.key == ord('y'): # toggle auto resolution
smooth = not(smooth)
if not autores:
if event.key == ord('q'): # manually change resolution
if res > 0 :
res = res-1
width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
if event.key == ord('e'):
if res < len(res_o)-1 :
res = res+1
width, height, mod, inc, rr, gg, bb = adjust_resol(res_o[res])
if not pause:
rr, gg, bb = super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, mplayer, exitx, exity, mapr, mapt, maps, rr, gg, bb, enx, eny, sx, sy, size)
pixels = np.dstack((rr,gg,bb))
pixels = np.reshape(pixels, (height,width,3))
surf = pg.surfarray.make_surface((np.rot90(pixels*255)).astype('uint8'))
if shoot or smooth:
surf = pg.transform.smoothscale(surf, (800, 600))
else:
surf = pg.transform.scale(surf, (800, 600))
screen.blit(surf, (0, 0))
            ## fpss = int(clock.get_fps())
fpss = int(1000/(pg.time.get_ticks() - ticks*100000))
fps = font.render(str(fpss)+' w: '+ str(width) + ' Score: '+str(score), 1, pg.Color("coral"))
screen.blit(fps,(10,0))
if autores and count > 10: #auto adjust render resolution
if fpss < 50 and width > 100:
count = 0
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
if fpss > 65 and width < 728:
count = 0
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*1.1))
# player's movement
if (int(posx) == exitx and int(posy) == exity):
endmsg = " You escaped safely! "
pg.mixer.Channel(1).play(successfx)
score += 1
pause = 1
pressed_keys = pg.key.get_pressed()
et = clock.tick()/500
if et > 0.5:
et = 0.5
if shoot or sstart != None:
if sstart == None:
pg.mixer.Channel(2).play(shotfx)
if fpss < 60 and autores:
count = -50
width, height, mod, inc, rr, gg, bb = adjust_resol(int(width*0.8))
sstart = pg.time.get_ticks()
elif pg.time.get_ticks() - sstart > 500:
shoot, sx, sy, sstart = 0, -1, -1, None
if enx == 0:
if not run:
pg.mixer.Channel(1).play(killfx)
run = 1
if np.random.uniform() > 0.999:
cos, sin = np.cos(rot), np.sin(rot)
for ee in range(100):
enx = np.clip(np.random.normal(posx, 5), 1, size-2)
eny = np.clip(np.random.normal(posy, 5), 1, size-2)
dtp = (enx-posx)**2 + (eny-posy)**2
if maph[int(enx)][int(eny)] == 0 and dtp > 16 and dtp < 49:
break
if maph[int(enx)][int(eny)] != 0:
enx, eny = 0, 0
else:
seenx, seeny, lock = enx, eny, 0
screen.blit(font2.render(" Enemy Respawning! ", 1, pg.Color("red"), pg.Color("grey")),(300,50))
pg.mixer.Channel(1).play(respawnfx)
else:
dtp = (enx-posx)**2 + (eny-posy)**2
if dtp < 1:
score -= 1
endmsg = " You died! Current score: " + str(score)
pg.mixer.Channel(1).play(failfx)
enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
pause = 1
surf = pg.surfarray.make_surface((np.rot90(255-pixels*255)).astype('uint8'))
surf = pg.transform.smoothscale(surf, (800, 600))
screen.blit(surf, (0, 0))
elif dtp > 300:
enx, eny, seenx, seeny, lock = 0, 0, 0, 0, 0
run = 0
ticks = pg.time.get_ticks()/100000
lx = size/2 + 1000*np.cos(ticks)
ly = size/2 + 1000*np.sin(ticks)
posx, posy, rot, rot_v, shoot = movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart)
pg.mouse.set_pos([400, 300])
mplayer = np.zeros([size, size])
enx, eny, mplayer, et, shoot, sx, sy, sdir,seenx, seeny, lock = agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock)
if run and (seenx == posx or seeny == posy):
run = False
pg.mixer.Channel(1).play(runfx)
else:
clock.tick(30)
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("red")),(45,45))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("blue")),(55,55))
screen.blit(font2.render(" FinFET's PyTracing Maze ", 1, pg.Color("white")),(50,50))
screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
if (int(posx) == exitx and int(posy) == exity):
screen.blit(font2.render(" Your current score is "+str(score), 1, pg.Color("grey"), (80, 34, 80)),(50,390))
else:
screen.blit(font2.render(" Press P to continue ", 1, pg.Color("grey"), (80, 34, 80)),(50,390))
screen.blit(font2.render(" Press N for a new game ", 1, pg.Color("grey"), (45, 34, 100)),(50,460))
screen.blit(font2.render(" Press ESC to leave ", 1, pg.Color("grey"), (13, 34, 139)),(50,530))
pg.display.update()
screen.blit(font2.render(endmsg, 1, pg.Color("salmon"), (100, 34, 60)),(50,320))
pg.mixer.fadeout(1000)
pg.display.update()
print(endmsg)
pg.time.wait(2000)
pg.quit()
def maze_generator(x, y, size):
mr = np.random.uniform(0,1, (size,size))
mg = np.random.uniform(0,1, (size,size))
mb = np.random.uniform(0,1, (size,size))
mapr = np.random.choice([0, 0, 0, 0, 1], (size,size))
maps = np.random.choice([0, 0, 0, 0, 1], (size,size))
mapt = np.random.choice([0, 0, 0, 1, 2], (size,size))
maptemp = np.random.choice([0,0, 1], (size,size))
maph = np.random.uniform(0.25, 0.99, (size,size))
maph[np.where(maptemp == 0)] = 0
maph[0,:], maph[size-1,:], maph[:,0], maph[:,size-1] = (1,1,1,1)
maps[0,:], maps[size-1,:], maps[:,0], maps[:,size-1] = (0,0,0,0)
maph[x][y], mapr[x][y] = (0, 0)
count = 0
while 1:
testx, testy = (x, y)
if np.random.uniform() > 0.5:
testx = testx + np.random.choice([-1, 1])
else:
testy = testy + np.random.choice([-1, 1])
if testx > 0 and testx < size -1 and testy > 0 and testy < size -1:
if maph[testx][testy] == 0 or count > 5:
count = 0
x, y = (testx, testy)
maph[x][y], mapr[x][y] = (0, 0)
if x == size-2:
exitx, exity = (x, y)
break
else:
count = count+1
return mr, mg, mb, maph, mapr, exitx, exity, mapt, maps
def movement(pressed_keys,posx, posy, rot, rot_v, maph, et, shoot, sstart):
x, y = (posx, posy)
p_mouse = pg.mouse.get_pos()
rot, rot_v = rot - np.clip((p_mouse[0]-400)/200, -0.2, .2), rot_v -(p_mouse[1]-300)/400
rot_v = np.clip(rot_v, -1, 1)
if pressed_keys[pg.K_UP] or pressed_keys[ord('w')]:
x, y = (x + et*np.cos(rot), y + et*np.sin(rot))
if pressed_keys[pg.K_DOWN] or pressed_keys[ord('s')]:
x, y = (x - et*np.cos(rot), y - et*np.sin(rot))
if pressed_keys[pg.K_LEFT] or pressed_keys[ord('a')]:
x, y = (x - et*np.sin(rot), y + et*np.cos(rot))
if pressed_keys[pg.K_RIGHT] or pressed_keys[ord('d')]:
x, y = (x + et*np.sin(rot), y - et*np.cos(rot))
if maph[int(x)][int(y)] == 0:
posx, posy = (x, y)
if not shoot and sstart == None and pressed_keys[pg.K_SPACE]:
shoot = 1
return posx, posy, rot, rot_v, shoot
@njit(fastmath=True)
def super_fast(width, height, mod, inc, posx, posy, posz, rot, rot_v, mr, mg, mb, lx, ly, lz, maph, exitx, exity, mapr, mapt, maps, pr, pg, pb, enx, eny, sx, sy, size):
texture=[[ .95, .99, .97, .8], # brick wall
[ .97, .95, .96, .85],
[.8, .85, .8, .8],
[ .93, .8, .98, .96],
[ .99, .8, .97, .95],
[.8, .85, .8, .8]]
idx = 0
for j in range(height): #vertical loop
rot_j = rot_v + np.deg2rad(24 - j/mod)
sinzo = inc*np.sin(rot_j)
coszo = inc*np.sqrt(abs(np.cos(rot_j)))
for i in range(width): #horizontal vision loop
rot_i = rot + np.deg2rad(i/mod - 30)
x, y, z = (posx, posy, posz)
sin, cos, sinz = coszo*np.sin(rot_i), coszo*np.cos(rot_i), sinzo
modr = 1
cx, cy, c1r, c2r, c3r = 1, 1, 1, 1, 1
shot, enem, mapv = 0, 0, 0
dtp = np.random.uniform(0.002,0.01)
while 1:
if (mapv == 0 or (sinz > 0 and (z > mapv or (mapv==6 and (z>0.4 or z <0.2)) or(z > 0.57 and mapv > 1)))): ## LoDev DDA for optimization
norm = np.sqrt(cos**2 + sin**2 + sinz**2)
rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
mapX, mapY = int(x), int(y)
deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
if (rayDirX < 0):
stepX, sideDistX = -1, (x - mapX) * deltaDistX
else:
stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
if (rayDirY < 0):
stepY, sideDistY = -1, (y - mapY) * deltaDistY
else:
stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
if (rayDirZ < 0):
sideDistZ = z*deltaDistZ;
else:
sideDistZ = (1-z)*deltaDistZ
while (1):
if (sideDistX < sideDistY):
sideDistX += deltaDistX; mapX += stepX
dist = sideDistX; side = 0
if mapX < 1 or mapX > size-2:
break
else:
sideDistY += deltaDistY; mapY += stepY
dist = sideDistY; side = 1
if mapY < 1 or mapY > size-2:
break
if (maph[mapX][mapY] != 0):
break
if (side):
dist = dist - deltaDistY
else:
dist = dist - deltaDistX
if (dist > sideDistZ):
dist = sideDistZ
x = x + rayDirX*dist - cos/2
y = y + rayDirY*dist - sin/2
z = z + rayDirZ*dist - sinz/2
## end of LoDev DDA
x += cos; y += sin; z += sinz
if (z > 1 or z < 0): # check ceiling and floor
break
mapv = maph[int(x)][int(y)]
if mapv > 1 and z < 0.57:
if mapv == 2 or mapv == 8:
if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
break
if z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
break
if z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
break
if mapv == 3 or mapv == 9:
enem = 1
if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
break
if z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
break
if z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
break
if mapv > 5 and z < 0.4 and z > 0.2:
if ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2 < dtp):#0.01):
shot = 1
break
if mapv > z and mapv < 2: # check walls
if maps[int(x)][int(y)]: # check spheres
if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
if (mapr[int(x)][int(y)]): # spherical mirror
if (modr == 1):
cx, cy = int(x), int(y)
modr = modr*0.7
if (modr < 0.2):
break
if (mapv - z <= abs(sinz) ): ## horizontal surface
sinz = -sinz
else:
nx = (x-int(x)-0.5)/0.5; ny = (y-int(y)-0.5)/0.5; nz =(z-int(z)-0.5)/0.5
dot = 2*(cos*nx + sin*ny + sinz*nz)
cos = (cos - nx*dot); sin = (sin - ny*dot); sinz = (sinz - nz*dot)
x += cos; y += sin; z += sinz
else:
break
elif mapr[int(x)][int(y)]: # check reflections
if modr == 1:
cx, cy = int(x), int(y)
modr = modr*0.7
if modr < 0.2:
break
if abs(z-maph[int(x)][int(y)]) < abs(sinz):
sinz = -sinz
elif maph[int(x+cos)][int(y-sin)] == maph[int(x)][int(y)]:
cos = -cos
else:
sin = -sin
else:
break
if z > 1: # ceiling
deltaDistZ = (lz-z)*deltaDistZ
x += deltaDistZ*rayDirX; y += deltaDistZ*rayDirY; z = lz
dtol = np.sqrt((x-lx)**2+(y-ly)**2)
if dtol < 50: #light source
shot = 1
c1, c2, c3 = 1, 1, 0.5
else:
angle = np.rad2deg(np.arctan((y-ly)/(x-lx)))/np.random.uniform(12,15)
sh = (0.8+ abs(angle - int(angle))/5)/(dtol/1000)
if sh > 1:
sh = 1
if int(angle)%2 == 1:
c1, c2, c3 = 0.8*(1-sh), 0.86*(1-sh/4), (1-sh/10)
else:
c1, c2, c3 = 0.8*(1-sh), 0.9*(1-sh/4), (1-sh/10)
if sx != -1:
c1, c2, c3 = 0.7*c1, 0.7*c2, 0.7*c3
elif z < 0: # floor
z = 0
if int(x*2)%2 == int(y*2)%2:
c1, c2, c3 = .8,.8,.8
else:
if int(x) == exitx and int(y) == exity: #exit
c1, c2, c3 = 0,0,.6
else:
c1, c2, c3 = .1,.1,.1
elif mapv < 2: # walls
c1, c2, c3 = mr[int(x)][int(y)], mg[int(x)][int(y)], mg[int(x)][int(y)]
if mapt[int(x)][int(y)]: # textured walls
if y%1 < 0.05 or y%1 > 0.95:
ww = int((x*3)%1*4)
else:
ww = int((y*3)%1*4)
if x%1 < 0.95 and x%1 > 0.05 and y%1 < 0.95 and y%1 > 0.05:
zz = int(x*5%1*6)
else:
zz = int(z*5%1*6)
text = texture[zz][ww]
c1, c2, c3 = c1*text, c2*text, c3*text
if mapv - z <= abs(sinz):
z = mapv
elif not maps[int(x)][int(y)]:
if int(x-cos) != int(x):
x = max(int(x-cos), int(x))
modr = modr*0.80
else:
y = max(int(y-sin), int(y))
modr = modr*0.9
else:
if shot:
sh = ((x-sx)**2 + (y-sy)**2 + (z-0.3)**2)/0.012
c1, c2, c3 = 1, 0.6*sh+0.2 , 0.2*sh+0.1 # shot
elif z> 0.45:
c1, c2, c3 = 0.6, 0.3, 0.3 # Head
elif z > 0.3:
c1, c2, c3 = 0.3, 0.5, 0.5 # Chest
else:
if enem:
c1, c2, c3 = 1, 0.2, 0.2 # Roller red
else:
c1, c2, c3 = 0.2, 0.2, 1 # Roller blue
if modr <= 0.7 and not shot:
c1r, c2r, c3r = mr[cx][cy], mg[cx][cy], mg[cx][cy]
if not shot and z < 1:
dtp = np.sqrt((x-posx)**2+(y-posy)**2+(z-posz)**2)
if dtp > 7:
modr = modr/np.log((dtp-6)/4+np.e)
if z < 1: # shadows
if sx != -1 and maph[int(sx)][int(sy)] > 1:
shot, c3 = 1, c3 * 0.9
dtol = np.sqrt((x-sx)**2+(y-sy)**2+(z-0.35)**2)
cos, sin, sinz = .01*(sx-x)/dtol, .01*(sy-y)/dtol, .01*(0.35-z)/dtol
else:
dtol = np.sqrt((x-lx)**2+(y-ly)**2+(z-lz)**2)
cos, sin, sinz = .01*(lx-x)/dtol, .01*(ly-y)/dtol, .01*(lz-z)/dtol
x += cos; y += sin; z += sinz
mapv = maph[int(x)][int(y)]
if z < mapv and mapv < 1 and not maps[int(x)][int(y)]:
modr = modr*0.39
while modr > 0.45:
if (mapv == 0) or not shot and ((z > mapv) or (z > 0.57 and mapv > 1)): ## LoDev DDA for optimization
norm = np.sqrt(cos**2 + sin**2 + sinz**2)
rayDirX, rayDirY, rayDirZ = cos/norm + 1e-16, sin/norm + 1e-16, sinz/norm + 1e-16
mapX, mapY = int(x), int(y)
deltaDistX, deltaDistY, deltaDistZ= abs(1/rayDirX), abs(1/rayDirY), abs(1/rayDirZ)
if (rayDirX < 0):
stepX, sideDistX = -1, (x - mapX) * deltaDistX
else:
stepX, sideDistX = 1, (mapX + 1.0 - x) * deltaDistX
if (rayDirY < 0):
stepY, sideDistY = -1, (y - mapY) * deltaDistY
else:
stepY, sideDistY = 1, (mapY + 1 - y) * deltaDistY
if (rayDirZ < 0):
sideDistZ = z*deltaDistZ;
else:
sideDistZ = (1-z)*deltaDistZ
while (1):
if (sideDistX < sideDistY):
sideDistX += deltaDistX; mapX += stepX
dist = sideDistX; side = 0
if mapX < 1 or mapX > size-2:
break
else:
sideDistY += deltaDistY; mapY += stepY
dist = sideDistY; side = 1
if mapY < 1 or mapY > size-2:
break
if (maph[mapX][mapY] != 0):
break
if (side):
dist = dist - deltaDistY
else:
dist = dist - deltaDistX
if (dist > sideDistZ):
dist = sideDistZ
x = x + rayDirX*dist - cos/2
y = y + rayDirY*dist - sin/2
z = z + rayDirZ*dist - sinz/2
## end of LoDev DDA
x += cos; y += sin; z += sinz
mapv = maph[int(x)][int(y)]
if shot:
if mapv > 5 or (sinz > 0 and z > 0.35) or (sinz < 0 and z < 0.35):
break
elif z >1:
break
if z < 0.57 and mapv > 1:
if mapv == 3 or mapv == 9:
if z> 0.45 and (x-enx)**2 + (y-eny)**2 + (z-0.5)**2 < 0.005 :
modr = modr*0.67
elif z < 0.45 and z > 0.3 and (x-enx)**2 + (y-eny)**2 < (z/10 - 0.02):
modr = modr*0.67
elif z < 0.3 and (x-enx)**2 + (y-eny)**2 + (z-0.15)**2 < 0.023 :
modr = modr*0.67
elif mapv == 2 or mapv == 8:
if z> 0.45 and (x-posx)**2 + (y-posy)**2 + (z-0.5)**2 < 0.005 :
modr = modr*0.67
elif z < 0.45 and z > 0.3 and (x-posx)**2 + (y-posy)**2 < (z/10 - 0.02):
modr = modr*0.67
elif z < 0.3 and (x-posx)**2 + (y-posy)**2 + (z-0.15)**2 < 0.023 :
modr = modr*0.67
if mapv > 0 and z <= mapv and mapv < 2:
if maps[int(x)][int(y)]: # check spheres
if ((x-int(x)-0.5)**2 + (y-int(y)-0.5)**2 + (z-int(z)-0.5)**2 < 0.25):
modr = modr*0.9
else:
modr = modr*0.9
pr[idx] = modr*np.sqrt(c1*c1r)
pg[idx] = modr*np.sqrt(c2*c2r)
pb[idx] = modr*np.sqrt(c3*c3r)
idx += 1
return pr, pg, pb
def adjust_resol(width):
height = int(0.75*width)
mod = width/64
inc = 0.02/mod
rr = np.random.uniform(0,1,width * height)
gg = np.random.uniform(0,1,width * height)
bb = np.random.uniform(0,1,width * height)
## print('Resolution: ', width, height)
return width, height, mod, inc, rr, gg, bb
@njit(fastmath=True)
def agents(enx, eny, maph, posx, posy, rot, et, shoot, sx, sy, sdir, mplayer, seenx, seeny, lock):
if enx != 0:
if not lock or np.random.uniform(0,1) > 0.99:
dtp = np.sqrt((enx-posx)**2 + (eny-posy)**2)
cos, sin = (posx-enx)/dtp, (posy-eny)/dtp
x, y = enx, eny
for i in range(300):
x += 0.04*cos; y += 0.04*sin
if maph[int(x)][int(y)] != 0:
lock = 0
break
if(int(x) == int(posx) and int(y) == int(posy)):
seenx, seeny = posx, posy
lock = 1
break
if int(enx) == int(seenx) and int(eny) == int(seeny):
if not lock:
if shoot:
seenx, seeny = np.random.uniform(enx, posx), np.random.uniform(eny, posy)
else:
seenx, seeny = np.random.normal(enx, 2), np.random.normal(eny, 2)
else:
seenx, seeny = np.random.normal(posx, 2), np.random.normal(posy, 2)
dtp = np.sqrt((enx-seenx)**2 + (eny-seeny)**2)
cos, sin = (seenx-enx)/dtp, (seeny-eny)/dtp
x, y = enx + et*(cos+np.random.normal(0,.5)), eny + et*(sin+np.random.normal(0,.5))
if maph[int(x)][int(y)] == 0:
enx, eny = x, y
else:
if np.random.uniform(0,1) > 0.5:
x, y = enx - et*(sin+np.random.normal(0,.5)), eny + et*(cos+np.random.normal(0,.5))
else:
x, y = enx + et*(sin+np.random.normal(0,.5)), eny - et*(cos+np.random.normal(0,.5))
if maph[int(x)][int(y)] == 0:
enx, eny = x, y
else:
seenx, seeny = enx+np.random.normal(0,3), eny+np.random.normal(0,3)
lock = 0
mplayer[int(enx)][int(eny)] = 3
mplayer[int(posx)][int(posy)] = 2
if shoot:
if sx == -1:
sdir = rot+np.random.uniform(-.1,.1)
sx, sy = posx + .5*np.cos(sdir), posy + .5*np.sin(sdir)
sx, sy = sx + 5*et*np.cos(sdir), sy + 5*et*np.sin(sdir)
if enx != 0 and (sx - enx)**2 + (sy - eny)**2 < 0.02:
shoot, sx, sy, enx, eny, seenx, seeny = 0, -1, -1, 0, 0, 0, 0
if maph[int(sx)][int(sy)] != 0:
shoot, sx, sy = 0, -1, -1
else:
mplayer[int(sx)][int(sy)] += 6
mplayer = maph + mplayer
return(enx, eny, mplayer, et, shoot, sx, sy, sdir, seenx, seeny, lock)
if __name__ == '__main__':
main()
| 47.337778 | 192 | 0.393234 | [
"MIT"
] | FinFetChannel/PytracingMaze | RayTracingMazeEnem.py | 31,953 | Python |
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# Analog to digital converter example.
# Will loop forever printing ADC channel 1 raw and mV values every second.
# NOTE the ADC can only read voltages in the range of ~900mV to 1800mV!
import time
import board
import busio
import adafruit_lis3dh
# Uncomment if using SPI
# import digitalio
# Hardware I2C setup. Use the CircuitPlayground built-in accelerometer if available;
# otherwise check I2C pins.
if hasattr(board, "ACCELEROMETER_SCL"):
i2c = busio.I2C(board.ACCELEROMETER_SCL, board.ACCELEROMETER_SDA)
lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, address=0x19)
else:
i2c = busio.I2C(board.SCL, board.SDA)
lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c)
# Hardware SPI setup:
# spi = busio.SPI(board.SCK, board.MOSI, board.MISO)
# cs = digitalio.DigitalInOut(board.D5) # Set to correct CS pin!
# lis3dh = adafruit_lis3dh.LIS3DH_SPI(spi, cs)
# PyGamer I2C Setup:
# i2c = busio.I2C(board.SCL, board.SDA)
# lis3dh = adafruit_lis3dh.LIS3DH_I2C(i2c, address=0x19)
# Loop forever printing ADC readings.
while True:
# Read raw ADC value. Specify which ADC to read: 1, 2, or 3.
adc1_raw = lis3dh.read_adc_raw(1)
# Or read the ADC value in millivolts:
adc1_mV = lis3dh.read_adc_mV(1)
print("ADC 1 = {} ({} mV)".format(adc1_raw, adc1_mV))
time.sleep(1)
| 32.431818 | 85 | 0.715487 | [
"Apache-2.0"
] | IanSMoyes/SpiderPi | Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/lis3dh_adc.py | 1,427 | Python |
from datetime import datetime, timedelta
from time import sleep
from app.search import add_to_index, delete_index, create_index, query_index
from app import db
from app.models import Post, User
from tests.BaseDbTest import BaseDbTest
class SearchTest(BaseDbTest):
index_name = "test_index"
def setUp(self):
super(SearchTest, self).setUp()
create_index(SearchTest.index_name)
def tearDown(self):
super(SearchTest, self).tearDown()
delete_index(SearchTest.index_name)
def test_index_posts(self):
        # create two users
u1 = User(username='john', email='[email protected]')
u2 = User(username='susan', email='[email protected]')
db.session.add_all([u1, u2])
# create four posts
now = datetime.utcnow()
p1 = Post(body="post post1 from john", author=u1,
timestamp=now + timedelta(seconds=1))
p2 = Post(body="post post2 from susan", author=u2,
timestamp=now + timedelta(seconds=4))
p3 = Post(body="post post3 from john", author=u1,
timestamp=now + timedelta(seconds=3))
p4 = Post(body="post post4 from john", author=u1,
timestamp=now + timedelta(seconds=2))
db.session.add_all([p1, p2, p3, p4])
db.session.commit()
add_to_index(SearchTest.index_name, p1)
add_to_index(SearchTest.index_name, p2)
add_to_index(SearchTest.index_name, p3)
add_to_index(SearchTest.index_name, p4)
sleep(1)
ids, total = query_index(SearchTest.index_name, "post1", 1, 20)
self.assertEqual(1, total)
self.assertEqual(p1.id, ids[0])
ids, total = query_index(SearchTest.index_name, "post", 1, 20)
        self.assertEqual(4, total)
| 32.357143 | 76 | 0.640177 | [
"MIT"
] | cuongbm/microblog | tests/SearchTest.py | 1,812 | Python |
# ---------------------------------------------------------------------
# CPE check
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Python modules
import datetime
# NOC modules
from noc.services.discovery.jobs.base import DiscoveryCheck
from noc.sa.models.managedobject import ManagedObject
from noc.sa.models.profile import Profile
class CPECheck(DiscoveryCheck):
"""
CPE check
@todo: Remove stale CPE
"""
name = "cpe"
required_script = "get_cpe"
required_capabilities = ["CPE | Controller"]
def handler(self):
self.logger.info("Checking CPEs")
now = datetime.datetime.now()
result = self.object.scripts.get_cpe()
for cpe in result:
if cpe["status"] != "active":
self.logger.debug(
"[%s|%s] CPE status is '%s'. Skipping",
cpe["id"],
cpe["global_id"],
cpe["status"],
)
continue
mo = self.find_cpe(cpe["global_id"])
if mo:
changes = self.update_if_changed(
mo,
{
"controller": self.object,
"local_cpe_id": cpe["id"],
"global_cpe_id": cpe["global_id"],
"address": cpe["ip"],
"last_seen": now,
},
)
if changes:
self.logger.info(
"[%s|%s] Changed: %s",
cpe["id"],
cpe["global_id"],
", ".join("%s='%s'" % c for c in changes),
)
else:
name = cpe.get("name") or "cpe-%s" % cpe["global_id"]
if ManagedObject.objects.filter(name=name).exists():
name = "cpe-%s" % cpe["global_id"]
self.logger.info("[%s|%s] Created CPE %s", cpe["id"], cpe["global_id"], name)
mo = ManagedObject(
name=name,
pool=self.object.pool,
profile=Profile.get_by_id(Profile.get_generic_profile_id()),
object_profile=self.object.object_profile.cpe_profile
or self.object.object_profile,
administrative_domain=self.object.administrative_domain,
scheme=self.object.scheme,
segment=self.object.segment,
auth_profile=self.object.object_profile.cpe_auth_profile
or self.object.auth_profile,
address=cpe.get("ip") or "0.0.0.0",
controller=self.object,
last_seen=now,
local_cpe_id=cpe["id"],
global_cpe_id=cpe["global_id"],
)
mo.save()
@classmethod
def find_cpe(cls, global_id):
try:
return ManagedObject.objects.get(global_cpe_id=global_id)
except ManagedObject.DoesNotExist:
return None
| 36.910112 | 93 | 0.448706 | [
"BSD-3-Clause"
] | nocproject/noc | services/discovery/jobs/box/cpe.py | 3,285 | Python |
"""The tests for the Entity component helper."""
# pylint: disable=protected-access
import asyncio
from collections import OrderedDict
import logging
import unittest
from unittest.mock import patch, Mock
from datetime import timedelta
import pytest
import homeassistant.core as ha
import homeassistant.loader as loader
from homeassistant.exceptions import PlatformNotReady
from homeassistant.components import group
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import setup_component, async_setup_component
from homeassistant.helpers import discovery
import homeassistant.util.dt as dt_util
from tests.common import (
get_test_home_assistant, MockPlatform, MockModule, mock_coro,
async_fire_time_changed, MockEntity, MockConfigEntry,
mock_entity_platform, mock_integration)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
class TestHelpersEntityComponent(unittest.TestCase):
"""Test homeassistant.helpers.entity_component module."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize a test Home Assistant instance."""
self.hass = get_test_home_assistant()
def tearDown(self): # pylint: disable=invalid-name
"""Clean up the test Home Assistant instance."""
self.hass.stop()
def test_setting_up_group(self):
"""Set up the setting of a group."""
setup_component(self.hass, 'group', {'group': {}})
component = EntityComponent(_LOGGER, DOMAIN, self.hass,
group_name='everyone')
# No group after setup
assert len(self.hass.states.entity_ids()) == 0
component.add_entities([MockEntity()])
self.hass.block_till_done()
# group exists
assert len(self.hass.states.entity_ids()) == 2
assert self.hass.states.entity_ids('group') == ['group.everyone']
group = self.hass.states.get('group.everyone')
assert group.attributes.get('entity_id') == \
('test_domain.unnamed_device',)
# group extended
component.add_entities([MockEntity(name='goodbye')])
self.hass.block_till_done()
assert len(self.hass.states.entity_ids()) == 3
group = self.hass.states.get('group.everyone')
# Ordered in order of added to the group
assert group.attributes.get('entity_id') == \
('test_domain.goodbye', 'test_domain.unnamed_device')
def test_setup_loads_platforms(self):
"""Test the loading of the platforms."""
component_setup = Mock(return_value=True)
platform_setup = Mock(return_value=None)
mock_integration(self.hass,
MockModule('test_component', setup=component_setup))
# mock the dependencies
mock_integration(self.hass,
MockModule('mod2', dependencies=['test_component']))
mock_entity_platform(self.hass, 'test_domain.mod2',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert not component_setup.called
assert not platform_setup.called
component.setup({
DOMAIN: {
'platform': 'mod2',
}
})
self.hass.block_till_done()
assert component_setup.called
assert platform_setup.called
def test_setup_recovers_when_setup_raises(self):
"""Test the setup if exceptions are happening."""
platform1_setup = Mock(side_effect=Exception('Broken'))
platform2_setup = Mock(return_value=None)
mock_entity_platform(self.hass, 'test_domain.mod1',
MockPlatform(platform1_setup))
mock_entity_platform(self.hass, 'test_domain.mod2',
MockPlatform(platform2_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
assert not platform1_setup.called
assert not platform2_setup.called
component.setup(OrderedDict([
(DOMAIN, {'platform': 'mod1'}),
("{} 2".format(DOMAIN), {'platform': 'non_exist'}),
("{} 3".format(DOMAIN), {'platform': 'mod2'}),
]))
self.hass.block_till_done()
assert platform1_setup.called
assert platform2_setup.called
@patch('homeassistant.helpers.entity_component.EntityComponent'
'._async_setup_platform', return_value=mock_coro())
@patch('homeassistant.setup.async_setup_component',
return_value=mock_coro(True))
def test_setup_does_discovery(self, mock_setup_component, mock_setup):
"""Test setup for discovery."""
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({})
discovery.load_platform(self.hass, DOMAIN, 'platform_test',
{'msg': 'discovery_info'}, {DOMAIN: {}})
self.hass.block_till_done()
assert mock_setup.called
assert ('platform_test', {}, {'msg': 'discovery_info'}) == \
mock_setup.call_args[0]
@patch('homeassistant.helpers.entity_platform.'
'async_track_time_interval')
def test_set_scan_interval_via_config(self, mock_track):
"""Test the setting of the scan interval via configuration."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([MockEntity(should_poll=True)])
mock_entity_platform(self.hass, 'test_domain.platform',
MockPlatform(platform_setup))
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'scan_interval': timedelta(seconds=30),
}
})
self.hass.block_till_done()
assert mock_track.called
assert timedelta(seconds=30) == mock_track.call_args[0][2]
def test_set_entity_namespace_via_config(self):
"""Test setting an entity namespace."""
def platform_setup(hass, config, add_entities, discovery_info=None):
"""Test the platform setup."""
add_entities([
MockEntity(name='beer'),
MockEntity(name=None),
])
platform = MockPlatform(platform_setup)
mock_entity_platform(self.hass, 'test_domain.platform', platform)
component = EntityComponent(_LOGGER, DOMAIN, self.hass)
component.setup({
DOMAIN: {
'platform': 'platform',
'entity_namespace': 'yummy'
}
})
self.hass.block_till_done()
assert sorted(self.hass.states.entity_ids()) == \
['test_domain.yummy_beer', 'test_domain.yummy_unnamed_device']
@asyncio.coroutine
def test_extract_from_service_available_device(hass):
"""Test the extraction of entity from service and device is available."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2', available=False),
MockEntity(name='test_3'),
MockEntity(name='test_4', available=False),
])
call_1 = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call_1)))
call_2 = ha.ServiceCall('test', 'service', data={
'entity_id': ['test_domain.test_3', 'test_domain.test_4'],
})
assert ['test_domain.test_3'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call_2)))
@asyncio.coroutine
def test_platform_not_ready(hass):
"""Test that we retry when platform not ready."""
platform1_setup = Mock(side_effect=[PlatformNotReady, PlatformNotReady,
None])
loader.set_component(hass, 'mod1',
MockModule('mod1'))
loader.set_component(hass, 'mod1.test_domain',
MockPlatform(platform1_setup))
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup({
DOMAIN: {
'platform': 'mod1'
}
})
assert len(platform1_setup.mock_calls) == 1
assert 'test_domain.mod1' not in hass.config.components
utcnow = dt_util.utcnow()
with patch('homeassistant.util.dt.utcnow', return_value=utcnow):
# Should not trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=29))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 1
# Should trigger attempt 2
async_fire_time_changed(hass, utcnow + timedelta(seconds=30))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
assert 'test_domain.mod1' not in hass.config.components
# This should not trigger attempt 3
async_fire_time_changed(hass, utcnow + timedelta(seconds=59))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 2
# Trigger attempt 3, which succeeds
async_fire_time_changed(hass, utcnow + timedelta(seconds=60))
yield from hass.async_block_till_done()
assert len(platform1_setup.mock_calls) == 3
assert 'test_domain.mod1' in hass.config.components
@asyncio.coroutine
def test_extract_from_service_returns_all_if_no_entity_id(hass):
"""Test the extraction of everything from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
(yield from component.async_extract_from_service(call)))
@asyncio.coroutine
def test_extract_from_service_filter_out_non_existing_entities(hass):
"""Test the extraction of non existing entities from service."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['test_domain.test_2', 'test_domain.non_exist']
})
assert ['test_domain.test_2'] == \
[ent.entity_id for ent
in (yield from component.async_extract_from_service(call))]
@asyncio.coroutine
def test_extract_from_service_no_group_expand(hass):
"""Test not expanding a group."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
test_group = yield from group.Group.async_create_group(
hass, 'test_group', ['light.Ceiling', 'light.Kitchen'])
yield from component.async_add_entities([test_group])
call = ha.ServiceCall('test', 'service', {
'entity_id': ['group.test_group']
})
extracted = yield from component.async_extract_from_service(
call, expand_group=False)
assert extracted == [test_group]
@asyncio.coroutine
def test_setup_dependencies_platform(hass):
"""Test we setup the dependencies of a platform.
We're explictely testing that we process dependencies even if a component
with the same name has already been loaded.
"""
loader.set_component(hass, 'test_component',
MockModule('test_component',
dependencies=['test_component2']))
loader.set_component(hass, 'test_component2',
MockModule('test_component2'))
loader.set_component(hass, 'test_component.test_domain', MockPlatform())
component = EntityComponent(_LOGGER, DOMAIN, hass)
yield from component.async_setup({
DOMAIN: {
'platform': 'test_component',
}
})
assert 'test_component' in hass.config.components
assert 'test_component2' in hass.config.components
assert 'test_domain.test_component' in hass.config.components
async def test_setup_entry(hass):
"""Test setup entry calls async_setup_entry on platform."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry,
scan_interval=timedelta(seconds=5)))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
p_hass, p_entry, p_add_entities = mock_setup_entry.mock_calls[0][1]
assert p_hass is hass
assert p_entry is entry
assert component._platforms[entry.entry_id].scan_interval == \
timedelta(seconds=5)
async def test_setup_entry_platform_not_exist(hass):
"""Test setup entry fails if platform doesnt exist."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='non_existing')
assert (await component.async_setup_entry(entry)) is False
async def test_setup_entry_fails_duplicate(hass):
"""Test we don't allow setting up a config entry twice."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
with pytest.raises(ValueError):
await component.async_setup_entry(entry)
async def test_unload_entry_resets_platform(hass):
"""Test unloading an entry removes all entities."""
mock_setup_entry = Mock(return_value=mock_coro(True))
mock_entity_platform(
hass, 'test_domain.entry_domain',
MockPlatform(async_setup_entry=mock_setup_entry))
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
assert await component.async_setup_entry(entry)
assert len(mock_setup_entry.mock_calls) == 1
add_entities = mock_setup_entry.mock_calls[0][1][2]
add_entities([MockEntity()])
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
assert await component.async_unload_entry(entry)
assert len(hass.states.async_entity_ids()) == 0
async def test_unload_entry_fails_if_never_loaded(hass):
"""."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entry = MockConfigEntry(domain='entry_domain')
with pytest.raises(ValueError):
await component.async_unload_entry(entry)
async def test_update_entity(hass):
"""Test that we can update an entity with the helper."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entity = MockEntity()
entity.async_update_ha_state = Mock(return_value=mock_coro())
await component.async_add_entities([entity])
# Called as part of async_add_entities
assert len(entity.async_update_ha_state.mock_calls) == 1
await hass.helpers.entity_component.async_update_entity(entity.entity_id)
assert len(entity.async_update_ha_state.mock_calls) == 2
assert entity.async_update_ha_state.mock_calls[-1][1][0] is True
async def test_set_service_race(hass):
"""Test race condition on setting service."""
exception = False
def async_loop_exception_handler(_, _2) -> None:
"""Handle all exception inside the core loop."""
nonlocal exception
exception = True
hass.loop.set_exception_handler(async_loop_exception_handler)
await async_setup_component(hass, 'group', {})
component = EntityComponent(_LOGGER, DOMAIN, hass, group_name='yo')
for i in range(2):
hass.async_create_task(component.async_add_entities([MockEntity()]))
await hass.async_block_till_done()
assert not exception
async def test_extract_all_omit_entity_id(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service')
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') in caplog.text
async def test_extract_all_use_match_all(hass, caplog):
"""Test extract all with None and *."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([
MockEntity(name='test_1'),
MockEntity(name='test_2'),
])
call = ha.ServiceCall('test', 'service', {'entity_id': 'all'})
assert ['test_domain.test_1', 'test_domain.test_2'] == \
sorted(ent.entity_id for ent in
await component.async_extract_from_service(call))
assert ('Not passing an entity ID to a service to target all entities is '
'deprecated') not in caplog.text
| 35.230769 | 78 | 0.673006 | [
"Apache-2.0"
] | BobbyBleacher/home-assistant | tests/helpers/test_entity_component.py | 17,404 | Python |
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class GetBillingAddressResponse:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'result': 'GetBillingAddressResult',
'status': 'str',
'error_message': 'str',
'composedOn': 'long'
}
self.result = None # GetBillingAddressResult
self.status = None # str
self.error_message = None # str
self.composedOn = None # long
| 29.974359 | 77 | 0.656116 | [
"Apache-2.0"
] | groupdocs-legacy-sdk/python | groupdocs/models/GetBillingAddressResponse.py | 1,169 | Python |
from setuptools import setup, find_packages
setup(
name='wtouch',
version='0.0.1',
description='Create a file in current folder.',
url='https://github.com/Frederick-S/wtouch',
packages=find_packages(exclude=['tests']),
entry_points={
'console_scripts': [
'wtouch = wtouch.main:main'
]
},
include_package_data=True,
test_suite="tests"
)
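# Illustrative note (not part of the original setup script): installing the package
# (e.g. `pip install .`) exposes a `wtouch` console command that dispatches to
# wtouch.main:main via the console_scripts entry point declared above.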
| 23.588235 | 51 | 0.628429 | [
"MIT"
] | Frederick-S/wtouch | setup.py | 401 | Python |
# Generated by Django 3.0.4 on 2020-03-29 13:08
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('analyzer', '0004_auto_20200328_1750'),
]
operations = [
migrations.AddField(
model_name='diseasestats',
name='country',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.PROTECT, to='analyzer.Country'),
preserve_default=False,
),
migrations.AlterUniqueTogether(
name='diseasestats',
unique_together={('disease_season', 'country', 'stats_date')},
),
]
| 27.4 | 115 | 0.626277 | [
"MIT"
] | 4elovek37/diseases_risk_analysing | analyzer/migrations/0005_auto_20200329_1308.py | 685 | Python |
import sys
import os
if len(sys.argv) > 1:
folder = sys.argv[1]
jsfile = open("./" + folder + ".js", "w+")
images = [f[:len(f)-4] for f in os.listdir("./" + folder) if f.endswith(".svg")]
varnames = []
for i in images:
varname = "svg_" + i.replace('-', '_');
varnames.append(varname)
jsfile.write("import " + varname + " from './" + folder + "/" + i + ".svg';\n")
jsfile.write("\n")
jsfile.write("const " + folder + " = [" + ", ".join(varnames) + "];\n")
jsfile.write("\n")
jsfile.write("export default " + folder + ";")
jsfile.close()
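# Illustrative note (not part of the original script), assuming a hypothetical folder
# "icons" containing "a.svg" and "b-c.svg": running this script with `icons` as its
# argument would write an icons.js file along these lines:
#
#   import svg_a from './icons/a.svg';
#   import svg_b_c from './icons/b-c.svg';
#
#   const icons = [svg_a, svg_b_c];
#
#   export default icons;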
| 33.388889 | 87 | 0.515807 | [
"MIT"
] | pdyxs/WhereTheHeartIs | src/js/components/Map/svg/makeimport.py | 601 | Python |
# encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
import random
from NiaPy.algorithms.basic import MultiStrategyDifferentialEvolution
from NiaPy.util import StoppingTask, OptimizationType
from NiaPy.benchmarks import Sphere
#we will run Differential Evolution for 5 independent runs
for i in range(5):
task = StoppingTask(D=10, nFES=1000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
algo = MultiStrategyDifferentialEvolution(NP=50, F=0.5, CR=0.9)
best = algo.run(task=task)
print('%s -> %s' % (best[0].x, best[1]))
| 35.578947 | 99 | 0.755917 | [
"MIT"
] | kozulic/NiaPy | examples/run_msde.py | 676 | Python |
from json import load
import os
import argparse
import random
from copy import deepcopy
import torchvision
import torchvision.transforms as transforms
from torch import nn
import sys
import torch
import numpy as np
import cvxopt
torch.manual_seed(0)
from fedlab.core.client.serial_trainer import SubsetSerialTrainer
from fedlab.utils.aggregator import Aggregators
from fedlab.utils.serialization import SerializationTool
from fedlab.utils.functional import evaluate
from fedlab.utils.functional import get_best_gpu, load_dict
sys.path.append("../")
from models.cnn import CNN_MNIST
def quadprog(Q, q, G, h, A, b):
"""
Input: Numpy arrays, the format follows MATLAB quadprog function: https://www.mathworks.com/help/optim/ug/quadprog.html
Output: Numpy array of the solution
"""
Q = cvxopt.matrix(Q.tolist())
q = cvxopt.matrix(q.tolist(), tc='d')
G = cvxopt.matrix(G.tolist())
h = cvxopt.matrix(h.tolist())
A = cvxopt.matrix(A.tolist())
b = cvxopt.matrix(b.tolist(), tc='d')
sol = cvxopt.solvers.qp(Q, q.T, G.T, h.T, A.T, b)
return np.array(sol['x'])
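# Illustrative sketch (not part of the original FedMGDA+ script): a tiny sanity check
# for quadprog() above. It solves min x^2 + y^2 subject to x + y = 1 and x, y >= 0,
# whose optimum is x = y = 0.5. Array shapes follow the conventions used in
# optim_lambdas() below.
def _quadprog_example():
    n = 2
    Q = 2 * np.eye(n)              # objective (1/2) x^T Q x with Q = 2I, i.e. x^2 + y^2
    q = np.zeros((n, 1))           # no linear term
    G = -np.eye(n)                 # G x <= h encodes -x <= 0 and -y <= 0
    h = np.zeros((n, 1))
    A = np.ones(n).T               # equality constraint x + y = 1
    b = np.array([1])
    return quadprog(Q, q, G, h, A, b)  # expected to be close to [[0.5], [0.5]]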
def optim_lambdas(gradients, lambda0):
epsilon = 0.5
n = len(gradients)
J_t = [grad.numpy() for grad in gradients]
J_t = np.array(J_t)
    # objective function
Q = 2 * np.dot(J_t, J_t.T)
q = np.array([[0] for i in range(n)])
    # equality constraint
A = np.ones(n).T
b = np.array([1])
# boundary
lb = np.array([max(0, lambda0[i] - epsilon) for i in range(n)])
ub = np.array([min(1, lambda0[i] + epsilon) for i in range(n)])
G = np.zeros((2 * n, n))
for i in range(n):
G[i][i] = -1
G[n + i][i] = 1
h = np.zeros((2 * n, 1))
for i in range(n):
h[i] = -lb[i]
h[n + i] = ub[i]
res = quadprog(Q, q, G, h, A, b)
return res
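# Illustrative sketch (not part of the original script): in the training loop below,
# optim_lambdas() receives the flattened, normalized client updates and the previous
# weights, e.g.
#   grads = [torch.randn(100) for _ in range(5)]
#   grads = [g / g.norm() for g in grads]
#   lambdas = torch.Tensor(optim_lambdas(grads, [0.2] * 5)).view(-1)  # non-negative, sums to 1
# The epsilon box constraint inside optim_lambdas() keeps each new weight within 0.5
# of its previous value.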
# example usage: python standalone.py --sample_ratio 0.1 --batch_size 10 --epochs 5 --lr 0.02
# configuration
parser = argparse.ArgumentParser(description="Standalone training example")
parser.add_argument("--total_client", type=int, default=10)
parser.add_argument("--com_round", type=int, default=5)
parser.add_argument("--sample_ratio", type=float)
parser.add_argument("--batch_size", type=int)
parser.add_argument("--lr", type=float)
parser.add_argument("--epochs", type=int)
args = parser.parse_args()
# get raw dataset
root = "../datasets/mnist/"
trainset = torchvision.datasets.MNIST(root=root,
train=True,
download=True,
transform=transforms.ToTensor())
testset = torchvision.datasets.MNIST(root=root,
train=False,
download=True,
transform=transforms.ToTensor())
test_loader = torch.utils.data.DataLoader(testset,
batch_size=len(testset),
drop_last=False,
shuffle=False)
# setup
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
gpu = get_best_gpu()
model = CNN_MNIST().cuda(gpu)
# FL settings
num_per_round = int(args.total_client * args.sample_ratio)
aggregator = Aggregators.fedavg_aggregate
total_client_num = args.total_client  # total number of clients
data_indices = load_dict("./mnist_noniid.pkl")
# fedlab setup
local_model = deepcopy(model)
trainer = SubsetSerialTrainer(model=local_model,
dataset=trainset,
data_slices=data_indices,
aggregator=aggregator,
args={
"batch_size": args.batch_size,
"epochs": args.epochs,
"lr": args.lr
})
dynamic_lambdas = np.ones(num_per_round) * 1.0 / num_per_round
# train procedure
to_select = [i for i in range(total_client_num)]
for round in range(args.com_round):
model_parameters = SerializationTool.serialize_model(model)
selection = random.sample(to_select, num_per_round)
parameters = trainer.train(model_parameters=model_parameters,
id_list=selection,
aggregate=False)
gradients = [model_parameters - model for model in parameters]
for i, grad in enumerate(gradients):
gradients[i] = grad / grad.norm()
print(len(gradients))
print(gradients[0].shape)
# calculate lamda
lambda0 = [1.0 / num_per_round for _ in range(num_per_round)]
dynamic_lambdas = torch.Tensor(optim_lambdas(gradients, lambda0)).view(-1)
dt = Aggregators.fedavg_aggregate(gradients, dynamic_lambdas)
serialized_parameters = model_parameters - dt * args.lr
SerializationTool.deserialize_model(model, serialized_parameters)
criterion = nn.CrossEntropyLoss()
loss, acc = evaluate(model, criterion, test_loader)
print("loss: {:.4f}, acc: {:.2f}".format(loss, acc))
| 34.689655 | 123 | 0.615706 | [
"Apache-2.0"
] | KarhouTam/FedLab-benchmarks | fedlab_benchmarks/fedmgda+/standalone.py | 5,034 | Python |
from shoppinglist.models import Ingredient
from rest_framework import serializers
class IngredientSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Ingredient
fields = ('account', 'member', 'ref_date', 'ref_meal',
'ingredient', 'created', 'ingredient_there')
| 35.555556 | 67 | 0.709375 | [
"MIT"
] | christiankuhl/foodplanner | shoppinglist/serializers.py | 320 | Python |
""" NNAPI Systrace parser - tracking of call tree based on trace lines
See contract-between-code-and-parser.txt for the
specification (cases in the specification are referred to with SPEC).
"""
import re
import sys
from parser.naming import (subphases, translate_hidl_mark_to_nn_and_tag,
get_function_name_from_mark, make_tag)
from parser.naming import LAYER_CPU, LAYER_DRIVER, LAYER_RUNTIME, LAYER_APPLICATION
from parser.naming import MARKER_SWITCH, MARKER_SUBTRACT
from parser.naming import PHASE_EXECUTION, PHASE_OVERALL, PHASE_WARMUP, PHASE_BENCHMARK
from parser.tree import SingleThreadCallTree
class AppPhase(object):
""" Class to track the overall phase of the program. Used to split up warmup and benchmark.
Needs to be separate from the call trees to propagate the difference to driver.
"""
def __init__(self):
self.reset()
def current(self):
if self.stack:
return self.stack[-1]
else:
return PHASE_OVERALL
def push(self, phase):
self.stack.append(phase)
def pop(self):
self.stack.pop()
def reset(self):
self.stack = []
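# Illustrative note (not part of the original parser): AppPhase is a simple phase
# stack that defaults to the overall phase, e.g.
#   app_phase = AppPhase()
#   app_phase.current()          # PHASE_OVERALL while nothing is pushed
#   app_phase.push(PHASE_WARMUP)
#   app_phase.current()          # PHASE_WARMUP until the matching pop()
#   app_phase.pop()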
class Tracker(object):
""" Class to track the stack trace of a single thread and feed it into a SingleThreadCallTree
as well as keeping track of entry and exit times for functions.
Exposes statistics for a single thread, transforming the call tree as needed.
All statistics are in milliseconds.
Layer Runtime, Phase Execution (LR_PE) is special-cased, see comment in get_stat().
Subphases of Execution are aggregated towards the overall Execution phase as needed.
"""
def __init__(self, tgid, is_driver, app_phase):
self.tgid = tgid
self.is_driver = is_driver
self.app_phase = app_phase
# Match the trace string
# "[NN_LA_PP]funcE1" in "B|<thread1>|[NN_LA_PP]funcE1"
# "[NN_LC_PCO]funcC1" in "B|<thread1>|[SW][NN_LC_PCO]funcC1"
self.matcher = re.compile(r"B\|\d+\|.*\[([^]]+)\]\[?([^]])\]?")
self.reset()
def reset(self):
self.stats = {}
self.items = {}
self.mytree = SingleThreadCallTree()
self.begins_and_ends_ms = {}
self.la_pe_counts = {}
self.debugstring = "\n"
def handle_mark(self, time, mark):
""" Handle a single trace item (scoped entry and exit).
Translates:
- Automatically generated HIDL traces into NNTRACE layers and phases
- SPEC:Switch phase during function into dummy items
- SPEC:Subtracting time when nesting is violated into "subtract"
markers
- CPU/Driver layer distinction based on whether the process is the
driver or an application
This function is called multiple times for a single application run,
afterwards the statistics can be calculated.
"""
if mark[0] == "B":
switch = False
subtract = False
# Workarounds for wrong tracepoints in early versions
# TODO(mikie): remove later
if ("ANeuralNetworksEvent_free" in mark) or ("ANeuralNetworksExecution_free" in mark):
mark = mark.replace("_PT", "_PE")
# Workarounds for trace marker for getSupportedExtensions (fixed in ag/9484333)
if ("getSupportedExtensions" in mark):
mark = mark.replace("_PC", "_PI")
elif ("[SW][NN_LA_PR]executeWithCompilation" in mark):
mark = mark.replace("[SW]", "")
if MARKER_SWITCH in mark:
switch = True
if MARKER_SUBTRACT in mark:
subtract = True
if switch:
# End previous item
self.handle_mark(time, "E")
# Push a placeholder item that will get popped by the 'real' end of the
# previous item.
self.mytree.push_dummy(time)
m = self.matcher.search(mark)
if m is None:
tag = translate_hidl_mark_to_nn_and_tag(mark)
if tag is None:
raise Exception("Couldn't parse mark " + mark)
else:
tag = m.group(1)
[_, layer, phase] = tag.split("_")
if layer == LAYER_APPLICATION and phase in [PHASE_WARMUP, PHASE_BENCHMARK]:
self.app_phase.push(phase)
if not self.is_driver:
layer = layer.replace(LAYER_DRIVER, LAYER_CPU)
else:
layer = layer.replace(LAYER_CPU, LAYER_DRIVER)
if layer == LAYER_APPLICATION and phase == PHASE_EXECUTION:
self.la_pe_counts[self.app_phase.current()] = (
self.la_pe_counts.get(self.app_phase.current(), 0) + 1)
self.mytree.push(time, mark, layer, phase, self.app_phase.current(), subtract)
elif mark[0] == "E":
try:
node = self.mytree.pop(time)
if node.is_dummy(): # Placeholder item
pass
else:
if node.layer == LAYER_APPLICATION and node.phase in [PHASE_WARMUP, PHASE_BENCHMARK]:
self.app_phase.pop()
function = node.app_phase + "::" + get_function_name_from_mark(node.mark)
self.begins_and_ends_ms[function] = (self.begins_and_ends_ms.get(function, []) +
[[float(node.start_time_s) * 1000.0,
float(node.end_time_s) * 1000.0]])
except IndexError as e:
raise Exception("Unable to process a trace termination mark, please check that the collected trace are including full application lifecycles.\n") from e
def is_complete(self):
""" Checks if we've seen all end tracepoints for the begin tracepoints.
"""
return self.mytree.current.is_root()
def calculate_stats(self):
assert self.is_complete()
self.mytree.remove_ignored()
self.mytree.remove_dummies()
self.mytree.copy_subtracted_init_and_wrong_la()
self.mytree.add_missing_la_nodes()
# self.mytree.print()
self.mytree.validate_nesting()
def recurse(node, prev_layer, prev_phase, indent, in_pe_layers):
[begun, mark, layer, phase] = [
node.start_time_s, node.mark, node.layer, node.phase()]
time = node.end_time_s
tag = None
elapsed0 = "DETAIL"
elapsed1 = node.elapsed_less_subtracted_ms()
if elapsed1 is None:
raise Exception("Elapsed for {} returned None".format(node.to_str()))
if not node.is_added_detail() and not node.subtract:
tag = node.app_phase + "_" + layer + "_" + phase
elapsed0 = elapsed1
self.stats[tag] = self.stats.get(tag, 0.0) + elapsed0
self.items[tag] = self.items.get(tag, []) + [
mark + " " + str(elapsed0) + " " + str(elapsed1) + " " + tag]
if phase in subphases[PHASE_EXECUTION]:
if not in_pe_layers.get(layer):
pe_tag = node.app_phase + "_" + make_tag(layer, PHASE_EXECUTION)
self.stats[pe_tag] = self.stats.get(pe_tag, 0.0) + elapsed0
self.items[pe_tag] = self.items.get(pe_tag, []) + [
mark + " " + str(elapsed0) + " " + str(elapsed1) + " " + pe_tag]
if phase == PHASE_EXECUTION:
in_pe_layers[layer] = in_pe_layers.get(layer, 0) + 1
for c in node.children:
recurse(c, layer or prev_layer, phase or prev_phase,
indent + " ", in_pe_layers)
if phase == PHASE_EXECUTION:
in_pe_layers[layer] = in_pe_layers[layer] - 1
return
for top in self.mytree.root.children:
recurse(top, None, None, "", {})
self.debugstring = self.mytree.to_str()
# We need to special case the driver execution time because:
# - The existing drivers don't have tracing, so we rely on HIDL traces
# - Best we can do is to take the start of the HIDL server side call as
# the starting point (which includes a bit of overhead, but not much) and
# the start of the callback as the end point (which should be pretty
# accurate)
# Note that the begin and end may be on different threads, hence the
# calculation needs to happen in aggregation rather than here.
def get_ld_pe_begins(self, app_phase):
return self.get_begins(app_phase, "HIDL::IPreparedModel::execute::server")
def get_ld_pe_ends(self, app_phase):
return self.get_begins(app_phase, "HIDL::IExecutionCallback::notify::client")
def get_stat(self, tag, app_phase, special_case_pe=True):
if not self.stats and not self.mytree.is_empty():
self.calculate_stats()
if tag == make_tag(LAYER_RUNTIME, PHASE_EXECUTION) and special_case_pe:
# Execution is exposed as an asynchronous event from the runtime, so we
# calculate the runtime time as running from when the async operation is
# kicked off until wait finishes, plus the synchronous setup and teardown calls.
# This has two limitations:
# - multithreaded usage will not work correctly
# - should the application spend so much time before calling wait that
# execution has already finished, the time would get allocated to the
# runtime incorrectly
async_starts = self.get_begins(app_phase, "ANeuralNetworksExecution_startCompute")
async_ends = self.get_ends(app_phase, "ANeuralNetworksEvent_wait")
elapsed = 0.0
for i in range(0, len(async_starts)):
elapsed = elapsed + (async_ends[i] - async_starts[i])
for sync in ["ANeuralNetworksExecution_create", "ANeuralNetworksExecution_free",
"ANeuralNetworksEvent_create", "ANeuralNetworksEvent_free",
"ANeuralNetworksExecution_setInput", "ANeuralNetworksExecution_setOutput",
"ANeuralNetworksExecution_setInputFromMemory",
"ANeuralNetworksExecution_setOutputFromMemory"]:
sync_starts = self.get_begins(app_phase, sync)
sync_ends = self.get_ends(app_phase, sync)
for i in range(0, len(sync_starts)):
elapsed = elapsed + (sync_ends[i] - sync_starts[i])
return elapsed
return self.stats.get(app_phase + "_" + tag, 0.0)
def get_execution_count(self, app_phase):
# ANeuralNetworksExecution_create is reliable and comes from the runtime,
# but not available pre-P
count = len(self.get_begins(app_phase, "ANeuralNetworksExecution_create"))
if count > 0:
return count
# Application may have added tracepoints
return self.la_pe_counts.get(app_phase, 0)
def get_begins(self, app_phase, function):
name = app_phase + "::" + function
return [begin_and_end[0] for begin_and_end in self.begins_and_ends_ms.get(name, [])]
def get_ends(self, app_phase, function):
name = app_phase + "::" + function
return [begin_and_end[1] for begin_and_end in self.begins_and_ends_ms.get(name, [])]
def print_stats(self):
if not self.stats:
self.calculate_stats()
print(self.tgid, "Driver" if self.is_driver else "App")
for tag in self.stats:
print(tag, self.stats[tag])
if self.items.get(tag):
for item in self.items[tag]:
print(" ", item)
else:
print(" ", "calculated only")
def print(self):
self.mytree.print()
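# Illustrative sketch (not part of the original parser): the aggregation step described
# above pairs the driver-side begin timestamps from get_ld_pe_begins() with the callback
# end timestamps from get_ld_pe_ends() in order and sums the differences. The helper and
# the millisecond values below are hypothetical and only demonstrate that pairing.
def _sum_paired_intervals_ms(begins_ms, ends_ms):
  """Sum end - begin over index-paired begin/end timestamp lists."""
  return sum(end - begin for begin, end in zip(begins_ms, ends_ms))

# Two hypothetical driver executions taking 3.0 ms and 4.5 ms.
assert _sum_paired_intervals_ms([10.0, 20.0], [13.0, 24.5]) == 7.5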
| 42.11583 | 160 | 0.660799 | [
"Apache-2.0"
] | PotatoProject-next/ackages_modules_NeuralNetworks | tools/systrace_parser/parser/tracker.py | 10,908 | Python |
from __future__ import print_function
import os
import percy
from percy import errors
from percy import utils
__all__ = ['Runner']
class Runner(object):
def __init__(self, loader=None, config=None, client=None):
self.loader = loader
self.config = config or percy.Config()
self.client = client or percy.Client(config=self.config)
self._current_build = None
self._is_enabled = os.getenv('PERCY_ENABLE', '1') == '1'
# Sanity check environment and auth setup. If no auth token is configured, warn when running in CI, then disable Percy.
if self._is_enabled:
try:
self.client.config.access_token
except errors.AuthError:
if self.client.environment.current_ci:
utils.print_error('[percy] Warning: Percy is disabled, no PERCY_TOKEN set.')
self._is_enabled = False
def initialize_build(self, **kwargs):
# Silently pass if Percy is disabled.
if not self._is_enabled:
return
build_resources = self.loader.build_resources if self.loader else []
sha_to_build_resource = {}
for build_resource in build_resources:
sha_to_build_resource[build_resource.sha] = build_resource
self._current_build = self.client.create_build(resources=build_resources, **kwargs)
try:
missing_resources = self._current_build['data']['relationships']['missing-resources']
missing_resources = missing_resources.get('data', [])
for missing_resource in missing_resources:
sha = missing_resource['id']
resource = sha_to_build_resource.get(sha)
# This resource should always exist, but if by chance it doesn't we make it safe here.
# A nicer error will be raised by the finalize API when the resource is still missing.
if resource:
print('Uploading new build resource: {}'.format(resource.resource_url))
# Optimization: we don't hold all build resources in memory. Instead we store a
# "local_path" variable that be used to read the file again if it is needed.
if resource.local_path:
with open(resource.local_path, 'rb') as f:
content = f.read()
else:
content = resource.content
self.client.upload_resource(self._current_build['data']['id'], content)
except KeyError:
print(self._current_build)
def snapshot(self, **kwargs):
# Silently pass if Percy is disabled.
if not self._is_enabled:
return
if not self._current_build:
raise errors.UninitializedBuildError('Cannot call snapshot before build is initialized')
root_resource = self.loader.snapshot_resources[0]
build_id = self._current_build['data']['id']
snapshot_data = self.client.create_snapshot(build_id, [root_resource], **kwargs)
missing_resources = snapshot_data['data']['relationships']['missing-resources']
missing_resources = missing_resources.get('data', [])
if missing_resources:
# There can only be one missing resource in this case, the root_resource.
self.client.upload_resource(build_id, root_resource.content)
self.client.finalize_snapshot(snapshot_data['data']['id'])
def finalize_build(self):
# Silently pass if Percy is disabled.
if not self._is_enabled:
return
if not self._current_build:
raise errors.UninitializedBuildError(
'Cannot finalize_build before build is initialized.')
self.client.finalize_build(self._current_build['data']['id'])
self._current_build = None
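# Minimal usage sketch (illustrative; not part of the library). With PERCY_ENABLE=0 the
# runner's build calls below are silent no-ops, so this runs without a PERCY_TOKEN or
# network access.
if __name__ == '__main__':
    os.environ['PERCY_ENABLE'] = '0'
    demo_runner = Runner()          # no loader needed while Percy is disabled
    demo_runner.initialize_build()  # returns immediately
    demo_runner.snapshot()          # likewise a no-op
    demo_runner.finalize_build()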
| 41.094737 | 102 | 0.627561 | [
"MIT"
] | getsentry/python-percy-client | percy/runner.py | 3,904 | Python |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from armi.materials.water import SaturatedWater, SaturatedSteam
"""
unit tests for water materials
"""
class Test_Water(unittest.TestCase):
"""
unit tests for water materials
"""
def test_water_at_freezing(self):
"""
Reproduce verification results from IAPWS-IF97 for water at 0C
http://www.iapws.org/relguide/supsat.pdf
"""
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 273.16
ref_vapor_pressure = 611.657
ref_dp_dT = 44.436693
ref_saturated_water_rho = 999.789
ref_saturated_steam_rho = 0.00485426
ref_alpha = -11.529101
ref_saturated_water_enthalpy = 0.611786
ref_saturated_steam_enthalpy = 2500.5e3
ref_phi = -0.04
ref_saturated_water_entropy = 0
ref_saturated_steam_entropy = 9.154e3
self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3
)
self.assertAlmostEqual(
ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3
)
self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2
)
self.assertAlmostEqual(
ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2
)
self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
def test_water_at_boiling(self):
"""
Reproduce verification results from IAPWS-IF97 for water at 100C
http://www.iapws.org/relguide/supsat.pdf
"""
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 373.1243
ref_vapor_pressure = 0.101325e6
ref_dp_dT = 3.616e3
ref_saturated_water_rho = 958.365
ref_saturated_steam_rho = 0.597586
ref_alpha = 417.65e3
ref_saturated_water_enthalpy = 417.05e3
ref_saturated_steam_enthalpy = 2675.7e3
ref_phi = 1.303e3
ref_saturated_water_entropy = 1.307e3
ref_saturated_steam_entropy = 7.355e3
self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
def test_water_at_criticalPoint(self):
"""
Reproduce verification results from IAPWS-IF97 for water at 647.096K
http://www.iapws.org/relguide/supsat.pdf
"""
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 647.096
ref_vapor_pressure = 22.064e6
ref_dp_dT = 268e3
ref_saturated_water_rho = 322
ref_saturated_steam_rho = 322
ref_alpha = 1548e3
ref_saturated_water_enthalpy = 2086.6e3
ref_saturated_steam_enthalpy = 2086.6e3
ref_phi = 3.578e3
ref_saturated_water_entropy = 4.410e3
ref_saturated_steam_entropy = 4.410e3
self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
if __name__ == "__main__":
unittest.main()
| 36.114583 | 88 | 0.675224 | [
"Apache-2.0"
] | youngmit/armi | armi/materials/tests/test_water.py | 6,934 | Python |
# Fix the code so that there's no error!
def count_evens(start, end):
"""Returns the number of even numbers between start and end."""
counter = start
num_evens = 0
while counter <= end:
if counter % 2 == 0:
num_evens += 1
counter += 1
return num_evens
def count_multiples(start, end, divisor):
"""Returns the number of multiples of divisor between start and end."""
counter = start
num_multiples = 0
while counter <= end:
if counter % divisor == 0:
num_multiples += 1
counter += 1
return num_multiples
count_both = count_evens(10, 20) + count_multiples(10, 20, 3)
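# Quick sanity check (not part of the original exercise): between 10 and 20 inclusive
# there are 6 even numbers and 3 multiples of 3, so count_both should be 9.
assert count_evens(10, 20) == 6
assert count_multiples(10, 20, 3) == 3
assert count_both == 9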
| 25.625 | 73 | 0.666667 | [
"MIT"
] | annezola/gdi-python | exercise_brokencounts_solution.py | 615 | Python |
"""My nifty plot-level RGB algorithm
"""
# Importing modules. Please add any additional import statements below
import numpy as np
# Definitions
# Please replace these definitions' values with the correct ones
VERSION = '1.0'
# Information on the creator of this algorithm
ALGORITHM_AUTHOR = 'Unknown'
ALGORITHM_AUTHOR_EMAIL = ''
ALGORITHM_CONTRIBUTORS = [""]
ALGORITHM_NAME = 'my nifty one'
ALGORITHM_DESCRIPTION = 'This algorithm calculates the niftyness of RGB plot-level images'
# Citation information for publication (more information in HOW_TO.md)
CITATION_AUTHOR = 'unknown'
CITATION_TITLE = ''
CITATION_YEAR = ''
# The name of one or more variables returned by the algorithm, separated by commas (more information in HOW_TO.md)
# If only one name is specified, no commas are used.
# Note that variable names cannot have commas in them: use a different separator instead. Also,
# all white space is kept intact; don't add any extra whitespace since it may cause name comparisons
# to fail.
# !! Replace the content of this string with your variable names
VARIABLE_NAMES = 'size of image channels'
# Variable units matching the order of VARIABLE_NAMES, also comma-separated.
# For each variable name in VARIABLE_NAMES add the unit of measurement the value represents.
# !! Replace the content of this string with your variables' unit
VARIABLE_UNITS = 'pixels'
# Variable labels matching the order of VARIABLE_NAMES, also comma-separated.
# This is an optional definition and can be left empty.
VARIABLE_LABELS = ''
# Optional override for the generation of a BETYdb compatible csv file
# Set to False to suppress the creation of a compatible file
WRITE_BETYDB_CSV = True
# Optional override for the generation of a TERRA REF Geostreams compatible csv file
# Set to False to suppress the creation of a compatible file
WRITE_GEOSTREAMS_CSV = True
# Entry point for plot-level RBG algorithm
def calculate(pxarray: np.ndarray):
"""Calculates one or more values from plot-level RGB data
Arguments:
pxarray: Array of RGB data for a single plot
Return:
Returns one or more calculated values
"""
# ALGORITHM: replace the following lines with your algorithm
channel_size = pxarray[:, :, 1].size
# RETURN: replace the following return with your calculated values. Be sure to order them as defined in VARIABLE_NAMES above
return channel_size
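# Minimal local check (illustrative only; not part of the transformer interface). It assumes
# a plot-level RGB array of shape (rows, columns, channels), here an all-zero 5 x 4 image.
if __name__ == '__main__':
    _example_plot = np.zeros((5, 4, 3), dtype=np.uint8)
    print(calculate(_example_plot))  # 20 pixels in the 5 x 4 green channel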
| 38.095238 | 128 | 0.768333 | [
"BSD-3-Clause"
] | AgPipeline/plot-base-rgb | .github/workflows/algorithm_rgb.py | 2,400 | Python |
"""Implementation of Rule L042."""
from sqlfluff.core.rules.base import BaseCrawler, LintResult
from sqlfluff.core.rules.doc_decorators import document_configuration
@document_configuration
class Rule_L042(BaseCrawler):
"""Join/From clauses should not contain subqueries. Use CTEs instead.
By default this rule is configured to allow subqueries within `FROM`
clauses but not within `JOIN` clauses. If you prefer a stricter lint
then this is configurable.
NB: Some dialects don't allow CTEs, and for those dialects
this rule makes no sense and should be disabled.
| **Anti-pattern**
.. code-block:: sql
select
a.x, a.y, b.z
from a
join (
select x, z from b
) using(x)
| **Best practice**
.. code-block:: sql
with c as (
select x, z from b
)
select
a.x, a.y, c.z
from a
join c using(x)
"""
config_keywords = ["forbid_subquery_in"]
_config_mapping = {
"join": ["join_clause"],
"from": ["from_clause"],
"both": ["join_clause", "from_clause"],
}
def _eval(self, segment, **kwargs):
"""Join/From clauses should not contain subqueries. Use CTEs instead.
NB: No fix for this routine because it would be very complex to
implement reliably.
"""
parent_types = self._config_mapping[self.forbid_subquery_in]
for parent_type in parent_types:
if segment.is_type(parent_type):
# Get the referenced table segment
table_expression = segment.get_child("table_expression")
if not table_expression:
return None # There isn't one. We're done.
# Get the main bit
table_expression = table_expression.get_child("main_table_expression")
if not table_expression:
return None # There isn't one. We're done.
# If any of the following are found, raise an issue.
# If not, we're fine.
problem_children = [
"with_compound_statement",
"set_expression",
"select_statement",
]
for seg_type in problem_children:
seg = table_expression.get_child(seg_type)
if seg:
return LintResult(
anchor=seg,
description=f"{parent_type} clauses should not contain subqueries. Use CTEs instead",
)
| 31.559524 | 113 | 0.562429 | [
"MIT"
] | Jophish/sqlfluff | src/sqlfluff/core/rules/std/L042.py | 2,651 | Python |
#!/usr/bin/env python
"""
More complex demonstration of what's possible with the progress bar.
"""
import threading
import time
from quo.text import Text
from quo.progress import ProgressBar
def main():
with ProgressBar(
title=Text("<b>Example of many parallel tasks.</b>"),
bottom_toolbar=Text("<b>[Control-L]</b> clear <b>[Control-C]</b> abort"),
) as pb:
def run_task(label, total, sleep_time):
for i in pb(range(total), label=label):
time.sleep(sleep_time)
threads = [
threading.Thread(target=run_task, args=("First task", 50, 0.1)),
threading.Thread(target=run_task, args=("Second task", 100, 0.1)),
threading.Thread(target=run_task, args=("Third task", 8, 3)),
threading.Thread(target=run_task, args=("Fourth task", 200, 0.1)),
threading.Thread(target=run_task, args=("Fifth task", 40, 0.2)),
threading.Thread(target=run_task, args=("Sixth task", 220, 0.1)),
threading.Thread(target=run_task, args=("Seventh task", 85, 0.05)),
threading.Thread(target=run_task, args=("Eight task", 200, 0.05)),
]
for t in threads:
t.daemon = True
t.start()
# Wait for the threads to finish. We use a timeout for the join() call,
# because on Windows, join cannot be interrupted by Control-C or any other
# signal.
for t in threads:
while t.is_alive():
t.join(timeout=0.5)
if __name__ == "__main__":
main()
| 34.173913 | 82 | 0.592875 | [
"MIT"
] | scalabli/quo | examples/progress/many-parallel-tasks.py | 1,572 | Python |
import math
import lavalink
import ksoftapi
import discord
from discord.ext import commands
class Music(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.kclient = bot.kclient
if not hasattr(bot, 'lavalink'):
bot.lavalink = lavalink.Client(bot.user.id)
bot.lavalink.add_node('localhost', 1616, 'proto', 'in', 'default-node') # Host, Port, Password, Region, Name
bot.add_listener(bot.lavalink.voice_update_handler, 'on_socket_response')
lavalink.add_event_hook(self.track_hook)
def cog_unload(self):
""" Cog unload handler. This removes any event hooks that were registered. """
self.bot.lavalink._event_hooks.clear()
async def cog_command_error(self, ctx, error):
if isinstance(error, commands.CommandInvokeError):
await ctx.send(error.original)
async def track_hook(self, event):
if isinstance(event, lavalink.events.QueueEndEvent):
guild_id = int(event.player.guild_id)
await self.connect_to(guild_id, None)
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
async def cog_before_invoke(self, ctx):
""" Command before-invoke handler. """
guild_check = ctx.guild is not None
if guild_check:
await self.ensure_voice(ctx)
# Ensure that the bot and command author share a mutual voicechannel.
return guild_check
async def ensure_voice(self, ctx):
""" This check ensures that the bot and command author are in the same voicechannel. """
player = self.bot.lavalink.player_manager.create(ctx.guild.id, endpoint=str(ctx.guild.region))
should_connect = ctx.command.name in ('play',)
if not ctx.author.voice or not ctx.author.voice.channel:
raise commands.CommandInvokeError('Join a voice channel first :loud_sound:')
if not player.is_connected:
if not should_connect:
raise commands.CommandInvokeError('Not connected :mute:')
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect or not permissions.speak: # Check user limit too?
raise commands.CommandInvokeError('I need the `CONNECT` and `SPEAK` permissions. :disappointed_relieved:')
player.store('channel', ctx.channel.id)
await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
else:
if int(player.channel_id) != ctx.author.voice.channel.id:
raise commands.CommandInvokeError('You need to be in my voice channel :loud_sound:')
async def connect_to(self, guild_id: int, channel_id: str):
""" Connects to the given voicechannel ID. A channel_id of `None` means disconnect. """
ws = self.bot._connection._get_websocket(guild_id)
await ws.voice_state(str(guild_id), channel_id)
@commands.command(name='play', aliases=['p', 'sing'])
async def play(self, ctx, *, query):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
query = query.strip('<>')
if not query.startswith('http'):
query = f'ytsearch:{query}'
results = await player.node.get_tracks(query)
if not results or not results['tracks']:
return await ctx.send('Song not found :x: Please try again :mag_right:')
em = discord.Embed(colour=discord.Colour(0x59FFC8))
if results['loadType'] == 'PLAYLIST_LOADED':
tracks = results['tracks']
for track in tracks:
# Add all of the tracks from the playlist to the queue.
player.add(requester=ctx.author.id, track=track)
em.title = 'Playlist Enqueued!'
em.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} tracks'
else:
track = results['tracks'][0]
em.title = 'Track Enqueued'
em.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{track['info']['identifier']}/hqdefault.jpg")
em.add_field(name='Channel', value=track['info']['author'])
if track['info']['isStream']:
duration = 'Live'
else:
duration = lavalink.format_time(track['info']['length']).lstrip('00:')
em.add_field(name='Duration', value=duration)
track = lavalink.models.AudioTrack(track, ctx.author.id, recommended=True)
player.add(requester=ctx.author.id, track=track)
msg = await ctx.send(embed=em)
if not player.is_playing:
await player.play()
await player.reset_equalizer()
await msg.delete(delay=1)
await self.now(ctx)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
@commands.command(name='seek')
async def seek(self, ctx, seconds=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
if not seconds:
return await ctx.send('You need to specify the amount of seconds to seek :fast_forward:')
try:
track_time = player.position + int(seconds) * 1000
await player.seek(track_time)
except ValueError:
return await ctx.send('Specify valid amount of seconds :clock3:')
await ctx.send(f'Moved track to **{lavalink.format_time(track_time)}**')
@commands.command(name='skip', aliases=['forceskip', 'fs', 'next'])
async def skip(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
await ctx.send('⏭ | Skipped.')
await player.skip()
@commands.command(name='now', aliases=['current', 'currentsong', 'playing', 'np'])
async def now(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
song = 'Nothing'
if player.current:
if player.current.stream:
dur = 'LIVE'
pos = ''
count = total = 1
else:
count = player.position
pos = lavalink.format_time(count)
total = player.current.duration
dur = lavalink.format_time(total)
if pos == dur: # When called immediately after enqueue
count = 0
pos = '00:00:00'
dur = dur.lstrip('00:')
pos = pos[-len(dur):]
bar_len = 30 # bar length
filled_len = int(bar_len * count // float(total))
bar = '═' * filled_len + '◈' + '─' * (bar_len - filled_len)
song = f'[{player.current.title}]({player.current.uri})\n`{pos} {bar} {dur}`'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
requester = ctx.guild.get_member(player.current.requester)
em.set_footer(text=f"Requested by: {requester}", icon_url=requester.avatar_url)
await ctx.send(embed=em)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=player.current.title))
else:
await ctx.send('Not playing anything :mute:')
@commands.command(name='save', aliases=['star'])
async def savetodm(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if player.current:
if player.current.stream:
dur = 'Live'
else:
dur = lavalink.format_time(player.current.duration).lstrip('00:')
song = f'[{player.current.title}]({player.current.uri})'
em = discord.Embed(colour=discord.Colour(0x59FFC8), description=song)
em.set_author(name="Now Playing 🎵", icon_url="https://i.ibb.co/DGsmTvh/star.gif")
em.set_thumbnail(url=f"http://i.ytimg.com/vi/{player.current.identifier}/hqdefault.jpg")
em.add_field(name='Channel', value=player.current.author)
em.add_field(name='Duration', value=dur)
user = ctx.author
await user.send(embed=em)
await ctx.send(f"Current song has been sent to you {ctx.author.mention} :floppy_disk:")
else:
await ctx.send('Not playing anything :mute:')
@commands.command(name='queue', aliases=['q', 'playlist'])
async def queue(self, ctx, page: int=1):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Queue empty! Why not queue something? :cd:')
items_per_page = 10
pages = math.ceil(len(player.queue) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue_list = ''
for i, track in enumerate(player.queue[start:end], start=start):
queue_list += f'`{i + 1}.` [**{track.title}**]({track.uri})\n'
embed = discord.Embed(colour=ctx.guild.me.top_role.colour,
description=f'**{len(player.queue)} tracks**\n\n{queue_list}')
embed.set_footer(text=f'Viewing page {page}/{pages}')
await ctx.send(embed=embed)
@commands.command(name='pause', aliases=['resume'])
async def pause(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
if player.paused:
await player.set_pause(False)
await ctx.message.add_reaction('▶')
else:
await player.set_pause(True)
await ctx.message.add_reaction('⏸')
@commands.command(name='volume', aliases=['vol'])
async def volume(self, ctx, volume: int=None):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not volume:
return await ctx.send(f'🔈 | {player.volume}%')
await player.set_volume(volume)
await ctx.send(f'🔈 | Set to {player.volume}%')
@commands.command(name='shuffle')
async def shuffle(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
player.shuffle = not player.shuffle
await ctx.send('🔀 | Shuffle ' + ('enabled' if player.shuffle else 'disabled'))
@commands.command(name='repeat')
async def repeat(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('Not playing anything :mute:')
player.repeat = not player.repeat
await ctx.send('🔁 | Repeat ' + ('enabled' if player.repeat else 'disabled'))
@commands.command(name='remove', aliases=['dequeue', 'pop'])
async def remove(self, ctx, index: int):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.queue:
return await ctx.send('Nothing queued :cd:')
if index > len(player.queue) or index < 1:
return await ctx.send('Index has to be >=1 and <=queue size')
index = index - 1
removed = player.queue.pop(index)
await ctx.send('Removed **' + removed.title + '** from the queue.')
@commands.command(name='disconnect', aliases=['dis', 'stop', 'leave'])
async def disconnect(self, ctx):
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not ctx.author.voice or (player.is_connected and ctx.author.voice.channel.id != int(player.channel_id)):
return await ctx.send('You\'re not in my voice channel :loud_sound:')
if not player.is_connected:
return await ctx.send('Not connected :mute:')
player.queue.clear()
# Stop the current track so Lavalink consumes less resources.
await player.stop()
# Disconnect from the voice channel.
await self.connect_to(ctx.guild.id, None)
await ctx.send('Disconnected :mute:')
await self.bot.change_presence(status=discord.Status.idle, activity=discord.Game(name="Nothing"))
@commands.command(name='lyrics', aliases=['ly'])
async def get_lyrics(self, ctx, *, query: str=""):
"""Get lyrics of current song"""
if not query:
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if not player.is_playing:
return await ctx.send('I\'m not currently playing anything :warning:')
query = player.current.title
try:
async with ctx.typing():
results = await self.kclient.music.lyrics(query, limit=1)
except ksoftapi.NoResults:
await ctx.send(f'No lyrics found for `{query}`')
else:
lyrics = results[0].lyrics
result = results[0]
embed = discord.Embed(title=f'{result.name} - {result.artist}', color=discord.Color(0xCCFF00), description=lyrics[:2048])
embed.set_thumbnail(url=result.album_art)
embed.set_author(name="Lyrics:")
lyrics = lyrics[2048:]
embeds = [embed] # create embeds' list for long lyrics
while len(lyrics) > 0 and len(embeds) < 10: # limiting embeds to 10
embed = discord.Embed(color=discord.Color(0xCCFF00), description=lyrics[:2048])
lyrics = lyrics[2048:]  # drop the chunk that was just added to an embed
embeds.append(embed)
embeds[-1].set_footer(text="Source: KSoft.Si") # set footer for last embed
for embed in embeds:
await ctx.send(embed=embed)
@commands.command(name='equalizer', aliases=['eq'])
async def equalizer(self, ctx, *args):
"""Equalizer"""
player = self.bot.lavalink.player_manager.get(ctx.guild.id)
if len(args) == 0:
await ctx.send('Specify `band gain` or `preset` to change frequencies :control_knobs:')
elif len(args) == 1:
presets ={
'reset': 'Default',
'bassboost': [0.08, 0.12, 0.2, 0.18, 0.15, 0.1, 0.05, 0.0, 0.02, -0.04, -0.06, -0.08, -0.10, -0.12, -0.14],
'jazz': [-0.13, -0.11, -0.1, -0.1, 0.14, 0.2, -0.18, 0.0, 0.24, 0.22, 0.2, 0.0, 0.0, 0.0, 0.0],
'pop': [-0.02, -0.01, 0.08, 0.1, 0.15, 0.1, 0.03, -0.02, -0.035, -0.05, -0.05, -0.05, -0.05, -0.05, -0.05],
'treble': [-0.1, -0.12, -0.12, -0.12, -0.08, -0.04, 0.0, 0.3, 0.34, 0.4, 0.35, 0.3, 0.3, 0.3, 0.3]
}
preset = args[0].lower()
if preset in ['reset', 'default']:
await player.reset_equalizer()
elif preset in presets:
gain_list = enumerate(presets[preset])
await player.set_gains(*gain_list)
elif preset == '--list':
em = discord.Embed(title=':control_knobs: EQ presets:', color=discord.Color(0xFF6EFF), description='\n'.join(presets.keys()))
return await ctx.send(embed=em)
else:
return await ctx.send('Invalid preset specified :control_knobs:\nType `~eq --list` for all presets')
elif len(args) == 2:
try:
band = int(args[0])
gain = float(args[1])
await player.set_gain(band, gain)
except ValueError:
return await ctx.send('Specify valid `band gain` values :control_knobs:')
else:
return await ctx.send('Specify `band gain` or `preset` :control_knobs:')
# Print final EQ settings
eq_frequencies = [f"`{gain}`" for gain in player.equalizer]
await ctx.send(":level_slider: Current Values:\n" + ' '.join(eq_frequencies))
def setup(bot):
bot.add_cog(Music(bot)) | 42.798956 | 141 | 0.596694 | [
"MIT"
] | 1Prototype1/HexBot | cogs/music.py | 16,422 | Python |
import math
class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
self.min = math.inf
def push(self, x: int) -> None:
self.stack.append(x)
if x < self.min:
self.min = x
def pop(self) -> None:
t = self.stack.pop()
if t == self.min and len(self.stack):
self.min = min(self.stack)
elif t == self.min and not len(self.stack):
self.min = math.inf
def top(self) -> int:
return self.stack[-1]
def getMin(self) -> int:
return self.min
# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(x)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
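# Worked example (values assumed; mirrors the classic problem statement):
if __name__ == "__main__":
    obj = MinStack()
    obj.push(-2)
    obj.push(0)
    obj.push(-3)
    assert obj.getMin() == -3
    obj.pop()
    assert obj.top() == 0
    assert obj.getMin() == -2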
| 21.184211 | 63 | 0.532919 | [
"MIT"
] | bgoonz/INTERVIEW-PREP-COMPLETE | notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/Interview-Problems/LeetCode/MinStack.py | 805 | Python |
import argparse
import json
import os
from collections import Counter, defaultdict
from helper import _is_token_alnum
THRESHOLD = 0.01
GAP = 10
def get_full_mapping(src_filename, trg_filename, align_filename,
mapping_filename, reverse_src2trg=False, lowercase=True):
""" Get full mapping give align.
Args:
src_filename:
trg_filename:
align_filename:
mapping_filename:
reverse_src2trg:
lowercase:
Returns:
"""
print('src: {}, trg: {}, align: {}, mapping: {}, reverse: {}'.format(
src_filename, trg_filename, align_filename, mapping_filename,
reverse_src2trg))
src2trg_mapping = defaultdict(lambda: defaultdict(int))
processed_line = 0
with open(src_filename) as fs, open(trg_filename) as ft, open(
align_filename) as fa:
for ls, lt, la in zip(fs, ft, fa):
if lowercase:
ls = ls.lower()
lt = lt.lower()
processed_line += 1
ls_words = ls.split()
lt_words = lt.split()
la_aligns = la.split()
src_pos_counter = Counter()
trg_pos_counter = Counter()
valid_src_pos = set()
valid_trg_pos = set()
for align in la_aligns:
# only consider one-to-one mapping
src_pos, trg_pos = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
# only consider alpha number token
if _is_token_alnum(ls_words[src_pos]):
src_pos_counter[src_pos] += 1
if _is_token_alnum(lt_words[trg_pos]):
trg_pos_counter[trg_pos] += 1
# ignore tokens that are aligned more than once
for pos, c in src_pos_counter.items():
if c == 1:
valid_src_pos.add(pos)
for pos, c in trg_pos_counter.items():
if c == 1:
valid_trg_pos.add(pos)
for align in la_aligns:
src_pos, trg_pos = align.split('-')
src_pos = int(src_pos)
trg_pos = int(trg_pos)
if _is_token_alnum(ls_words[src_pos]) and _is_token_alnum(
lt_words[trg_pos]) and (src_pos in valid_src_pos) and (
trg_pos in valid_trg_pos):
if reverse_src2trg:
src2trg_mapping[lt_words[trg_pos]][
ls_words[src_pos]] += 1
else:
src2trg_mapping[ls_words[src_pos]][
lt_words[trg_pos]] += 1
if processed_line % 1000000 == 0:
print('{} done.'.format(processed_line))
with open(mapping_filename, 'w') as fw:
print('dump to {} ...'.format(mapping_filename))
json.dump(src2trg_mapping, fw)
return src2trg_mapping
def refine_dict(full_mapping, clean_dict_filename, threshold, ignore_gap):
""" Clean dictionary based on frequency and gap of frequency.
For example,
{'s1': ['t1': 999, 't2': 199, 't3':1],
's2': ['m1': 2000, 'm2': 100]}
=>
{'s1': ['t1': 999, 't2': 199],
's2': ['m1': 2000]}
Args:
full_mapping:
clean_dict_filename:
threshold:
ignore_gap:
Returns:
"""
print('Refine dict to {}, threshold: {}, ignore_gap: {} ...'.format(
clean_dict_filename, threshold, ignore_gap))
full_mapping = sorted(
full_mapping.items(),
key=lambda x: sum(x[1].values()),
reverse=True)
with open(clean_dict_filename, 'w') as fw:
for idx, src2trg in enumerate(full_mapping):
src = src2trg[0]
trg = sorted(src2trg[1].items(), key=lambda x: x[1], reverse=True)
total_count = sum(c[1] for c in trg)
clean_trg = dict()
p = trg[0][1]
for w, c in trg:
if c / total_count < threshold:
# too rare
break
if (p / c > ignore_gap) and (c / total_count < THRESHOLD * 5):
# large gap
break
p = c
clean_trg.update({w: round(c / total_count, 3)})
fw.write('{}\n'.format(json.dumps({src: clean_trg}, ensure_ascii=False)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Process alignments and do filter')
parser.add_argument('--src_filename',
help='Origin src file name before bsp',
type=str,
required=True)
parser.add_argument('--trg_filename',
help='Origin trg file name before bsp',
type=str,
required=True)
parser.add_argument('--align_filename',
help='align file name by atools',
type=str,
required=True)
parser.add_argument('--dict_filename',
help='clean dict file name',
type=str,
required=True)
parser.add_argument('--threshold',
help='threshold of ignore frequency',
type=float,
default=THRESHOLD)
parser.add_argument('--ignore_gap',
help='gap of ignore frequency',
type=float,
default=GAP)
parser.add_argument(
'--overwrite', dest='overwrite',
action='store_true', help='Overwrite existing output files')
args = parser.parse_args()
if args.overwrite:
print('Overwrite existing file')
src2trg_mapping_filename = '{}.{}'.format(args.align_filename,
'src2trg_mapping')
trg2src_mapping_filename = '{}.{}'.format(args.align_filename,
'trg2src_mapping')
if os.path.isfile(src2trg_mapping_filename) and (not args.overwrite):
print('loading mapping: {}'.format(src2trg_mapping_filename))
with open(src2trg_mapping_filename) as f:
full_src2trg_mapping = json.load(f)
else:
print('creating mapping: {}'.format(src2trg_mapping_filename))
full_src2trg_mapping = get_full_mapping(args.src_filename,
args.trg_filename,
args.align_filename,
src2trg_mapping_filename,
False)
if os.path.isfile(trg2src_mapping_filename) and (not args.overwrite):
print('loading mapping: {}'.format(trg2src_mapping_filename))
with open(trg2src_mapping_filename) as f:
full_trg2src_mapping = json.load(f)
else:
print('creating mapping: {}'.format(trg2src_mapping_filename))
full_trg2src_mapping = get_full_mapping(args.src_filename,
args.trg_filename,
args.align_filename,
trg2src_mapping_filename,
True)
src2trg_clean_dict_filename = '{}.{}'.format(args.dict_filename,
'src2trg')
refine_dict(full_src2trg_mapping, src2trg_clean_dict_filename,
args.threshold, args.ignore_gap)
trg2src_clean_dict_filename = '{}.{}'.format(args.dict_filename,
'trg2src')
refine_dict(full_trg2src_mapping, trg2src_clean_dict_filename,
args.threshold, args.ignore_gap)
| 37.918269 | 85 | 0.52035 | [
"Apache-2.0"
] | JiangtaoFeng/ParaGen | examples/wmt/tools/align/extract_bilingual_vocabulary.py | 7,887 | Python |
#!/usr/bin/env python
# coding: utf-8
"""
Multi-Sensor Moving Platform Simulation Example
===============================================
This example looks at how multiple sensors can be mounted on a single moving platform and exploiting a defined moving
platform as a sensor target.
"""
# %%
# Building a Simulated Multi-Sensor Moving Platform
# -------------------------------------------------
# The focus of this example is to show how to setup and configure a simulation environment in order to provide a
# multi-sensor moving platform, as such the application of a tracker will not be covered in detail. For more information
# about trackers and how to configure them review of the tutorials and demonstrations is recommended.
#
# This example makes use of Stone Soup :class:`~.MovingPlatform`, :class:`~.MultiTransitionMovingPlatform` and
# :class:`~.Sensor` objects.
#
# In order to configure platforms, sensors and the simulation we will need to import some specific Stone Soup objects.
# As these have been introduced in previous tutorials they are imported upfront. New functionality within this example
# will be imported at the relevant point in order to draw attention to the new features.
# Some general imports and set up
from datetime import datetime
from datetime import timedelta
from matplotlib import pyplot as plt
import numpy as np
# Stone Soup imports:
from stonesoup.types.state import State, GaussianState
from stonesoup.types.array import StateVector
from stonesoup.types.array import CovarianceMatrix
from stonesoup.models.transition.linear import (
CombinedLinearGaussianTransitionModel, ConstantVelocity)
from stonesoup.predictor.particle import ParticlePredictor
from stonesoup.resampler.particle import SystematicResampler
from stonesoup.updater.particle import ParticleUpdater
from stonesoup.measures import Mahalanobis
from stonesoup.hypothesiser.distance import DistanceHypothesiser
from stonesoup.dataassociator.neighbour import GNNWith2DAssignment
from stonesoup.tracker.simple import SingleTargetTracker
# Define the simulation start time
start_time = datetime.now()
# %%
# Create a multi-sensor platform
# ------------------------------
# We have previously demonstrated how to create a :class:`~.FixedPlatform` which exploited a
# :class:`~.RadarRangeBearingElevation` *Sensor* in order to detect and track targets generated within a
# :class:`~.MultiTargetGroundTruthSimulator`.
#
# In this example we are going to create a moving platform which will be mounted with a pair of sensors and moves within
# a 6-dimensional state space according to the following :math:`\mathbf{x}`.
#
# .. math::
# \mathbf{x} = \begin{bmatrix}
# x\\ \dot{x}\\ y\\ \dot{y}\\ z\\ \dot{z} \end{bmatrix}
# = \begin{bmatrix}
# 0\\ 0\\ 0\\ 50\\ 8000\\ 0 \end{bmatrix}
#
# The platform will be initiated with a near constant velocity model which has been parameterised to have zero noise.
# Therefore the platform location at time :math:`k` is given by :math:`F_{k}x_{k-1}` where :math:`F_{k}` is given by:
#
# .. math::
# F_{k} = \begin{bmatrix}
# 1 & \triangle k & 0 & 0 & 0 & 0\\
# 0 & 1 & 0 & 0 & 0 & 0\\
# 0 & 0 & 1 & \triangle k & 0 & 0\\
# 0 & 0 & 0 & 1 & 0 & 0\\
# 0 & 0 & 0 & 0 & 1 & \triangle k \\
# 0 & 0 & 0 & 0 & 0 & 1\\
# \end{bmatrix}
# First import the Moving platform
from stonesoup.platform.base import MovingPlatform
# Define the initial platform position, in this case the origin
initial_loc = StateVector([[0], [0], [0], [50], [8000], [0]])
initial_state = State(initial_loc, start_time)
# Define transition model and position for 3D platform
transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# create our fixed platform
sensor_platform = MovingPlatform(states=initial_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
transition_model=transition_model)
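# %%
# As a quick illustration (added here; not part of the original example) the transition
# matrix :math:`F_{k}` described above can be inspected directly, assuming the combined
# model exposes the ``matrix(time_interval=...)`` method common to Stone Soup linear models:
print(transition_model.matrix(time_interval=timedelta(seconds=1)))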
# %%
# With our platform generated we now need to build a set of sensors which will be mounted onto the platform. In this
# case we will exploit a :class:`~.RadarElevationBearingRangeRate` and a :class:`~.PassiveElevationBearing` sensor
# (e.g. an optical sensor, which has no capability to directly measure range).
#
# First we will create a radar which is capable of measuring bearing (:math:`\phi`), elevation (:math:`\theta`), range
# (:math:`r`) and range-rate (:math:`\dot{r}`) of the target platform.
# Import a range rate bearing elevation capable radar
from stonesoup.sensor.radar.radar import RadarElevationBearingRangeRate
# Create a radar sensor
radar_noise_covar = CovarianceMatrix(np.diag(
np.array([np.deg2rad(3), # Elevation
np.deg2rad(3), # Bearing
100., # Range
25.]))) # Range Rate
# radar mountings
radar_mounting_offsets = StateVector([10, 0, 0]) # e.g. nose cone
radar_rotation_offsets = StateVector([0, 0, 0])
# Mount the radar onto the platform
radar = RadarElevationBearingRangeRate(ndim_state=6,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
noise_covar=radar_noise_covar,
mounting_offset=radar_mounting_offsets,
rotation_offset=radar_rotation_offsets,
)
sensor_platform.add_sensor(radar)
# %%
# Our second sensor is a passive sensor, capable of measuring the bearing (:math:`\phi`) and elevation (:math:`\theta`)
# of the target platform. For the purposes of this example we will assume that the passive sensor is an imager.
# The imager sensor model is described by the following equations:
#
# .. math::
# \mathbf{z}_k = h(\mathbf{x}_k, \dot{\mathbf{x}}_k)
#
# where:
#
# * :math:`\mathbf{z}_k` is a measurement vector of the form:
#
# .. math::
# \mathbf{z}_k = \begin{bmatrix} \theta \\ \phi \end{bmatrix}
#
# * :math:`h` is a non - linear model function of the form:
#
# .. math::
# h(\mathbf{x}_k,\dot{\mathbf{x}}_k) = \begin{bmatrix}
# \arcsin(\mathcal{z} /\sqrt{\mathcal{x} ^ 2 + \mathcal{y} ^ 2 +\mathcal{z} ^ 2}) \\
#               \arctan(\mathcal{y},\mathcal{x}) \\
# \end{bmatrix} + \dot{\mathbf{x}}_k
#
# * :math:`\mathbf{z}_k` is Gaussian distributed with covariance :math:`R`, i.e.:
#
# .. math::
# \mathbf{z}_k \sim \mathcal{N}(0, R)
#
# .. math::
# R = \begin{bmatrix}
# \sigma_{\theta}^2 & 0 \\
# 0 & \sigma_{\phi}^2 \\
# \end{bmatrix}
# Import a passive sensor capability
from stonesoup.sensor.passive import PassiveElevationBearing
imager_noise_covar = CovarianceMatrix(np.diag(np.array([np.deg2rad(0.05), # Elevation
np.deg2rad(0.05)]))) # Bearing
# imager mounting offset
imager_mounting_offsets = StateVector([0, 8, -1]) # e.g. wing mounted imaging pod
imager_rotation_offsets = StateVector([0, 0, 0])
# Mount the imager onto the platform
imager = PassiveElevationBearing(ndim_state=6,
mapping=(0, 2, 4),
noise_covar=imager_noise_covar,
mounting_offset=imager_mounting_offsets,
rotation_offset=imager_rotation_offsets,
)
sensor_platform.add_sensor(imager)
# %%
# Notice that we have added sensors to specific locations on the aircraft, defined by the mounting_offset parameter.
# The values in this array are defined in the platform's local coordinate frame of reference. So in this case an offset
# of :math:`[0, 8, -1]` means the sensor is located 8 meters to the right and 1 meter below the center point of the
# platform.
#
# Now that we have mounted the two sensors we can see that the platform object has both associated with it:
sensor_platform.sensors
# %%
# Create a Target Platform
# ------------------------
# There are two ways of generating a target in Stone Soup. Firstly, we can use the inbuilt ground-truth generator
# functionality within Stone Soup, which we demonstrated in the previous example; it creates a random target based on
# our selected parameters. The second method provides a means to generate a target which will perform specific
# behaviours, this is the approach we will take here.
#
# In order to create a target which moves in pre-defined sequences we exploit the fact that platforms can be used as
# sensor targets within a simulation, coupled with the :class:`~.MultiTransitionMovingPlatform` which enables a platform
# to be provided with a pre-defined list of transition models and transition times. The platform will continue to loop
# over the transition sequence provided until the simulation ends.
#
# When simulating sensor platforms it is important to note that within the simulation Stone Soup treats all platforms as
# potential targets. Therefore if we created multiple sensor platforms they would each *sense* all other platforms
# within the simulation (sensor-target geometry dependent).
#
# For this example we will create an air target which will fly a sequence of straight and level followed by a
# coordinated turn in the :math:`x-y` plane. This is configured such that the target will perform each manoeuvre for 8
# seconds, and it will turn through 45 degrees over the course of the turn manoeuvre.
# Import a Constant Turn model to enable target to perform basic manoeuvre
from stonesoup.models.transition.linear import ConstantTurn
straight_level = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(0.), ConstantVelocity(0.), ConstantVelocity(0.)])
# Configure the aircraft turn behaviour
turn_noise_diff_coeffs = np.array([0., 0.])
turn_rate = np.pi/32 # specified in radians per second
turn_model = ConstantTurn(turn_noise_diff_coeffs=turn_noise_diff_coeffs, turn_rate=turn_rate)
# Configure turn model to maintain current altitude
turning = CombinedLinearGaussianTransitionModel(
[turn_model, ConstantVelocity(0.)])
manoeuvre_list = [straight_level, turning]
manoeuvre_times = [timedelta(seconds=8),
timedelta(seconds=8)]
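# %%
# A quick arithmetic check (added for illustration): at ``turn_rate`` radians per second the
# 8 second turning segment sweeps :math:`\pi/32 \times 8 = \pi/4` radians, i.e. the 45 degree
# turn described above.
assert np.isclose(turn_rate * 8, np.pi / 4)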
# %%
# Now that we have created a list of manoeuvre behaviours and durations we can build our multi-transition moving
# platform. Because we intend for this platform to be a target we do not need to attach any sensors to it.
# Import a multi-transition moving platform
from stonesoup.platform.base import MultiTransitionMovingPlatform
initial_target_location = StateVector([[0], [-40], [1800], [0], [8000], [0]])
initial_target_state = State(initial_target_location, start_time)
target = MultiTransitionMovingPlatform(transition_models=manoeuvre_list,
transition_times=manoeuvre_times,
states=initial_target_state,
position_mapping=(0, 2, 4),
velocity_mapping=(1, 3, 5),
sensors=None)
# %%
# Creating the simulator
# ----------------------
# Now that we have built our sensor platform and a target platform, we need to wrap them in a simulator. Because we do
# not want any additional ground truth objects, which is how most simulators work in Stone Soup, we need to use a
# :class:`~.DummyGroundTruthSimulator` which returns a set of empty ground truth paths with timestamps. These are then
# fed into a :class:`~.PlatformDetectionSimulator` with the two platforms we have already built.
# Import the required simulators
from stonesoup.simulator.simple import DummyGroundTruthSimulator
from stonesoup.simulator.platform import PlatformDetectionSimulator
# %%
# We now need to create an array of timestamps which starts at *datetime.now()* and enable the simulator to run for
# 25 seconds.
times = np.arange(0, 24, 1) # 25 seconds
timestamps = [start_time + timedelta(seconds=float(elapsed_time)) for elapsed_time in times]
truths = DummyGroundTruthSimulator(times=timestamps)
sim = PlatformDetectionSimulator(groundtruth=truths, platforms=[sensor_platform, target])
# %%
# Create a Tracker
# ------------------------------------
# Now that we have setup our sensor platform, target and simulation we need to create a tracker. For this example we
# will use a Particle Filter as this enables us to handle the non-linear nature of the imaging sensor. In this example
# we will use an inflated constant noise model to account for target motion uncertainty.
#
# Note that we don't add a measurement model to the updater; this is because each sensor adds its measurement model to
# each detection it generates. The tracker handles this internally by checking for a measurement model with each
# detection it receives and applying only the relevant measurement model.
target_transition_model = CombinedLinearGaussianTransitionModel(
[ConstantVelocity(5), ConstantVelocity(5), ConstantVelocity(1)])
# First add a Particle Predictor
predictor = ParticlePredictor(target_transition_model)
# Now create a resampler and particle updater
resampler = SystematicResampler()
updater = ParticleUpdater(measurement_model=None,
resampler=resampler)
# Create a particle initiator
from stonesoup.initiator.simple import GaussianParticleInitiator, SinglePointInitiator
single_point_initiator = SinglePointInitiator(
GaussianState([[0], [-40], [2000], [0], [8000], [0]], np.diag([10000, 1000, 10000, 1000, 10000, 1000])),
None)
initiator = GaussianParticleInitiator(number_particles=500,
initiator=single_point_initiator)
hypothesiser = DistanceHypothesiser(predictor, updater, measure=Mahalanobis(), missed_distance=np.inf)
data_associator = GNNWith2DAssignment(hypothesiser)
from stonesoup.deleter.time import UpdateTimeStepsDeleter
deleter = UpdateTimeStepsDeleter(time_steps_since_update=10)
# Create a Kalman single-target tracker
tracker = SingleTargetTracker(
initiator=initiator,
deleter=deleter,
detector=sim,
data_associator=data_associator,
updater=updater
)
# %%
# The final step is to iterate our tracker over the simulation and plot out the results. Because we have a bearing
# only sensor it does not make sense to plot out the detections without animating the resulting plot. This
# animation shows the sensor platform (blue) moving towards the true target position (red). The estimated target
# position is shown in black, radar detections are shown in yellow while the bearing only imager detections are
# coloured green.
from matplotlib import animation
import matplotlib
matplotlib.rcParams['animation.html'] = 'jshtml'
from stonesoup.models.measurement.nonlinear import CartesianToElevationBearingRangeRate
from stonesoup.functions import sphere2cart
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
frames = []
for time, ctracks in tracker:
artists = []
ax.set_xlabel("$East$")
ax.set_ylabel("$North$")
ax.set_ylim(0, 2250)
ax.set_xlim(-1000, 1000)
X = [state.state_vector[0] for state in sensor_platform]
Y = [state.state_vector[2] for state in sensor_platform]
artists.extend(ax.plot(X, Y, color='b'))
for detection in sim.detections:
if isinstance(detection.measurement_model, CartesianToElevationBearingRangeRate):
x, y = detection.measurement_model.inverse_function(detection)[[0, 2]]
color = 'y'
else:
r = 10000000
# extract the platform rotation offsets
_, el_offset, az_offset = sensor_platform.orientation
# obtain measurement angles and map to cartesian
e, a = detection.state_vector
x, y, _ = sphere2cart(r, a + az_offset, e + el_offset)
color = 'g'
X = [sensor_platform.state_vector[0], x]
Y = [sensor_platform.state_vector[2], y]
artists.extend(ax.plot(X, Y, color=color))
X = [state.state_vector[0] for state in target]
Y = [state.state_vector[2] for state in target]
artists.extend(ax.plot(X, Y, color='r'))
for track in ctracks:
X = [state.state_vector[0] for state in track]
Y = [state.state_vector[2] for state in track]
artists.extend(ax.plot(X, Y, color='k'))
frames.append(artists)
animation.ArtistAnimation(fig, frames)
# %%
# To increase your confidence with simulated platform targets it would be good practice to modify the target to fly
# pre-defined shapes, a race track oval for example. You could also experiment with different sensor performance levels
# in order to see at what point the tracker is no longer able to generate a reasonable estimate of the target location.
# %%
# Key points
# ----------
# 1. Platforms, static or moving, can be used as targets for sensor platforms.
# 2. Simulations can be built with only known platform behaviours when you want to test specific scenarios.
# 3. A tracker can be configured to exploit all sensor data created in a simulation.
| 44.452685 | 120 | 0.693976 | [
"MIT"
] | GitRooky/Stone-Soup | docs/examples/Moving_Platform_Simulation.py | 17,381 | Python |
#!/usr/bin/env python
import unittest
from weblogo.seq_io._nexus import Nexus
from . import data_stream
class test_nexus(unittest.TestCase):
def test_create(self):
n = Nexus()
self.assertNotEqual(n, None)
def test_parse_f0(self):
f = data_stream("nexus/test_Nexus_input.nex")
n = Nexus(f)
# self.output_basics(n)
expected = [
"t1",
"t2 the name",
"isn'that [a] strange name?",
"one should be punished, for (that)!",
"t5",
"t6",
"t7",
"t8",
"t9",
]
taxa = n.taxlabels
self.assertEqual(taxa, expected)
f.close()
def test_parse_protein(self):
f = data_stream("nexus/protein.nex")
Nexus(f)
f.close()
def test_parse_dna(self):
f = data_stream("nexus/dna.nex")
n = Nexus(f)
taxa = n.taxlabels
taxa.sort()
self.assertEqual(len(taxa), 10)
self.assertEqual(taxa[0], "Carp")
self.assertEqual(taxa[-1], "Whale")
f.close()
def test_TreeTest1(self):
"""Test Tree module."""
f = data_stream("nexus/test_Nexus_input.nex")
n = Nexus(f)
t3 = n.trees[2]
n.trees[2]
t3.root_with_outgroup(["t1", "t5"])
# Return node_id of common ancestor if
# taxon_list is monophyletic, -1 otherwise.
self.assertEqual(t3.is_monophyletic(["t1", "t5"]), 13)
t3.split(parent_id=t3.search_taxon("t9"))
f.close()
if __name__ == "__main__":
unittest.main()
| 23.463768 | 62 | 0.536751 | [
"MIT"
] | WebLogo/weblogo | tests/test_nexus.py | 1,619 | Python |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import GPy
from emukit.quadrature.methods.vanilla_bq import VanillaBayesianQuadrature
from emukit.quadrature.loop.quadrature_loop import VanillaBayesianQuadratureLoop
from emukit.core.loop.user_function import UserFunctionWrapper
from emukit.model_wrappers.gpy_quadrature_wrappers import QuadratureRBF, RBFGPy, BaseGaussianProcessGPy
from numpy.testing import assert_array_equal
def func(x):
return np.ones((x.shape[0], 1))
def test_vanilla_bq_loop():
init_size = 5
x_init = np.random.rand(init_size, 2)
y_init = np.random.rand(init_size, 1)
bounds = [(-1, 1), (0, 1)]
gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=GPy.kern.RBF(input_dim=x_init.shape[1],
lengthscale=1., variance=1.))
emukit_qrbf = QuadratureRBF(RBFGPy(gpy_model.kern), integral_bounds=bounds)
emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=gpy_model)
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
num_iter = 5
emukit_loop.run_loop(user_function=UserFunctionWrapper(func), stopping_condition=num_iter)
assert emukit_loop.loop_state.X.shape[0] == num_iter + init_size
assert emukit_loop.loop_state.Y.shape[0] == num_iter + init_size
def test_vanilla_bq_loop_initial_state():
x_init = np.random.rand(5, 2)
y_init = np.random.rand(5, 1)
bounds = [(-1, 1), (0, 1)]
gpy_model = GPy.models.GPRegression(X=x_init, Y=y_init, kernel=GPy.kern.RBF(input_dim=x_init.shape[1],
lengthscale=1., variance=1.))
emukit_qrbf = QuadratureRBF(RBFGPy(gpy_model.kern), integral_bounds=bounds)
emukit_model = BaseGaussianProcessGPy(kern=emukit_qrbf, gpy_model=gpy_model)
emukit_method = VanillaBayesianQuadrature(base_gp=emukit_model)
emukit_loop = VanillaBayesianQuadratureLoop(model=emukit_method)
assert_array_equal(emukit_loop.loop_state.X, x_init)
assert_array_equal(emukit_loop.loop_state.Y, y_init)
assert emukit_loop.loop_state.iteration == 0
| 41.428571 | 109 | 0.721121 | [
"Apache-2.0"
] | DavidJanz/emukit | integration_tests/emukit/quadrature/test_vanilla_bq_loop.py | 2,320 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceAntestCaselistQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayCommerceAntestCaselistQueryResponse, self).__init__()
self._data = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
def parse_response_content(self, response_content):
response = super(AlipayCommerceAntestCaselistQueryResponse, self).parse_response_content(response_content)
if 'data' in response:
self.data = response['data']
| 26.692308 | 114 | 0.706052 | [
"Apache-2.0"
] | Anning01/alipay-sdk-python-all | alipay/aop/api/response/AlipayCommerceAntestCaselistQueryResponse.py | 694 | Python |
from markovp import Markov
from src.forms import Markov_Form
from flask import Flask, render_template, request, redirect, url_for, Blueprint, make_response
home = Blueprint("home", __name__)
@home.route("/")
def index():
#{
form = Markov_Form()
return render_template('form.html', form = form)
#}
# The submission page.
@home.route("/submit", methods=["GET"])
def submit():
#{
if request.method == "GET":
#{
if request.args.get('submit_button'):
#{
# Get form values.
# http://stackoverflow.com/a/20341272/5415895
text = request.args.get("input_text")
# We have to cast text as a string, otherwise C++ complains.
mark = Markov(str(text), 1)
output = mark.generate()
return render_template("output.html", input = str(text), output = output)
#}
else:
#{
# Make sure nobody can access the submit path without submitting.
            # Endpoint is prefixed with the blueprint name "home".
            return redirect(url_for('home.index'))
#}
#}
else:
#{
        return redirect(url_for('home.index'))
#}
#} | 27.073171 | 94 | 0.583784 | [
"MIT"
] | TexAgg/MarkovTextGenerator | web/src/views/home.py | 1,110 | Python |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Construct the Kronecker product of one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
def _vec(x):
"""Stacks column of matrix to form a single column."""
return array_ops.reshape(
array_ops.matrix_transpose(x),
array_ops.concat(
[array_ops.shape(x)[:-2], [-1]], axis=0))
def _unvec_by(y, num_col):
"""Unstack vector to form a matrix, with a specified amount of columns."""
return array_ops.matrix_transpose(
array_ops.reshape(
y,
array_ops.concat(
[array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))
def _rotate_last_dim(x, rotate_right=False):
"""Rotate the last dimension either left or right."""
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat(
[[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
else:
transpose_perm = array_ops.concat(
[math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm)
class LinearOperatorKronecker(linear_operator.LinearOperator):
"""Kronecker product between two `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` representing the Kronecker product:
`op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is
associative).
  If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator
will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,
where the product is over all operators.
```python
# Create a 4 x 4 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])
operator = LinearOperatorKronecker([operator_1, operator_2])
operator.to_dense()
==> [[1., 2., 0., 0.],
[3., 4., 0., 0.],
[2., 4., 1., 2.],
[6., 8., 3., 4.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [4, 2] Tensor
operator.matmul(x)
==> Shape [4, 2] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])
  operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 20 x 30 operators.
operator_large = LinearOperatorKronecker([operator_45, operator_56])
  # Create a shape [2, 3, 30, 2] vector.
  x = tf.random_normal(shape=[2, 3, 30, 2])
  operator_large.matmul(x)
  ==> Shape [2, 3, 20, 2] Tensor
```
#### Performance
The performance of `LinearOperatorKronecker` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a list of >=1 operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
# A Kronecker product is invertible, if and only if all factors are
# invertible.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The Kronecker product of non-singular operators is always "
"non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The Kronecker product of self-adjoint operators is always "
"self-adjoint.")
is_self_adjoint = True
    # The eigenvalues of a Kronecker product are equal to the products of the
    # eigenvalues of the corresponding factors.
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError("The Kronecker product of positive-definite operators "
"is always positive-definite.")
is_positive_definite = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = operators[0].name
for operator in operators[1:]:
name += "_x_" + operator.name
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension
range_dimension = self.operators[0].range_dimension
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension
matrix_shape = tensor_shape.TensorShape([
range_dimension, domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
domain_dimension = self.operators[0].domain_dimension_tensor()
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension_tensor()
range_dimension = self.operators[0].range_dimension_tensor()
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension_tensor()
matrix_shape = [range_dimension, domain_dimension]
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape_tensor()
for operator in self.operators[1:]:
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape, operator.batch_shape_tensor())
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Here we heavily rely on Roth's column Lemma [1]:
# (A x B) * vec X = vec BXA^T,
# where vec stacks all the columns of the matrix under each other. In our
# case, x represents a batch of vec X (i.e. we think of x as a batch of
# column vectors, rather than a matrix). Each member of the batch can be
# reshaped to a matrix (hence we get a batch of matrices).
# We can iteratively apply this lemma by noting that if B is a Kronecker
# product, then we can apply the lemma again.
# [1] W. E. Roth, "On direct product matrices,"
# Bulletin of the American Mathematical Society, vol. 40, pp. 461-468,
# 1934
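    # (Editorial illustration, not used by the code below.) With the
    # column-major vec used here, the lemma can be checked numerically for
    # small matrices: for 2 x 2 matrices A, B and X,
    #   np.kron(A, B) @ X.flatten(order='F')
    # equals
    #   (B @ X @ A.T).flatten(order='F'),
    # which is exactly the per-factor reshaping trick exploited below.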
# Efficiency
# Naively doing the Kronecker product, by calculating the dense matrix and
# applying it will can take cubic time in the size of domain_dimension
# (assuming a square matrix). The other issue is that calculating the dense
# matrix can be prohibitively expensive, in that it can take a large amount
# of memory.
#
# This implementation avoids this memory blow up by only computing matmuls
# with the factors. In this way, we don't have to realize the dense matrix.
# In terms of complexity, if we have Kronecker Factors of size:
# (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \prod n_i, and we
# have as input a [N, M] matrix, the naive approach would take O(N^2 M).
# With this approach (ignoring reshaping of tensors and transposes for now),
# the time complexity can be O(M * (\sum n_i) * N). There is also the
    # benefit of batched multiplication (in this example, the batch size is
    # roughly M * N), so this can be much faster. However, this does not factor
    # in the cost of the several tensor transposes, which can affect cache
    # behavior.
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
if adjoint_arg:
x = linalg.adjoint(x)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)
    # x has shape [B, R, C], where B represents some number of batch dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(x, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^T) = (AX^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].matvec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if x.shape.is_fully_defined():
column_dim = x.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
x.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _determinant(self):
# Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m
# matrix, and X2 is an n x n matrix. We can iteratively apply this property
# to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the
# domain dimension of all operators, then we have:
# |X1 x X2 x X3 ...| =
# |X1| ** (T / m) * |X2 x X3 ... | ** m =
# |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... =
# |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n)
# And by doing induction we have product(|X_i| ** (T / dim(X_i))).
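    # (Editorial illustration) For two factors with m = 2, n = 3, and hence
    # T = 6, the loop below computes |X1| ** (6 / 2) * |X2| ** (6 / 3)
    # = |X1| ** 3 * |X2| ** 2.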
total = self.domain_dimension_tensor()
determinant = 1.
for operator in self.operators:
determinant *= operator.determinant() ** math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return determinant
def _log_abs_determinant(self):
# This will be sum((total / dim(x_i)) * log |X_i|)
total = self.domain_dimension_tensor()
log_abs_det = 0.
for operator in self.operators:
log_abs_det += operator.log_abs_determinant() * math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return log_abs_det
def _trace(self):
# tr(A x B) = tr(A) * tr(B)
trace = 1.
for operator in self.operators:
trace *= operator.trace()
return trace
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# Here we follow the same use of Roth's column lemma as in `matmul`, with
# the key difference that we replace all `matmul` instances with `solve`.
# This follows from the property that inv(A x B) = inv(A) x inv(B).
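    # (Editorial note) Concretely, solving (A x B) y = vec X amounts to
    # y = vec(B^-1 X A^-T), so the per-factor sweep below mirrors `_matmul`
    # with each `matmul`/`matvec` replaced by `solve`/`solvevec`.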
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
if adjoint_arg:
rhs = linalg.adjoint(rhs)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)
    # rhs has shape [B, R, C], where B represents some number of batch
    # dimensions, R represents the number of rows, and C represents the number
    # of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(rhs, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^-1^T) = (A^-1 X^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].solvevec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if rhs.shape.is_fully_defined():
column_dim = rhs.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
rhs.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _diag_part(self):
diag_part = self.operators[0].diag_part()
for operator in self.operators[1:]:
diag_part = diag_part[..., :, array_ops.newaxis]
op_diag_part = operator.diag_part()[..., array_ops.newaxis, :]
diag_part *= op_diag_part
diag_part = array_ops.reshape(
diag_part,
shape=array_ops.concat(
[array_ops.shape(diag_part)[:-2], [-1]], axis=0))
if self.range_dimension > self.domain_dimension:
diag_dimension = self.domain_dimension
else:
diag_dimension = self.range_dimension
diag_part.set_shape(
self.batch_shape.concatenate(diag_dimension))
return diag_part
def _to_dense(self):
product = self.operators[0].to_dense()
for operator in self.operators[1:]:
# Product has shape [B, R1, 1, C1].
product = product[
..., :, array_ops.newaxis, :, array_ops.newaxis]
# Operator has shape [B, 1, R2, 1, C2].
op_to_mul = operator.to_dense()[
..., array_ops.newaxis, :, array_ops.newaxis, :]
# This is now [B, R1, R2, C1, C2].
product *= op_to_mul
# Now merge together dimensions to get [B, R1 * R2, C1 * C2].
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
product.set_shape(self.shape)
return product
def _assert_non_singular(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_non_singular() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be invertible.")
def _assert_self_adjoint(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_self_adjoint() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be self adjoint.")
| 40.215686 | 80 | 0.673685 | [
"Apache-2.0"
] | ADiegoCAlonso/tensorflow | tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py | 22,561 | Python |
import threading
import sys
class ThreadHandler(object):
def __init__(self, name, callable, *args, **kwargs):
# Set up exception handling
self.exception = None
def wrapper(*args, **kwargs):
try:
callable(*args, **kwargs)
except BaseException:
self.exception = sys.exc_info()
# Kick off thread
thread = threading.Thread(None, wrapper, name, args, kwargs)
thread.setDaemon(True)
thread.start()
# Make thread available to instantiator
self.thread = thread
| 28 | 68 | 0.588435 | [
"BSD-2-Clause"
] | akheron/fabric | fabric/thread_handling.py | 588 | Python |
# -*- coding: utf-8 -*-
# ===============================================================
# Author: Rodolfo Ferro
# Email: [email protected]
# Twitter: @FerroRodolfo
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by Rodolfo Ferro, for
# his workshop in HackSureste 2019 at Universidad Modelo
# in Mérida. Any explicit usage of this script or its
# contents is granted according to the license provided and
# its conditions.
# ===============================================================
from flask import Flask, jsonify, request, render_template
from iris import iris_classifier
from pprint import pprint
import numpy as np
import requests
import json
# Main app:
app = Flask(__name__)
# Global:
version = 'v0.0'
classifier = iris_classifier()
species = {
'0': 'I. setosa',
'1': 'I. versicolor',
'2': 'I. virginica'
}
# Static website:
@app.route('/')
def index():
return render_template("index.html")
# API MAIN STRUCTURE:
@app.route('/api/' + version, methods=['GET'])
def test():
"""
GET method to test the API.
"""
# Output message:
message = {
"response": [
{
"text": "Hello world!"
}
]
}
return jsonify(message)
@app.route('/api/' + version + '/predict', methods=['POST'])
def predict():
"""
POST method to predict with our classification model.
"""
# Get data from JSON object in POST method:
req_data = request.get_json()
# Parse data from JSON:
sl = req_data['sepal_length']
sw = req_data['sepal_width']
pl = req_data['petal_length']
pw = req_data['petal_width']
# Predict with model:
input_data = np.array([[sl, sw, pl, pw]])
prediction = classifier.predict(input_data)
print(prediction)
# Output message:
message = {"response": [
{"input": {
'sepal_length': sl,
'sepal_width': sw,
'petal_length': pl,
'petal_width': pw
}},
{"prediction": int(prediction[0])},
{"species": species[str(prediction[0])]}]}
return jsonify(message)
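# Example call to the prediction endpoint above (illustrative only; the host
# and port assume the default dev server started at the bottom of this file):
#
#   curl -X POST http://localhost:5000/api/v0.0/predict \
#        -H "Content-Type: application/json" \
#        -d '{"sepal_length": 5.1, "sepal_width": 3.5,
#             "petal_length": 1.4, "petal_width": 0.2}'
#
# The JSON response echoes the input and adds the predicted class index and the
# corresponding species name.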
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
response = jsonify(message)
response.status_code = 404
return response
if __name__ == '__main__':
app.run(debug=True, port=5000)
| 22.518519 | 65 | 0.581003 | [
"MIT"
] | RodolfoFerro/iris-api | app.py | 2,433 | Python |
from pydantic import BaseModel
from .utils import BaseEvent
class MainPublisherEvent(BaseEvent):
pass
class CheckStatus(MainPublisherEvent):
channel: str
class WaitLiveVideo(MainPublisherEvent):
pass
class WaitStream(MainPublisherEvent):
time: int
class DownloaderEvent(BaseEvent):
pass
class StartDownloading(DownloaderEvent):
id: str
class PlaylistUpdate(DownloaderEvent):
total_size: int
to_load: int
class DownloadedChunk(DownloaderEvent):
pass
class StopDownloading(DownloaderEvent):
pass
class DownloadingProgress(BaseModel): # type: ignore
total_segments: int = 0
total_downloaded_segments: int = 0
last_chunk_size: int = 0
downloaded_segments: int = 0
def chunk_loaded(self) -> None:
self.downloaded_segments += 1
self.total_downloaded_segments += 1
class ExceptionEvent(BaseEvent):
message: str
| 16.232143 | 53 | 0.733773 | [
"MIT"
] | tausackhn/twlived | twlived/events.py | 909 | Python |
from argparse import Action, Namespace
from typing import (List)
from .switch_config import SwitchConfigCLI
from ..switch import SwitchChip
class EraseConfigCLI(SwitchConfigCLI):
"""
The "erase" action that removes all stored items from the EEPROM memory.
"""
def __init__(self, subparsers: Action, switch: SwitchChip) -> None:
super().__init__(subparsers, switch)
self._subparser = self._subparsers.add_parser(
"erase",
help="Erase all configuration",
)
self._subparser.set_defaults(execute=self.apply)
def apply(self, args: Namespace) -> SwitchConfigCLI:
return self
def create_configuration(self) -> List[List[int]]:
return [[101, 0, 0, 0]]
| 28.692308 | 76 | 0.670241 | [
"MIT"
] | ararobotique/botblox-manager-software | botblox_config/data_manager/erase.py | 746 | Python |
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from flatten_dict import flatten
from fedlearner_webconsole.proto.workflow_definition_pb2 import Slot
from fedlearner_webconsole.workflow_template.template_validaor \
import YamlTemplate
class _YamlTemplate(YamlTemplate):
# Which placeholders in the template should be interpreted
idpattern = r'Slot_[a-z0-9_]*'
def substitute(self, mapping):
return super()._substitute(mapping,
fixed_placeholder=None,
ignore_invalid=True)
def format_yaml(yaml, **kwargs):
"""Formats a yaml template.
Example usage:
format_yaml('{"abc": ${x.y}}', x={'y': 123})
output should be '{"abc": 123}'
"""
template = _YamlTemplate(yaml)
try:
return template.substitute(flatten(kwargs or {},
reducer='dot'))
except KeyError as e:
raise RuntimeError(
'Unknown placeholder: {}'.format(e.args[0])) from e
def generate_yaml_template(base_yaml, slots_proto):
"""
Args:
base_yaml: A string representation of one type job's base yaml.
slots_proto: A proto map object representation of modification
template's operable smallest units.
Returns:
string: A yaml_template
"""
slots = {}
for key in slots_proto:
if slots_proto[key].reference_type == Slot.ReferenceType.DEFAULT:
slots[key] = slots_proto[key].default
else:
slots[key] = f'${{{slots_proto[key].reference}}}'
return format_yaml(base_yaml, **slots)
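# Illustrative sketch (editorial, not part of the original module): with
#   base_yaml = '{"image": "${Slot_image}", "workers": ${Slot_workers}}'
# and a slots_proto in which Slot_image has reference_type DEFAULT with default
# "fedlearner:latest" while Slot_workers references "workflow.variables.workers",
# generate_yaml_template would return
#   '{"image": "fedlearner:latest", "workers": ${workflow.variables.workers}}'
# DEFAULT slots are therefore filled in immediately, while referenced slots are
# rewritten into placeholders to be resolved later.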
| 34.484375 | 74 | 0.663344 | [
"Apache-2.0"
] | duanbing/fedlearner | web_console_v2/api/fedlearner_webconsole/workflow_template/slots_formatter.py | 2,207 | Python |
from dataclasses import dataclass, field
from enum import Enum
from typing import (
Callable,
Dict,
List,
Optional,
Union
)
import weakref
import threading
import torch
import torch.distributed as dist
from torch.distributed import rpc
from torch.distributed import distributed_c10d
from torch.distributed._sharding_spec import (
ChunkShardingSpec,
EnumerableShardingSpec,
ShardMetadata,
ShardingSpec,
)
from torch.distributed._sharding_spec._internals import (
check_tensor,
get_split_size,
get_chunked_dim_size,
validate_non_overlapping_shards_metadata,
)
from torch.types import Number
from .metadata import TensorProperties, ShardedTensorMetadata
from .shard import Shard
from .utils import (
get_current_process_group,
_flatten_tensor_size,
_parse_and_validate_remote_device,
_validate_output_tensor_for_gather,
build_metadata_from_local_shards,
build_global_metadata
)
# Tracking for sharded tensor objects.
_sharded_tensor_lock = threading.Lock()
_sharded_tensor_current_id = 0
_sharded_tensor_map: Dict[int, 'weakref.ReferenceType[ShardedTensor]'] = {}
# Custom sharded ops
_SHARDED_OPS: Dict[str, Callable] = {}
def _register_sharded_op(op, func):
from inspect import signature
if len(signature(func).parameters) != 4:
raise TypeError(
f'Custom sharded op function expects signature: '
f'(types, args, kwargs, process_group), but received '
f'signature: {signature(func)}')
global _SHARDED_OPS
_SHARDED_OPS[op] = func
def _register_remote_shards(sharded_tensor_id: int, rrefs: List[rpc.RRef[Shard]], rpc_rank: int):
with _sharded_tensor_lock:
if sharded_tensor_id not in _sharded_tensor_map:
raise RuntimeError(
f'Could not find sharded_tensor_id: {sharded_tensor_id} in map: {_sharded_tensor_map.keys()}')
sharded_tensor = _sharded_tensor_map[sharded_tensor_id]()
if sharded_tensor is None:
raise RuntimeError('ShardedTensor weakref has been deallocated')
else:
sharded_tensor._register_remote_shards(rrefs, rpc_rank)
class CreateOp(Enum):
EMPTY = 0
FULL = 1
ONES = 2
RAND = 3
ZEROS = 4
@dataclass
class TensorInitParams(object):
""" Container for list of common params to create new local tensor. """
create_op: CreateOp
# needed when create_op is FULL
# default set to False (not None) since None is incompatible with Number.
fill_value: Number = field(default=False)
tensor_properties: TensorProperties = field(
default=TensorProperties(dtype=torch.get_default_dtype(),
layout=torch.strided,
requires_grad=False,
memory_format=torch.contiguous_format,
pin_memory=False))
class ShardedTensor(object):
"""
ShardedTensor is an abstraction to represent Tensors that are sharded
across multiple devices and multiple processes.
ShardedTensor is initialized in an SPMD like fashion where each rank
initializes the ShardedTensor. The ShardedTensor object on each rank
then only stores the local shard for the Tensor and provides global
metadata for all the shards.
ShardedTensor doesn't provide any Tensor like operations but is a wrapper
providing the Tensor representing the local shard and the global metadata.
Using these, users can build their custom distributed sharded computations
on top of this primitive. The local shards are all initialized using the
create_op specified by tensor_init_params.create_op, e.g., torch.ones, or
torch.empty
Args:
sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
tensor_init_params (:class: `TensorInitParams`): common params to create tensor.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
.. note:: ShardedTensor uses collectives to do various operations, i.e. it
uses all_gather to do cross rank validations. For NCCL-based processed
groups, internal tensor representations of objects must be moved to the
GPU device before communication takes place. In this case, the device
used is given by ``torch.cuda.current_device()`` and it is the user's
        responsibility to ensure that this is set so that each rank has an
individual GPU, via ``torch.cuda.set_device()``
"""
def __new__(cls, *args, **kwargs):
# Use __new__ for logging purposes.
torch._C._log_api_usage_once("torch.distributed.sharded_tensor")
return super(ShardedTensor, cls).__new__(cls)
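    # Illustrative sketch (editorial, not part of the original file): on every
    # rank of the default process group, a 10 x 20 tensor of ones chunk-sharded
    # along dim 0 over two CUDA ranks might be constructed roughly as
    #
    #   spec = ChunkShardingSpec(
    #       dim=0,
    #       placements=["rank:0/cuda:0", "rank:1/cuda:1"],
    #   )
    #   st = ShardedTensor(
    #       spec, 10, 20,
    #       tensor_init_params=TensorInitParams(create_op=CreateOp.ONES),
    #   )
    #
    # after which each rank holds only its local shard(s) (`st.local_shards()`)
    # plus the global metadata (`st.metadata()`).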
def __init__(
self,
sharding_spec: ShardingSpec,
*size,
tensor_init_params: TensorInitParams,
process_group=None,
init_rrefs=False,
):
# prepare initialization, initialize fields like
# _process_group, _local_shards, etc.
self._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
if tensor_init_params.tensor_properties is None:
raise ValueError('tensor_properties must not be None.')
if tensor_init_params.tensor_properties.dtype is None:
tensor_init_params.tensor_properties.dtype = torch.get_default_dtype()
if tensor_init_params.tensor_properties.layout != torch.strided:
raise ValueError('Only torch.strided layout is currently supported')
if tensor_init_params.tensor_properties.memory_format != torch.contiguous_format:
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
dims = _flatten_tensor_size(size)
self._sharding_spec = sharding_spec
if isinstance(self._sharding_spec, ChunkShardingSpec):
self._init_chunked(dims, tensor_init_params)
elif isinstance(self._sharding_spec, EnumerableShardingSpec):
self._init_enumerable(dims, tensor_init_params)
else:
raise ValueError(f'Unsupported sharding_spec: {self._sharding_spec}')
# do post initialization (i.e. register sharded_tensor_id, initialize_rpc)
self._post_init()
def _prepare_init(self, process_group=None, init_rrefs=False):
self._init_rrefs = init_rrefs
self._sharded_tensor_id = None
self._process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
self._local_shards: List[Shard] = []
self._remote_shards: Dict[int, List[rpc.RRef[Shard]]] = {}
def _post_init(self):
# Initialize RPC if available.
if self._init_rrefs:
with _sharded_tensor_lock:
global _sharded_tensor_current_id, _sharded_tensor_map
self._sharded_tensor_id = _sharded_tensor_current_id
_sharded_tensor_map[self._sharded_tensor_id] = weakref.ref(self)
_sharded_tensor_current_id += 1
if not rpc._is_current_rpc_agent_set():
raise RuntimeError(
'RPC Framework needs to be initialized using'
' torch.distributed.rpc.init_rpc if init_rrefs is set to True')
self._init_rpc()
def __del__(self):
# Clean up the global map.
with _sharded_tensor_lock:
global _sharded_tensor_current_id, _sharded_tensor_map
if self._sharded_tensor_id in _sharded_tensor_map:
_sharded_tensor_map.pop(self._sharded_tensor_id) # type: ignore[call-overload]
def _init_rpc(self):
# Validate PG and RPC ranks match.
pg_rank = dist.get_rank()
rpc_rank = rpc.get_worker_info().id
if pg_rank != rpc_rank:
raise ValueError(
f'Default ProcessGroup and RPC ranks must be '
f'the same for ShardedTensor, found process group rank: '
f'{pg_rank} and RPC rank: {rpc_rank}'
)
self._remote_shards = {}
# Gather all the sharded tensor ids.
worker_infos = rpc._get_current_rpc_agent().get_worker_infos()
rank_to_name = {}
name_to_rank = {}
for worker_info in worker_infos:
rank_to_name[worker_info.id] = worker_info.name
name_to_rank[worker_info.name] = worker_info.id
all_tensor_ids = rpc.api._all_gather(self._sharded_tensor_id)
# Share the local shards to the entire world.
futs = []
rpc_rank = rpc.get_worker_info().id
for rank in range(dist.get_world_size()):
# Skip self.
if rank == dist.get_rank():
continue
if len(self.local_shards()) != 0:
rrefs: List[rpc.RRef[Shard]] = [rpc.RRef(shard) for shard in self.local_shards()]
fut = rpc.rpc_async(
rank,
_register_remote_shards,
args=(all_tensor_ids[rank_to_name[rank]], rrefs, rpc_rank))
futs.append(fut)
torch.futures.wait_all(futs)
# Barrier for all RPCs to finish on all ranks.
rpc.api._all_gather(None)
def gather(
self,
dst: int = 0,
out: Optional[torch.Tensor] = None,
) -> None:
"""
Creates a full :class:`Tensor` on rank ``dst`` by gathering all shards of the
sharded tensor.
The API needs to be called on all ranks in SPMD fashion. All ranks should have
the same ``dst``. ``out`` should be a tensor of the same size as the overall
size of the sharded tensor on ``dst`` and ``None`` on all other ranks.
Args:
dst(int): The rank where full tensor is constructed.
Default: 0
            out (:class:`torch.Tensor`, optional): The output full tensor.
                Must be provided ONLY on ``dst`` rank.
Default: ``None``
"""
rank = dist.get_rank(self._process_group)
full_size = self.metadata().size
_validate_output_tensor_for_gather(rank, dst, full_size, out)
local_shards = self.local_shards()
world_size = dist.get_world_size(self._process_group)
gathered_shards = [None] * world_size
# will revise this part with CPU support and use dist.gather()
# once NCCL support for gather() is ready
# https://github.com/pytorch/pytorch/issues/66187
dist.all_gather_object(
obj=local_shards,
object_list=gathered_shards,
group=self._process_group,
)
if rank == dst:
dims = len(full_size)
for shards in gathered_shards:
if shards is None:
raise RuntimeError(
                        f'Gathered shards cannot be None on dst rank {dst}'
)
for shard in shards:
metadata = shard.metadata
tensor = shard.tensor
out_narrow_view = out
for dim in range(dims):
out_narrow_view = out_narrow_view.narrow(
dim,
metadata.shard_offsets[dim],
metadata.shard_sizes[dim],
)
out_narrow_view.copy_(tensor)
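    # (Editorial note) Typical SPMD usage of `gather`, sketched: every rank
    # calls
    #   full = torch.empty(st.size()) if rank == dst else None
    #   st.gather(dst=dst, out=full)
    # so that only the destination rank allocates and receives the full tensor.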
@classmethod
def _init_from_local_shards(
cls,
local_shards: List[Shard],
*global_size,
process_group=None,
init_rrefs=False,
):
        # STEP 1: Validate the ShardMetadatas locally
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
world_size = dist.get_world_size(process_group)
local_sharded_tensor_metadata: Optional[ShardedTensorMetadata] = None
global_tensor_size = _flatten_tensor_size(global_size)
if len(local_shards) > 0:
local_sharded_tensor_metadata = \
build_metadata_from_local_shards(local_shards, global_tensor_size, current_rank, process_group)
# STEP 2. Validate metadata across ranks, and build a global sharded tensor
# metadata by gathering local ShardedTensorMetadata
gathered_metadatas: List[Optional[ShardedTensorMetadata]] = []
if world_size > 1:
gathered_metadatas = [None for _ in range(world_size)]
dist.all_gather_object(
gathered_metadatas,
local_sharded_tensor_metadata,
group=process_group
)
else:
gathered_metadatas = [local_sharded_tensor_metadata]
global_sharded_tensor_metadata = build_global_metadata(gathered_metadatas)
# STEP 3: Validation done, create the actual ShardedTensor and populate fields
# prepare initialization
sharded_tensor = cls.__new__(cls)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
# add to metadata and local_shards
sharded_tensor._metadata = global_sharded_tensor_metadata
sharded_tensor._local_shards = local_shards
# make a EnumerableShardingSpec for sharded tensors that initialized from this API.
# TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list.
# see issue https://github.com/pytorch/pytorch/issues/67244
sharded_tensor._sharding_spec = EnumerableShardingSpec(global_sharded_tensor_metadata.shards_metadata)
# run post initialization, i.e. map registration, rpc initialization
sharded_tensor._post_init()
return sharded_tensor
@classmethod
def _init_from_local_shards_and_global_metadata(
cls,
local_shards: List[Shard],
sharded_tensor_metadata: ShardedTensorMetadata,
process_group=None,
init_rrefs=False,
) -> "ShardedTensor":
"""
Initialize a ShardedTensor with local shards and a global
ShardedTensorMetadata built on each rank.
Warning: This API is experimental and subject to change. It does
not do cross rank validations, and fully rely on the user
for the correctness of sharded_tensor_metadata on each rank
"""
process_group = (
process_group
if process_group is not None
else distributed_c10d._get_default_group()
)
current_rank = dist.get_rank(process_group)
shards_metadata = sharded_tensor_metadata.shards_metadata
tensor_properties = sharded_tensor_metadata.tensor_properties
if len(shards_metadata) == 0:
raise ValueError("shards_metadata must not be empty!")
if tensor_properties.layout != torch.strided:
raise ValueError('Only torch.strided layout is currently supported')
sharded_tensor = cls.__new__(cls)
sharded_tensor._prepare_init(process_group=process_group, init_rrefs=init_rrefs)
sharded_tensor._metadata = sharded_tensor_metadata
local_shard_metadatas = []
def _raise_if_mismatch(expected, actual, prop_name, rank, is_property=False):
tensor_property_or_metadata = "tensor property" if is_property else "local ShardMetadata"
if expected != actual:
raise ValueError(f"Local shards' tensor {prop_name} property is incompatible with "
f"{tensor_property_or_metadata} on rank {rank}: "
f"{tensor_property_or_metadata} {prop_name}={expected}, "
f"local shard tensor {prop_name}={actual}.")
# collect local shard metadatas from the global sharded_tensor_metadata
for shard_metadata in shards_metadata: # type: ignore[attr-defined]
rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_metadata.placement)
if current_rank == rank:
local_shard_metadatas.append(shard_metadata)
if len(local_shards) != len(local_shard_metadatas):
raise RuntimeError(
f'Number of local shards ({len(local_shards)}) does not match number of local '
f'shards metadata in sharded_tensor_metadata ({len(local_shard_metadatas)}) '
f'on rank ({current_rank}) '
)
for shard in local_shards:
shard_meta = shard.metadata
local_shard_tensor = shard.tensor
rank, local_device = _parse_and_validate_remote_device(sharded_tensor._process_group, shard_meta.placement)
# validate if shard_meta in the metadatas collected from sharded_tensor_metadata
assert shard_meta in local_shard_metadatas, \
"local shard metadata not in sharded_tensor_metadata!"
_raise_if_mismatch(tensor_properties.layout, local_shard_tensor.layout, "layout", current_rank, True)
if not local_shard_tensor.is_contiguous():
raise ValueError('Only torch.contiguous_format memory_format is currently supported')
_raise_if_mismatch(shard_meta.shard_sizes, list(local_shard_tensor.size()), "size", current_rank)
_raise_if_mismatch(tensor_properties.pin_memory, local_shard_tensor.is_pinned(), "pin_memory", current_rank, True)
_raise_if_mismatch(local_device, local_shard_tensor.device, "device", current_rank)
_raise_if_mismatch(tensor_properties.dtype, local_shard_tensor.dtype, "dtype", current_rank, True)
_raise_if_mismatch(
tensor_properties.requires_grad, local_shard_tensor.requires_grad, "requires_grad", current_rank, True)
# check if shards_metadata have overlap shards
validate_non_overlapping_shards_metadata(shards_metadata)
# check if the shards_metadata is compatible with overall size of the sharded tensor.
check_tensor(shards_metadata, list(sharded_tensor_metadata.size))
# done validation, add local_shards
sharded_tensor._local_shards = local_shards
# make a EnumerableShardingSpec for sharded tensors that initialized from this API.
# TODO: make sharding spec a ChunkShardingSpec by inferring from the metadata list.
# see issue https://github.com/pytorch/pytorch/issues/67244
sharded_tensor._sharding_spec = EnumerableShardingSpec(shards_metadata)
# run post initialization, i.e. map registration, rpc initialization
sharded_tensor._post_init()
return sharded_tensor
def _init_chunked(self, dims, tensor_init_params: TensorInitParams, ):
current_rank = dist.get_rank(self._process_group)
sharding_dim = self._sharding_spec.dim # type: ignore[attr-defined]
# Validate the sharding spec.
if not isinstance(sharding_dim, int):
raise ValueError(
f"Sharding dim needs to be an integer, found: {sharding_dim}"
)
if sharding_dim >= len(dims) or sharding_dim < -len(dims):
raise ValueError(f"Invalid sharding dim: {sharding_dim}")
dim_size = dims[sharding_dim]
remote_devices = self._sharding_spec.placements # type: ignore[attr-defined]
chunks = len(remote_devices)
# split_size computed similar to 'torch.chunk'
split_size = get_split_size(dim_size, chunks)
shards_metadata = []
for idx, remote_device in enumerate(remote_devices):
rank, local_device = _parse_and_validate_remote_device(self._process_group, remote_device)
# Adjust the sharding dim for this rank.
sharded_dim_size = get_chunked_dim_size(dim_size, split_size, idx)
if sharded_dim_size > 0:
# Build sharding_metadata.
# deepcopy for modification.
rank_dims = dims.copy()
rank_offsets = [0] * len(dims)
rank_offsets[sharding_dim] = split_size * idx
rank_dims[sharding_dim] = sharded_dim_size
shard_metadata = ShardMetadata(rank_offsets, rank_dims, remote_device)
shards_metadata.append(shard_metadata)
# Build the local shard for the current rank if it is involved in the sharding spec.
if current_rank == rank:
# Initialize the local shard.
local_shard = _create_tensor_from_params(
*rank_dims, local_device=local_device, tensor_init_params=tensor_init_params)
self._local_shards.append(Shard(local_shard, shard_metadata))
# Build overall metadata
self._metadata = ShardedTensorMetadata(
shards_metadata, dims, tensor_init_params.tensor_properties, )
def _init_enumerable(self, dims, tensor_init_params: TensorInitParams):
# Validate the sharding spec is compatible with the tensor.
check_tensor(self._sharding_spec.shards, dims) # type: ignore[attr-defined]
current_rank = dist.get_rank(self._process_group)
shards_metadata = []
for shard_metadata in self._sharding_spec.shards: # type: ignore[attr-defined]
rank, local_device = _parse_and_validate_remote_device(self._process_group, shard_metadata.placement)
shards_metadata.append(shard_metadata)
if current_rank == rank:
# Initialize the local shard.
local_shard = _create_tensor_from_params(
*shard_metadata.shard_sizes, local_device=local_device,
tensor_init_params=tensor_init_params)
self._local_shards.append(Shard(local_shard, shard_metadata))
# Build overall metadata
self._metadata = ShardedTensorMetadata(
shards_metadata, dims, tensor_init_params.tensor_properties, )
def sharding_spec(self) -> ShardingSpec:
"""
Returns the ShardingSpec for the tensor.
"""
return self._sharding_spec
def __torch_function__(self, func, types, args=(), kwargs=None):
if func in _SHARDED_OPS:
return _SHARDED_OPS[func](types, args, kwargs, self._process_group)
raise RuntimeError(
f"torch function '{func.__name__}', with args: {args} and "
f"kwargs: {kwargs} not supported for ShardedTensor!")
def metadata(self) -> ShardedTensorMetadata:
"""
Returns a :class:`ShardedTensorMetadata` object corresponding to the
metadata for the entire tensor.
"""
return self._metadata
def local_shards(self) -> List[Shard]:
"""
Returns a list of :class:`Shard' corresponding to the
local shards for this rank. Returns an empty list if the current rank
does not host any shards for this Tensor.
"""
return self._local_shards
def size(self, dim: int = None) -> Union[torch.Size, int]:
"""
Returns a :Union:`[torch.Size, int]` which represents the size of the tensor.
The dimension can be specified.
Args:
dim (int, optional): the dimension over which the size represents.
If specified, it returns the size of the given dimension.
If not, it returns a subclass of tuple.
Default: ``None``
Returns:
A :Union:`[torch.Size, int]` represents the size of the tensor.
"""
size = self._metadata.size
if dim is None:
return size
if dim < 0 or dim >= len(size):
raise ValueError(
f"Argument ``dim`` must be within the range of tensor dimensions [0, {len(size)})"
)
return size[dim]
def is_pinned(self) -> bool:
"""
Returns True if the sharded tensor (each local shard) resides in pinned memory.
"""
return self._metadata.tensor_properties.pin_memory
def is_contiguous(self) -> bool:
"""
Returns True if the sharded tensor (each local shard) is contiguous in memory
in the order specified by memory format.
"""
return self._metadata.tensor_properties.memory_format == torch.contiguous_format
@property
def shape(self):
return self._metadata.size
@property
def requires_grad(self):
return self._metadata.tensor_properties.requires_grad
@property
def dtype(self):
return self._metadata.tensor_properties.dtype
@property
def layout(self):
return self._metadata.tensor_properties.layout
def _register_remote_shards(self, remote_shards: List[rpc.RRef[Shard]], rpc_rank: int):
self._remote_shards[rpc_rank] = remote_shards
def remote_shards(self) -> Dict[int, List[rpc.RRef[Shard]]]:
"""
Returns a Dict[int, RRef] with keys being the RPC rank and values
being RRefs to shards on that rank. Need to initialize the
RPC framework for this functionality.
Raises an exception if ShardedTensor was created with ``init_rrefs=False``
"""
if not self._init_rrefs:
raise RuntimeError(
'ShardedTensor created with init_rrefs=False, no RRefs to remote shards available'
)
return self._remote_shards
def __hash__(self):
return id(self)
def __repr__(self):
return f'ShardedTensor({self._metadata})'
@dataclass
class ProcessGroupState:
"""
State for ser-de of process group
"""
local_rank: int
global_rank: int
local_world_size: int
global_world_size: int
def __getstate__(self):
pg_state = ShardedTensor.ProcessGroupState(
distributed_c10d.get_rank(self._process_group),
distributed_c10d.get_rank(),
distributed_c10d.get_world_size(self._process_group),
distributed_c10d.get_world_size(),
)
return self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs
def __setstate__(self, state):
self._sharded_tensor_id = None
if not distributed_c10d.is_initialized():
raise RuntimeError(
'Need to initialize default process group using '
'"init_process_group" before loading ShardedTensor')
self._local_shards, self._metadata, pg_state, self._sharding_spec, self._init_rrefs = state
# Setup process group
self._process_group = get_current_process_group()
# Validate process group.
local_rank = distributed_c10d.get_rank(self._process_group)
if pg_state.local_rank != local_rank:
raise RuntimeError(
f'Local rank at save time was {pg_state.local_rank}, but at '
f'load time was {local_rank}')
global_rank = distributed_c10d.get_rank()
if pg_state.global_rank != global_rank:
raise RuntimeError(
f'Global rank at save time was {pg_state.global_rank}, but at '
f'load time was {global_rank}')
local_world_size = distributed_c10d.get_world_size(self._process_group)
if pg_state.local_world_size != local_world_size:
raise RuntimeError(
f'Local world size at save time was {pg_state.local_world_size}, '
f'but at load time was {local_world_size}')
global_world_size = distributed_c10d.get_world_size()
if pg_state.global_world_size != global_world_size:
raise RuntimeError(
f'Global world size at save time was {pg_state.global_world_size}, '
f'but at load time was {global_world_size}')
self._post_init()
def _create_tensor_from_params(*size, local_device, tensor_init_params: TensorInitParams):
""" Helper to construct tensor from size, device and common params. """
create_op = tensor_init_params.create_op
dtype = tensor_init_params.tensor_properties.dtype
layout = tensor_init_params.tensor_properties.layout
requires_grad = tensor_init_params.tensor_properties.requires_grad
memory_format = tensor_init_params.tensor_properties.memory_format
pin_memory = tensor_init_params.tensor_properties.pin_memory
if create_op == CreateOp.ONES:
return torch.ones(*size, dtype=dtype, layout=layout,
device=local_device, pin_memory=pin_memory,
requires_grad=requires_grad,)
elif create_op == CreateOp.EMPTY:
return torch.empty(*size, dtype=dtype, layout=layout,
device=local_device, requires_grad=requires_grad,
# NB: memory_format param is not accepted by torch.ones
memory_format=memory_format, pin_memory=pin_memory,)
elif tensor_init_params.create_op == CreateOp.ZEROS:
return torch.zeros(*size,
dtype=dtype,
layout=layout,
device=local_device,
pin_memory=pin_memory,
requires_grad=requires_grad,)
elif tensor_init_params.create_op == CreateOp.RAND:
return torch.rand(*size,
dtype=dtype,
layout=layout,
device=local_device,
pin_memory=pin_memory,
requires_grad=requires_grad,)
elif tensor_init_params.create_op == CreateOp.FULL:
return torch.full(size=size,
fill_value=tensor_init_params.fill_value,
layout=layout,
dtype=dtype,
requires_grad=requires_grad,
device=local_device, )
else:
raise ValueError(f'Unsupported create_op: {tensor_init_params.create_op}')
| 40.637931 | 126 | 0.649163 | [
"Apache-2.0"
] | dannis999/tensorflow | torch/distributed/_sharded_tensor/api.py | 30,641 | Python |
# -*- coding: utf-8 -*-
from CuAsm.CuInsAssemblerRepos import CuInsAssemblerRepos
from CuAsm.CuInsFeeder import CuInsFeeder
def constructReposFromFile(sassname, savname=None, arch='sm_75'):
# initialize a feeder with sass
feeder = CuInsFeeder(sassname, arch=arch)
# initialize an empty repos
repos = CuInsAssemblerRepos(arch=arch)#
# Update the repos with instructions from feeder
repos.update(feeder)
# reset the feeder back to start
# feeder.restart()
# verify the repos
# actually the codes is already verifed during repos construction
# repos.verify(feeder)
if savname is not None:
repos.save2file(savname)
return repos
def verifyReposFromFile(sassname, reposfile, arch='sm_75'):
# initialize a feeder with sass
feeder = CuInsFeeder(sassname, arch=arch)
# initialize an empty repos
repos = CuInsAssemblerRepos(reposfile, arch=arch)#
# verify the repos
repos.verify(feeder)
if __name__ == '__main__':
sassname = r"G:\\Temp\\NVSASS\\cudnn64_7.sm_50.sass"
# sassname = r'G:\\Temp\\Program.45.sm_50.sass'
reposfile = r'InsAsmRepos.sm_50.txt'
arch = 'sm_50'
constructReposFromFile(sassname, reposfile, arch=arch)
print('### Construction done!')
# verifyReposFromFile(sassname, reposfile, arch=arch)
# print('### Verification done!')
| 27.307692 | 70 | 0.672535 | [
"MIT"
] | cloudcores/CuAssembler | Tests/test_CuInsAsmRepos_sm50.py | 1,420 | Python |
from cumulusci.core.config import ConnectedAppOAuthConfig
from django.conf import settings
def get_connected_app():
return ConnectedAppOAuthConfig(
{
"callback_url": settings.CONNECTED_APP_CALLBACK_URL,
"client_id": settings.CONNECTED_APP_CLIENT_ID,
"client_secret": settings.CONNECTED_APP_CLIENT_SECRET,
}
)
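# --- Hypothetical usage sketch (added for illustration; not part of the original module) ---
# Callers that need the CumulusCI connected app just call the helper; all three
# values are read from Django settings, so they can be rotated without code changes:
#
#   connected_app = get_connected_app()
#   # pass `connected_app` wherever CumulusCI expects a ConnectedAppOAuthConfig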
| 28.538462 | 66 | 0.708895 | ["BSD-3-Clause"] | abhishekalgo/metaci | metaci/cumulusci/utils.py | 371 | Python |
import re, multiprocessing
from tqdm import tqdm
import numpy as np
class Cleaner():
def __init__(self, num_threads=1): # right now, it's single threaded
self.num_threads = min(num_threads, int(multiprocessing.cpu_count()/2))
"""
S- ar putea să fie necesar să- l recitiţi.
"""
self.r1 = re.compile(r"([\w]+-)[\s]([\w]+)", re.IGNORECASE)
"""
{LL/ AAAA}
Humalog Mix50 100 U/ ml
"""
self.r2 = re.compile(r"([\w]+/)\s([\w]+)", re.IGNORECASE)
"""
All unicode dashes to normal '-', see https://www.fileformat.info/info/unicode/category/Pd/list.htm
        includes the bullet character • (\u2022)
"""
self.r3 = re.compile(r"([■\u2022\u007E\u00AD\u058A\u05BE\u1400\u1806\u2010\u2011\u2012\u2013\u2014\u2015\u2053\u207B\u208B\u2212\u2E17\u2E3A\u2E3B\u301C\u3030\u30A0\uFE31\uFE32\uFE63\uFF0D]+)", re.UNICODE)
"""
spaces after comma in numbers: 1, 4% -> 1,4%
"""
self.r4 = re.compile(r"([\d]+,)\s([\d]+)", re.IGNORECASE)
"""
soft hyphens #\u00AD
"""
self.r5 = re.compile(r"[\u00AD]")
"""
        remove URLs
"""
self.r6 = re.compile(r'(?:www|http)\S+|<\S+|\w+\/*>')
"""
remove emails
"""
self.r7 = re.compile(r'([^@]+@[^@]+\.[^@]+)')
"""
table separators
"""
self.r8 = re.compile(r'[\─\─]+')
self.r9 = re.compile(r'[\-\-]+')
"""
multiple spaces
"""
self.space = re.compile(' +')
"""
        forbidden chars that cause a lot of bad sentences
"""
self.forbidden_chars = "ºþÈ™ÓÑÄÈîƒ"
def process(self, lines, percent_max_numeric=0.7, percent_max_non_ascii=0.40, min_line_length=20, verbose=False, disable_pbar=True):
skipped_because_min_length = np.array([0,0], dtype=np.uint64)
skipped_alpha_count = np.array([0,0], dtype=np.uint64)
skipped_because_max_numeric = np.array([0,0], dtype=np.uint64)
skipped_because_max_non_ascii = np.array([0,0], dtype=np.uint64)
skipped_because_forbidden_chars = np.array([0,0], dtype=np.uint64)
total_original_length = 0
total_clean_length = 0
output = []
for line in tqdm(lines, disable = disable_pbar):
line = line.strip()
# get stats about line
length = len(line)
total_original_length += length
if length < min_line_length:
skipped_because_min_length += np.array([1,length], dtype=np.uint64)
continue
            line = bytes(line, 'utf-8').decode('utf-8', 'ignore') # strip non-UTF-8 chars
digit_count = 0
alpha_count = 0
ascii_count = 0
forbidden_char = False
for char in line:
if char in self.forbidden_chars:
forbidden_char = True
break
if char.isnumeric():
digit_count+=1
if char.isalpha():
alpha_count+=1
if char.isascii():
ascii_count+=1
# reject if forbidden char
if forbidden_char:
skipped_because_forbidden_chars += np.array([1,length], dtype=np.uint64)
continue
# reject if number of letters is too small
if alpha_count == 0 or alpha_count / length < 0.5:
skipped_alpha_count += np.array([1,length], dtype=np.uint64)
if verbose:
print("Skipping alpha={:.3f}: [{}]".format(alpha_count / length, line))
continue
# reject if too many numbers
if digit_count / alpha_count >= percent_max_numeric and digit_count > 6:
skipped_because_max_numeric += np.array([1,length], dtype=np.uint64)
if verbose:
print("Skipping digit={:.3f}: [{}]".format(digit_count / alpha_count, line))
continue
# reject if too many non-ascii
if ascii_count / alpha_count < percent_max_non_ascii and length > 15:
skipped_because_max_non_ascii += np.array([1,length], dtype=np.uint64)
if verbose:
print("Skipping ascii={:.3f}: [{}]".format(digit_count / alpha_count, line))
continue
            # skip lines that appear to be ASCII tables ('|' or '│' separators)
if (line.strip()[0] == '|' and line.count('|') > 2) or (line.strip()[0] == '│' and line.count('│') > 2):
skipped_because_forbidden_chars += np.array([1,length], dtype=np.uint64)
if verbose:
print("Skipping table line: [{}]".format(line))
continue
# clean line
#print("\nbef: {}".format(line))
line = self.r1.sub(r"\1\2", line)
line = self.r2.sub(r"\1\2", line)
line = self.r3.sub("-", line)
line = self.r4.sub(r"\1\2", line)
line = self.r5.sub("", line)
line = self.r6.sub("", line)
line = self.r7.sub("", line)
# separators
line = self.r8.sub("", line)
line = self.r9.sub("", line)
line = line.replace("( ă)", "(ă)")
line = line.replace("ţ", "ț")
line = line.replace("ş", "ș")
line = line.replace("Ţ", "Ț")
line = line.replace("Ş", "Ș")
line = line.replace("â", "â")
#print("aft: {}".format(line))
line = self.space.sub(' ', line).strip()
# check that after processing the line is not too short
if len(line) < min_line_length:
skipped_because_min_length += np.array([1,length], dtype=np.uint64)
continue
total_clean_length += len(line)
output.append(line+"\n")
# pack stats
stats = {}
stats["skipped_because_min_length"] = skipped_because_min_length
stats["skipped_alpha_count"] = skipped_alpha_count
stats["skipped_because_max_numeric"] = skipped_because_max_numeric
stats["skipped_because_max_non_ascii"] = skipped_because_max_non_ascii
stats["skipped_because_forbidden_chars"] = skipped_because_forbidden_chars
stats["total_original_length"] = total_original_length
stats["total_clean_length"] = total_clean_length
return output, stats
def add_stats(self, a, b):
"""
        Add two stats dicts that are returned by the process function.
        This is used when cleaning multiple files.
:param a: stats dict
:param b: stats dict
:return: stats dict
"""
stats = {}
stats["skipped_because_min_length"] = a["skipped_because_min_length"] + b["skipped_because_min_length"]
stats["skipped_alpha_count"] = a["skipped_alpha_count"] + b["skipped_alpha_count"]
stats["skipped_because_max_numeric"] = a["skipped_because_max_numeric"] + b["skipped_because_max_numeric"]
stats["skipped_because_max_non_ascii"] = a["skipped_because_max_non_ascii"] + b["skipped_because_max_non_ascii"]
stats["skipped_because_forbidden_chars"] = a["skipped_because_forbidden_chars"] + b["skipped_because_forbidden_chars"]
stats["total_original_length"] = a["total_original_length"] + b["total_original_length"]
stats["total_clean_length"] = a["total_clean_length"] + b["total_clean_length"]
return stats
def print_stats(self, stats):
print("\nCleaning statistics:")
print("Total original length (chars) = {}".format(stats["total_original_length"]))
print("Total length after cleaning (chars) = {}".format(stats["total_clean_length"]))
print("Percent data kept = {:.3f} %".format(100.*stats["total_clean_length"]/stats["total_original_length"]))
print("Skipped because line length was below minimum (lines/chars): {} ".format(stats["skipped_because_min_length"]))
print("Skipped because line had forbidden characters (lines/chars): {} ".format(stats["skipped_because_forbidden_chars"]))
print("Skipped because alpha count was below minimum (lines/chars): {} ".format(stats["skipped_alpha_count"]))
print("Skipped because digit count was above maximum (lines/chars): {} ".format(stats["skipped_because_max_numeric"]))
print("Skipped because too many non-ascii characters (lines/chars): {} ".format(stats["skipped_because_max_non_ascii"]))
text = [" - ~~~~~Păstraţi acest prospect. S- ar putea să fie necesar să- l recitiţi.",
"- Dacă aveţi orice întrebări suplimentare, adresaţi- vă medicului dumneavoastră sau farmacistului.\n",
"{LL/ AAAA}\n",
"MANUALUL UTILIZATORULUI\n",
"Vezi textul manualului mai jos.\n",
"303 Informaţii detaliate privind acest medicament sunt disponibile pe website- ul Agenţiei Europene a Medicamentului (EMEA): http: // www. emea. europa. eu /.\n",
"304 PROSPECT: \n",
"INFORMAŢII PENTRU UTILIZATOR",
"Humalog Mix50 100 U/ ml • • • ~~~~",
"Τηλ: +30 210 629 4600 España Lilly S. A.",
"Tel: + 34- 91 663 50 00 France Lilly France S. A. S.",
"Tél: +33 - (0) 1 55 49 34 34 Ireland Eli Lilly and Company (Ireland) Limited Tel: + 353 - (0) 1 661 4377 Ísland Icepharma hf.",
"Sími + 354 540 8000 Italia Eli Lilly Italia S. p. A.",
"Tel: + 39 - 055 42571 Κύπρος Phadisco Ltd Τηλ: +357 22 715000 ",
"Luxembourg/ Luxemburg Eli Lilly Benelux S. A.",
"Tél/ Tel: + 32 - (0) 2 548 84 84 Magyarország Lilly Hungária Kft.",
"Tel: + 36 1 328 5100 Malta Charles de Giorgio Ltd.",
"Κύπρος Βαρνάβας Χατζηπαναγής Λτδ 7 Ανδροκλέους CY- 1060 Λευκωσία Tηλ"]
#tt = []
#for i in range(100000):
# tt.extend(text)
#print(len(tt))
"""
c = Cleaner(1)
lines, s1 = c.process(text)
lines, s2 = c.process(text)
stats = c.add_stats(s1, s2)
c.print_stats(s1)
c.print_stats(s2)
c.print_stats(stats)
print("DONE")
"""
| 40.946721 | 212 | 0.586828 | ["MIT"] | senisioi/Romanian-Transformers | corpus/text_cleaner.py | 10,124 | Python |