ext | sha | content |
---|---|---|
py | b41307d90f43557915b808b89d43c7f4e70fecbe |
import json
import re
import functools
from typing import List, Dict, Set, Tuple, Optional, TypeVar, Iterable
from spacy.tokens import Doc, Token, Span, DocBin # type: ignore
import numpy as np
T = TypeVar('T')
############################################
# Utility functions for NLP analysis
############################################
def is_likely_proper(tok: Token, min_rank=200) -> bool:
"""Returns true if the spacy token is a likely proper name, based on its form.
NB: this method should only be used for languages that have a distinction between
lowercase and uppercase (so called bicameral scripts)."""
# We require at least two characters
if len(tok) < 2:
return False
# If the lemma is titled or in uppercase, just return True
elif tok.lemma_.istitle() and len(tok.lemma_) >2:
return True
elif tok.lemma_.isupper() and len(tok.lemma_) >2 and tok.lemma_ != "-PRON-":
return True
# If there is no lemma, but the token is in uppercase, return true as well
elif tok.lemma_=="" and tok.is_upper:
return True
    # We do not consider the most common words (rank below min_rank) as proper names
elif (tok.lemma_.islower() and tok.lemma in tok.vocab.strings
and tok.vocab[tok.lemma].rank < min_rank):
return False
# Handling cases such as iPad
elif len(tok) > 2 and tok.text[0].islower() and tok.text[1].isupper():
return True
# Handling cases such as IceFog
elif (len(tok) > 2 and tok.text[0].isupper()
and any([k.islower() for k in tok.text[1:]])
and any([k.isupper() for k in tok.text[1:]])):
return True
# Else, check whether the surface token is titled and is not sentence-initial
# NB: This should be commented out for languages such as German
elif (tok.i > 0 and tok.is_title and not tok.is_sent_start
and tok.nbor(-1).text not in {'\'', '"', '‘', '“', '”', '’', "\n", "|"}
and not tok.nbor(-1).text.endswith(".")):
return True
# If the part-of-speech is a proper noun
elif tok.pos_ == "PROPN":
return True
# If the token is a quite rare token
elif (len(tok) > 3 and (tok.is_lower or tok.is_upper)
and len(tok.vocab.vectors) > 0 and tok.is_oov):
return True
return False
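# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of calling is_likely_proper; it assumes an English spaCy
# model such as "en_core_web_md" is installed, and the sentence is made up.
def _demo_is_likely_proper():
    import spacy
    nlp = spacy.load("en_core_web_md")
    doc = nlp("Apple released the iPad in Cupertino.")
    for tok in doc:
        print(tok.text, is_likely_proper(tok))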
def is_infrequent(span: Span, max_rank_threshold=15000) -> bool:
"""Returns true if there is at least one token that is quite infrequent"""
max_rank = max(tok.rank if len(span.vocab.vectors) >
0 and tok.rank > 0 else 0 for tok in span)
return max_rank > max_rank_threshold
def in_compound(tok: Token):
"""Returns true if the spacy token is part of a compound phrase"""
if tok.dep_ == "compound":
return True
elif tok.i > 0 and tok.nbor(-1).dep_ == "compound":
return True
return False
def replace_ner_spans(doc: Doc, source: str):
"""Given a Spacy Doc object and the name of an annotation source, replaces
the current named entities by the ones specified in the source"""
# We create Spacy spans based on the annotation layer
spans = []
if source in doc.spans:
for span in doc.spans[source]:
spans.append(span)
doc.ents = tuple(spans)
return doc
@functools.lru_cache(maxsize=5)
def get_spacy_model(spacy_model_name: str):
"""Returns the vocabulary associated with the spacy model
(and caches it for faster access)"""
import spacy
return spacy.load(spacy_model_name)
@functools.lru_cache(maxsize=1)
def get_tokens(doc: Doc) -> List[str]:
"""Returns the list of tokens from a given spacy Document. As it is an
operation that (for some strange reason) actually takes some CPU resources,
we cache the results, as it is a frequent operation, e.g. for gazetteers. """
return [tok.text for tok in doc]
@functools.lru_cache(maxsize=1)
def get_next_sentence_boundaries(doc: Doc) -> List[int]:
"""Returns a list of integers (of same size as the number of tokens)
expressing, for each token, the position of the next sentence boundary
(start-of-sentence token). """
boundaries = []
for tok in doc:
if tok.is_sent_start:
boundaries.append(tok.i)
next_boundary_indices = np.searchsorted(boundaries, range(1, len(doc)+1))
next_boundaries = [boundaries[i] if i < len(boundaries) else len(doc)
for i in next_boundary_indices]
return next_boundaries
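# --- Illustrative usage sketch (not part of the original module) ---
# Prints, for a short made-up two-sentence document, the token list and the
# position of the next sentence boundary for each token. Assumes the
# "en_core_web_md" model is installed.
def _demo_next_sentence_boundaries():
    nlp = get_spacy_model("en_core_web_md")
    doc = nlp("This is one sentence. Here is another.")
    print(get_tokens(doc))
    print(get_next_sentence_boundaries(doc))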
############################################
# I/O related functions
############################################
def docbin_reader(docbin_file_path: str, spacy_model_name: str = "en_core_web_md",
cutoff: Optional[int] = None, nb_to_skip: int = 0):
"""Read a binary file containing a DocBin repository of spacy documents.
In addition to the file path, we also need to provide the name of the spacy
model (which is necessary to load the vocabulary), such as "en_core_web_md".
If cutoff is specified, the method will stop after generating the given
number of documents. If nb_to_skip is > 0, the method will skip the given
number of documents before starting the generation.
"""
import spacy
# Reading the binary data from the file
fd = open(docbin_file_path, "rb")
data = fd.read()
fd.close()
docbin = DocBin(store_user_data=True)
docbin.from_bytes(data)
del data
# print("Total number of documents in docbin:", len(docbin))
# Skip a number of documents
if nb_to_skip:
docbin.tokens = docbin.tokens[nb_to_skip:]
docbin.spaces = docbin.spaces[nb_to_skip:]
docbin.user_data = docbin.user_data[nb_to_skip:]
# Retrieves the vocabulary
vocab = get_spacy_model(spacy_model_name).vocab
# We finally generate the documents one by one
reader = docbin.get_docs(vocab)
for i, doc in enumerate(reader):
yield doc
if cutoff is not None and (i+1) >= cutoff:
return
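# --- Illustrative usage sketch (not part of the original module) ---
# Streams the first few documents from a hypothetical DocBin file; the file
# path and cutoff value below are assumptions, not part of the original code.
def _demo_docbin_reader():
    for doc in docbin_reader("corpus.docbin", "en_core_web_md", cutoff=3):
        print(len(doc), "tokens:", doc.text[:60])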
def docbin_writer(docs: Iterable[Doc], docbin_output_path: str):
"""Writes a stream of Spacy Doc objects to a binary file in the DocBin format."""
import spacy.attrs
# Creating the DocBin object (with all attributes)
attrs = [spacy.attrs.LEMMA, spacy.attrs.TAG, spacy.attrs.DEP, spacy.attrs.HEAD,
spacy.attrs.ENT_IOB, spacy.attrs.ENT_TYPE]
docbin = DocBin(attrs=attrs, store_user_data=True)
# Storing the documents in the DocBin repository
for doc in docs:
doc.cats = {}
docbin.add(doc)
data = docbin.to_bytes()
# And writing the content to the file
print("Write to", docbin_output_path, end="...", flush=True)
fd = open(docbin_output_path, "wb")
fd.write(data)
fd.close()
print("done")
def json_writer(docs, json_file_path: str, source: str = None):
"""Converts a collection of Spacy Doc objects to a JSON format,
such that it can be used to train the Spacy NER model. (for Spacy v2)
    `source` must be the name of an aggregated annotation layer (available in
    doc.spans), which will correspond to the target values in the JSON file.
"""
import spacy
if int(spacy.__version__[0]) > 2:
raise RuntimeError("Only supported for Spacy v2")
import spacy.gold # type: ignore
# We start opening up the JSON file
print("Writing JSON file to", json_file_path)
out_fd = open(json_file_path, "wt")
out_fd.write("[{\"id\": 0, \"paragraphs\": [\n")
for i, doc in enumerate(docs):
# We replace the NER labels with the annotation source
if source is not None:
doc = replace_ner_spans(doc, source)
# We dump the JSON content to the file
d = spacy.gold.docs_to_json([doc])
s = json.dumps(d["paragraphs"]).strip("[]")
if i > 0:
s = ",\n" + s
out_fd.write(s)
if i > 0 and i % 1000 == 0:
print("Converted documents:", i)
out_fd.flush()
# And finally close all file descriptors
out_fd.write("]}]\n")
out_fd.flush()
out_fd.close()
############################################
# Operations on spans
############################################
def get_spans(doc: Doc, sources: List[str], labels: Optional[List[str]] = None
) -> List[Span]:
"""Return the spans annotated by a list of labelling sources. If two
spans are overlapping, the longest spans are kept. One can also specify the
labels to focus on (if empty, we extract all). """
# Creating a list of spans
spans = []
for source in sources:
if source in doc.spans:
for span in doc.spans[source]:
if labels is None or span.label_ in labels:
spans.append(span)
else:
raise RuntimeError("Annotation source \"%s\" cannot be found" % source)
# Remove possible overlaps
spans = _remove_overlaps(spans)
return spans
def get_spans_with_probs(doc: Doc, source: str, labels: Optional[List[str]] = None
) -> List[Tuple[Span,float]]:
"""Return the spans annotated by an aggregated source. The method returns a
dictionary of non-overlapping spans where the keys
are (start, end) pairs and the values are pairs of (label, prob).
"""
spans = []
if source in doc.spans:
for span in doc.spans[source]:
if labels is None or span.label_ in labels:
prob = _get_agg_span_prob(doc, source, span)
spans.append((span, prob))
else:
raise RuntimeError("Annotation source \"%s\" cannot be found" % source)
return spans
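# --- Illustrative usage sketch (not part of the original module) ---
# Builds a document with a manually added annotation layer (the layer name
# "my_source" and the sentence are made up) and retrieves its spans, first
# without and then with aggregated probabilities.
def _demo_get_spans():
    nlp = get_spacy_model("en_core_web_md")
    doc = nlp("Barack Obama visited Paris in 2015.")
    doc.spans["my_source"] = [Span(doc, 0, 2, label="PERSON"),
                              Span(doc, 3, 4, label="GPE")]
    print(get_spans(doc, ["my_source"]))
    print(get_spans_with_probs(doc, "my_source"))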
def _get_agg_span_prob(doc, source, span):
"""Get the probability that the source assigns the (start,end)->label span"""
if source not in doc.spans:
return 0
elif "probs" not in doc.spans[source].attrs:
return 1
probs = doc.spans[source].attrs["probs"]
if (span.start, span.end) in probs:
return probs[(span.start, span.end)]
probs_per_token = []
for i in range(span.start, span.end):
if i in probs:
for prefixed_label, prob in probs[i].items():
if prefixed_label.endswith("-%s" % span.label_):
probs_per_token.append(prob)
return sum(probs_per_token)/(len(span))
def count_nb_occurrences(tokens: Tuple[str, ...], all_tokens: List[str]):
"""Count the number of occurences of the sequence of tokens in the
full list all_tokens"""
nb_occurrences = 0
for i in range(len(all_tokens)):
for k in range(len(tokens)):
if all_tokens[i+k] != tokens[k]:
break
else:
nb_occurrences += 1
return nb_occurrences
def at_least_nb_occurrences(tokens: Tuple[str, ...], all_tokens: List[str], min_threshold):
"""Returns true if the number of occurences of the sequence of tokens in the
full list all_tokens is at least min_threshold, and false otherwise"""
if len(tokens) == 1:
return all_tokens.count(tokens[0]) >= min_threshold
nb_occurrences = 0
for i in range(len(all_tokens)):
for k in range(len(tokens)):
if (i+k) >= len(all_tokens) or all_tokens[i+k] != tokens[k]:
break
else:
nb_occurrences += 1
if nb_occurrences >= min_threshold:
return True
return False
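# --- Illustrative usage sketch (not part of the original module) ---
# Counts occurrences of a token sequence in a flat token list; the tokens
# below are made up for illustration.
def _demo_occurrences():
    all_tokens = ["the", "big", "cat", "and", "the", "big", "dog"]
    print(count_nb_occurrences(("the", "big"), all_tokens))        # 2
    print(at_least_nb_occurrences(("the", "big"), all_tokens, 2))  # True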
def _remove_overlaps(spans: List[Span]) -> List[Span]:
"""Remove overlaps between spans expressed as (start, end, label, score)
tuples. When two overlapping spans are detected, the method keeps the
longest span and removes the other. If the two scores are identical,
the first span is discarded).
"""
# We sort the spans by their position
spans.sort()
# We resolve overlaps between spans
finished = False
while not finished:
finished = True
for i in range(1, len(spans)):
# If two spans are overlapping , keep the longest one
start1 = spans[i-1].start
end1 = spans[i-1].end
start2 = spans[i].start
end2 = spans[i].end
if start2 < end1 and start1 < end2:
length_diff = (end1-start1) - (end2-start2)
if length_diff > 0:
del spans[i]
else:
del spans[i-1]
finished = False
break
return spans
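# --- Illustrative usage sketch (not part of the original module) ---
# Shows that, given two overlapping candidate spans over a made-up sentence,
# only the longest one is kept. Assumes the "en_core_web_md" model.
def _demo_remove_overlaps():
    nlp = get_spacy_model("en_core_web_md")
    doc = nlp("The European Central Bank raised rates.")
    overlapping = [Span(doc, 1, 4, label="ORG"), Span(doc, 2, 4, label="ORG")]
    print(_remove_overlaps(overlapping))  # keeps only the longer span (1, 4)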
def merge_contiguous_spans(spans: List[Tuple[int, int, str]], doc: Doc,
acceptable_gaps: str = ","):
"""Merge spans that are contiguous (and with same label), or only
separated with some predefined punctuation symbols"""
finished = False
while not finished:
finished = True
spans.sort()
for i in range(1, len(spans)):
start1, end1, label1 = spans[i-1]
start2, end2, label2 = spans[i]
if end1 == start2 or (end1 == start2-1 and doc[end1].text in acceptable_gaps):
if label1 == label2:
new_spans = spans[:i-1] if i > 1 else []
new_spans.append((start1, end2, label1))
new_spans += spans[i+1:]
spans = new_spans
finished = False
break
return spans
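# --- Illustrative usage sketch (not part of the original module) ---
# Two same-label spans separated only by a comma are merged into one; the
# sentence and spans are made up.
def _demo_merge_contiguous_spans():
    nlp = get_spacy_model("en_core_web_md")
    doc = nlp("Oslo , Norway is cold .")
    spans = [(0, 1, "LOC"), (2, 3, "LOC")]
    print(merge_contiguous_spans(spans, doc))  # [(0, 3, 'LOC')]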
def get_overlaps(start: int, end: int, other_spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
"""Returns a list of overlaps (as (start, end, value) between the provided span
and other existing spans"""
overlaps = []
other_spans.sort()
start_search, end_search = _binary_search(start, end, other_spans)
for other_span_start, other_span_end in other_spans[start_search:end_search]:
        if start < other_span_end and end > other_span_start:
overlaps.append((other_span_start, other_span_end))
return overlaps
def _binary_search(start: int, end: int, intervals: List[Tuple[int, int]]) -> Tuple[int, int]:
"""Performs a binary search"""
start_search = 0
end_search = len(intervals)
while start_search < (end_search-1):
mid = start_search + (end_search-start_search)//2
(interval_start, interval_end) = intervals[mid]
if interval_end <= start:
start_search = mid
elif interval_start >= end:
end_search = mid
else:
break
return start_search, end_search
def get_subsequences(sequence: List[T]) -> List[List[T]]:
"""Returns the list of possible subsequences that are included
in the full sequence (including the original sequence)."""
subsequences = []
for length in range(1, len(sequence)+1):
for i in range(length, len(sequence)+1):
subsequences.append(sequence[i-length:i])
return subsequences
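# --- Illustrative usage sketch (not part of the original module) ---
# Enumerates all contiguous subsequences of a small made-up token list.
def _demo_get_subsequences():
    print(get_subsequences(["New", "York", "City"]))
    # [['New'], ['York'], ['City'], ['New', 'York'], ['York', 'City'],
    #  ['New', 'York', 'City']]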
def spans_to_array(
doc: Doc,
labels: List[str],
sources: Optional[List[str]] = None,
) -> np.ndarray:
"""Convert the annotations of a spacy document into a 2D array.
Each row corresponds to a token, and each column to a labelling
source. In other words, the value at (i,j) represents the prediction
of source j for token i. This prediction is expressed as the
    index of the label in `labels`.
    Labels must be a list of labels (such as B-PERSON, I-ORG) to detect.
    Sources should be a list of labelling sources. If None, all sources
    are employed.
    NB: we assume the labels follow one of the IO/BIO/BILUO schemes, and
    that the O label is at position 0.
"""
label2idx, prefixes, labels_without_prefix, = _index_labels(
original_labels=labels,
strip_prefixes=False,
)
return _spans_to_array(
doc,
sources,
label2idx,
labels_without_prefix,
prefixes
)
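# --- Illustrative usage sketch (not part of the original module) ---
# Converts two made-up labelling layers ("lf1" and "lf2" are hypothetical
# names) into the token/source label matrix; assumes "en_core_web_md".
def _demo_spans_to_array():
    nlp = get_spacy_model("en_core_web_md")
    doc = nlp("Barack Obama visited Paris.")
    doc.spans["lf1"] = [Span(doc, 0, 2, label="PERSON")]
    doc.spans["lf2"] = [Span(doc, 3, 4, label="GPE")]
    labels = ["O", "B-PERSON", "I-PERSON", "B-GPE", "I-GPE"]
    print(spans_to_array(doc, labels, sources=["lf1", "lf2"]))
    # Expected to print something like:
    # [[1 0]
    #  [2 0]
    #  [0 0]
    #  [0 3]
    #  [0 0]]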
def _index_labels(
original_labels: List[str],
strip_prefixes: bool = False
) -> Tuple[Dict[str, int], Set[str], Set[str]]:
""" Normalize and index a list of labels to:
1. Generate a mapping from labels to indices
2. Identify label prefixes (e.g., I, B, L, etc.)
3. Identify list of labels without prefixes
If `strip_prefixes` is True, we normalize IO/BIO/BILUO labels
    (e.g., B-PERSON and I-PERSON will both be normalized to PERSON).
    If `strip_prefixes` is False, we assume the labels
    use IO/BIO/BILUO formats and maintain distinct labels/indices for labels
like B-PERSON and I-PERSON.
NB: We assume that the first label in labels is 'O' for the null token.
"""
labels = []
prefixes = set()
labels_without_prefix = set()
for original_label in original_labels:
if "-" in original_label:
# Normalize B-PER and I-PER to PER
prefix, normalized_label = original_label.split("-", 1)
prefixes.add(prefix)
else:
# No normalization required
normalized_label = original_label
# Track labels without prefixes
labels_without_prefix.add(normalized_label)
if strip_prefixes:
# Use normalized label for token labeling
if normalized_label not in labels:
labels.append(normalized_label)
else:
# Use original label for token labeling
if original_label not in labels:
labels.append(original_label)
# Generate mapping of labels to label indices
label2idx = {label: i for i, label in enumerate(labels)}
return label2idx, prefixes, labels_without_prefix
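# --- Illustrative usage sketch (not part of the original module) ---
# Shows the label indexing with and without prefix stripping for a small
# BIO label set (set printing order may vary).
def _demo_index_labels():
    labels = ["O", "B-PERSON", "I-PERSON", "B-ORG", "I-ORG"]
    print(_index_labels(labels, strip_prefixes=False))
    # ({'O': 0, 'B-PERSON': 1, 'I-PERSON': 2, 'B-ORG': 3, 'I-ORG': 4},
    #  {'B', 'I'}, {'O', 'PERSON', 'ORG'})
    print(_index_labels(labels, strip_prefixes=True))
    # ({'O': 0, 'PERSON': 1, 'ORG': 2}, {'B', 'I'}, {'O', 'PERSON', 'ORG'})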
def _spans_to_array(
doc: Doc,
sources: List[str],
label2idx: Dict[str, int],
labels_without_prefix: Set[str],
prefixes: Optional[Set[str]] = None,
warn_missing_labels: bool = False
) -> np.ndarray:
"""Convert the annotations of a spacy document into a 2D array.
Each row corresponds to a token, and each column to a labelling
source. In other words, the value at (i,j) represents the prediction
of source j for token i. This prediction is expressed as the
index of the label in the labels.
NB:
    - Sources should be a list of labelling sources. If None, all sources
      are employed.
- If `prefixes` are provided (e.g., [I, B, L]), it is assumed that the
labels in `label2idx` contain the prefixes (e.g., I-PERSON,
B-PERSON).
    - If `prefixes` are not provided, it is assumed that the labels in
      `label2idx` do not contain prefixes (e.g., PERSON).
    - We also assume the O label is at position 0.
"""
if sources is None:
sources = list(doc.spans.keys())
if warn_missing_labels:
missing_labels = set()
# Creating the numpy array itself
data = np.zeros((len(doc), len(sources)), dtype=np.int16)
for source_index, source in enumerate(sources):
for span in doc.spans.get(source, []):
if span.label_ not in labels_without_prefix:
if warn_missing_labels:
missing_labels.add(span.label_)
continue
if prefixes is None:
# Do not use prefix labels (e.g., use PER instead of
# B-PER, I-PER, etc.)
data[span.start:span.end, source_index] = label2idx[
span.label_
]
else:
# If the span is a single token, we can use U
if "U" in prefixes and len(span) == 1:
data[span.start, source_index] = label2idx[
"U-%s" % span.label_
]
continue
# Otherwise, we use B, I and L
if "B" in prefixes:
data[span.start, source_index] = label2idx[
"B-%s" % span.label_
]
if "I" in prefixes:
start_i = (span.start+1) if "B" in prefixes else span.start
end_i = (span.end-1) if "L" in prefixes else span.end
data[start_i:end_i, source_index] = label2idx[
"I-%s" % span.label_
]
if "L" in prefixes:
data[span.end-1, source_index] = label2idx[
"L-%s" % span.label_
]
if warn_missing_labels:
        print(
            "WARNING: span labels were found in the document that were not "
            "provided in `labels_without_prefix`: {}".format(missing_labels)
        )
return data
def token_array_to_spans(agg_array: np.ndarray,
prefix_labels: List[str]) -> Dict[Tuple[int, int], str]:
"""Returns an dictionary of spans corresponding to the aggregated 2D
array. prefix_labels must be list of prefix labels such as B-PERSON,
I-ORG etc., of same size as the number of columns in the array."""
spans = {}
i = 0
while i < len(agg_array):
if np.isscalar(agg_array[i]):
value_index = agg_array[i]
else: # If we have probabilities, select most likely label
value_index = agg_array[i].argmax()
if value_index == 0:
i += 1
continue
prefix_label = prefix_labels[value_index]
prefix, label = prefix_label.split("-", 1)
# If the prefix is "U", create a single-token span
if prefix == "U":
spans[(i, i+1)] = label
i += 1
# Otherwise, we need to continue until the span ends
elif prefix in {"B", "I"}:
start = i
i += 1
while i < len(agg_array):
if np.isscalar(agg_array[i]):
next_val = agg_array[i]
else:
next_val = agg_array[i].argmax()
if next_val == 0:
break
next_prefix_label = prefix_labels[next_val]
next_prefix, next_label = next_prefix_label.split("-", 1)
if next_prefix not in {"I", "L"}:
break
i += 1
spans[(start, i)] = label
return spans
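# --- Illustrative usage sketch (not part of the original module) ---
# Decodes a small BIO-encoded label array back into spans; the label set and
# array values are made up.
def _demo_token_array_to_spans():
    prefix_labels = ["O", "B-PERSON", "I-PERSON"]
    agg = np.array([1, 2, 0, 0, 1], dtype=np.int16)
    print(token_array_to_spans(agg, prefix_labels))
    # {(0, 2): 'PERSON', (4, 5): 'PERSON'}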
def token_array_to_probs(agg_array: np.ndarray,
prefix_labels: List[str]) -> Dict[int, Dict[str, float]]:
"""Given a 2D array containing, for each token, the probabilities for a
each possible output label in prefix form (B-PERSON, I-ORG, etc.), returns
a dictionary of dictionaries mapping token indices to probability distributions
over their possible labels. The "O" label and labels with zero probabilities
are ignored.
"""
# Initialising the label sequence
token_probs = {}
# We only look at labels beyond "O", and with non-zero probability
row_indices, col_indices = np.nonzero(agg_array[:, 1:])
for i, j in zip(row_indices, col_indices):
if i not in token_probs:
token_probs[i] = {prefix_labels[j+1]: agg_array[i, j+1]} #type: ignore
else:
token_probs[i][prefix_labels[j+1]] = agg_array[i, j+1] #type: ignore
return token_probs
def is_valid_start(prefix_label, encoding="BIO"):
"""Returns whether the prefix label is allowed to start a sequence"""
return (prefix_label == "O"
or prefix_label.startswith("B-")
or prefix_label.startswith("U-") or
(prefix_label.startswith("I-") and "B" not in encoding))
def is_valid_transition(prefix_label1, prefix_label2, encoding="BIO"):
"""Returns whether the two labels (associated with a prefix, such as B-PERSON,
I-ORG etc.) are allowed to follow one another according to the encoding (which
can be BIO, BILUO, IO, etc.)"""
if prefix_label1.startswith("B-"):
if ((prefix_label2.startswith("I-")
or prefix_label2.startswith("L-"))
and prefix_label1[2:] == prefix_label2[2:]):
return True
elif "U" not in encoding:
return (prefix_label2 == "O"
or prefix_label2.startswith("B-")
or prefix_label2.startswith("U-")
or (prefix_label2.startswith("I-") and "B" not in encoding))
elif prefix_label1.startswith("I-"):
if ((prefix_label2.startswith("I-")
or prefix_label2.startswith("L-"))
and prefix_label1[2:] == prefix_label2[2:]):
return True
elif "L" not in encoding:
return (prefix_label2 == "O"
or prefix_label2.startswith("B-")
or prefix_label2.startswith("U-")
or (prefix_label2.startswith("I-") and "B" not in encoding))
elif prefix_label1 == "O" or prefix_label1.startswith("L-") or prefix_label1.startswith("U-"):
return (prefix_label2 == "O"
or prefix_label2.startswith("B-")
or prefix_label2.startswith("U-")
or (prefix_label2.startswith("I-") and "B" not in encoding))
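# --- Illustrative usage sketch (not part of the original module) ---
# Checks a few start/transition constraints under a plain BIO encoding.
def _demo_transitions():
    print(is_valid_start("I-PERSON", encoding="BIO"))          # False
    print(is_valid_transition("B-PERSON", "I-PERSON", "BIO"))  # True
    print(is_valid_transition("O", "I-PERSON", "BIO"))         # False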
############################################
# Visualisation
############################################
def display_entities(doc: Doc, layer=None, add_tooltip=False):
"""Display the entities annotated in a spacy document, based on the
provided annotation layer(s). If layer is None, the method displays
the entities from Spacy.
This method will only work in a Jupyter Notebook or similar.
If add_tooltip is set to True, the visualisation also adds tooltips to show
    the predictions of each labelling function for a given token. This functionality
only works with Jupyter Lab (not Jupyter Notebook).
"""
import spacy.displacy
import IPython.core.display
if layer is None:
spans = doc.ents
elif type(layer) is list:
spans = get_spans(doc, layer)
elif type(layer) == str:
if "*" in layer:
matched_layers = [l for l in doc.spans
if re.match(layer.replace("*", ".*?")+"$", l)]
spans = get_spans(doc, matched_layers)
else:
spans = get_spans(doc, [layer])
else:
raise RuntimeError("Layer type not accepted")
entities = {}
for span in spans:
start_char = doc[span.start].idx
end_char = doc[span.end-1].idx + len(doc[span.end-1])
if (start_char, end_char) not in entities:
entities[(start_char, end_char)] = span.label_
# If we have several alternative labels for a span, join them with +
elif span.label_ not in entities[(start_char, end_char)]:
entities[(start_char, end_char)] = entities[(
start_char, end_char)] + "+" + span.label_
entities = [{"start": start, "end": end, "label": label}
for (start, end), label in entities.items()]
doc2 = {"text": doc.text, "title": None, "ents": entities}
html = spacy.displacy.render(doc2, jupyter=False, style="ent", manual=True)
if add_tooltip and type(layer)==str and "sources" in doc.spans[layer].attrs:
html = _enrich_with_tooltip(doc, html, doc.spans[layer].attrs["sources"]) # type: ignore
ipython_html = IPython.core.display.HTML(
'<span class="tex2jax_ignore">{}</span>'.format(html))
return IPython.core.display.display(ipython_html)
def _enrich_with_tooltip(doc: Doc, html: str, sources: List[str]):
"""Enrich the HTML produced by spacy with tooltips displaying the predictions
of each labelling function"""
import spacy.util
if len(doc.spans)==0:
return html
# Retrieves annotations for each token
annotations_by_tok = {}
for source in sources:
for span in doc.spans[source]:
for i in range(span.start, span.end):
annotations_by_tok[i] = annotations_by_tok.get(i, []) + [(source, span.label_)]
# We determine which characters are part of the HTML markup and not the text
all_chars_to_skip = set()
for fragment in re.finditer("<span.+?</span>", html):
all_chars_to_skip.update(range(fragment.start(0), fragment.end(0)))
for fragment in re.finditer("</?div.*?>", html):
all_chars_to_skip.update(range(fragment.start(0), fragment.end(0)))
for fragment in re.finditer("</?mark.*?>", html):
all_chars_to_skip.update(range(fragment.start(0), fragment.end(0)))
# We loop on each token
curr_pos = 0
new_fragments = []
for tok in doc:
# We search for the token position in the HTML
toktext = spacy.util.escape_html(tok.text)
if "\n" in toktext:
continue
start_pos = html.index(toktext, curr_pos)
if start_pos == -1:
raise RuntimeError("could not find", tok)
while any((i in all_chars_to_skip for i in range(start_pos, start_pos + len(toktext)))):
start_pos = html.index(toktext, start_pos+1)
if start_pos == -1:
raise RuntimeError("could not find", tok)
# We add the preceding fragment
new_fragments.append(html[curr_pos:start_pos])
# If the token has annotations, we create a tooltip
if tok.i in annotations_by_tok:
lines = ["%s:\t%s  " %
(ann, label) for ann, label in annotations_by_tok[tok.i]]
max_width = 7*max([len(l) for l in lines])
new_fragment = ("<label class='tooltip'>%s" % toktext +
"<span class='tooltip-text' style='width:%ipx'>"%max_width +
"%s</span></label>" %"<br>".join(lines))
else:
new_fragment = toktext
new_fragments.append(new_fragment)
curr_pos = start_pos + len(toktext)
new_fragments.append(html[curr_pos:])
new_html = """<style>
.tooltip { position: relative; border-bottom: 1px dotted black; }
.tooltip .tooltip-text {visibility: hidden; background-color: black; color: white;
line-height: 1.2; text-align: right; border-radius: 6px;
padding: 5px 0; position: absolute; z-index: 1; margin-left:1em;
opacity: 0; transition: opacity 1s;}
.tooltip .tooltip-text::after {position: absolute; top: 1.5em; right: 100%; margin-top: -5px;
border-width: 5px; border-style: solid;
border-color: transparent black transparent transparent;}
.tooltip:hover .tooltip-text {visibility: visible; opacity: 1;}
</style>
""" + "".join(new_fragments)
return new_html |
py | b41308dab80172d5dd5232cc360098a776053bc6 | from .syngas_adapted import build_model
__all__ = ['build_model']
|
py | b41309a09b31543000d3276bfaf0c9185a43f3d5 | from gym_minigrid.minigrid import *
from gym_minigrid.register import register
class EmptyEnv(MiniGridEnv):
"""
Empty grid environment, no obstacles, sparse reward
"""
def __init__(self, size=8):
super().__init__(
grid_size=size,
max_steps=4*size*size,
# Set this to True for maximum speed
see_through_walls=True
)
def _gen_grid(self, width, height):
# Create an empty grid
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.wall_rect(0, 0, width, height)
# Place the agent in the top-left corner
self.start_pos = (1, 1)
self.start_dir = 0
# Place a goal square in the bottom-right corner
self.grid.set(width - 2, height - 2, Goal())
self.mission = "get to the green goal square"
class EmptyEnv6x6(EmptyEnv):
def __init__(self):
super().__init__(size=6)
class EmptyEnv16x16(EmptyEnv):
def __init__(self):
super().__init__(size=16)
register(
id='MiniGrid-Empty-6x6-v0',
entry_point='gym_minigrid.envs:EmptyEnv6x6'
)
register(
id='MiniGrid-Empty-8x8-v0',
entry_point='gym_minigrid.envs:EmptyEnv'
)
register(
id='MiniGrid-Empty-16x16-v0',
entry_point='gym_minigrid.envs:EmptyEnv16x16'
)
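# --- Illustrative usage sketch (not part of the original file) ---
# Instantiates one of the environments registered above via the classic gym
# API (4-tuple step return); the number of random steps is arbitrary.
def _demo_empty_env():
    import gym
    env = gym.make('MiniGrid-Empty-8x8-v0')
    env.reset()
    for _ in range(5):
        obs, reward, done, info = env.step(env.action_space.sample())
    env.close()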
|
py | b4130ac953f8c63a69c89dba93e738848cb99dfc | import os
from hacktools import common
from PIL import Image
def run(data):
infile = data + "extract_BMP/FDT/000.BIN"
outfile = data + "repack_BMP/FDT/000.BIN"
imgfiles = [data + "font_input.png", data + "font_input2.png"]
common.logMessage("Repacking FDT from", imgfiles[0], "...")
common.copyFile(infile, outfile)
with common.Stream(outfile, "rb+", False) as f:
for fntnum in range(len(imgfiles)):
imgfile = imgfiles[fntnum]
if not os.path.isfile(imgfile):
common.logError("Input file", imgfile, "not found")
return
img = Image.open(imgfile)
img = img.convert("RGB")
pixels = img.load()
            width = f.readUShort()
            # The width stored in the file is read (to advance the stream) but
            # then overridden: the first font uses 8 pixels, the second 16
            width = 8 if fntnum == 0 else 16
height = f.readUShort()
charn = f.readUShort()
charperline = 16
bytelen = (width * height) // 8
imgwidth = (charperline * width) + charperline + 1
imgx = 1
imgy = 1
for i in range(charn):
charx = imgx
chary = imgy
for j in range(bytelen):
data = 0
for x in range(8):
if pixels[charx, chary] == (255, 255, 255):
data |= 1 << (7 - x)
charx += 1
if charx - imgx == width:
charx = imgx
chary += 1
f.writeByte(data)
imgx += width + 1
if imgx == imgwidth:
imgx = 1
imgy += height + 1
common.logMessage("Done!")
|
py | b4130b91941028cd3c0680d79c09a04fd3e40824 | # orm/mapper.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Logic to map Python classes to and from selectables.
Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central
configurational unit which associates a class with a database table.
This is a semi-private module; the main configurational API of the ORM is
available in :class:`~sqlalchemy.orm.`.
"""
from __future__ import absolute_import
from collections import deque
from itertools import chain
import sys
import weakref
from . import attributes
from . import exc as orm_exc
from . import instrumentation
from . import loading
from . import properties
from . import util as orm_util
from .base import _class_to_mapper
from .base import _state_mapper
from .base import class_mapper
from .base import state_str
from .interfaces import _MappedAttribute
from .interfaces import EXT_SKIP
from .interfaces import InspectionAttr
from .interfaces import MapperProperty
from .interfaces import ORMEntityColumnsClauseRole
from .interfaces import ORMFromClauseRole
from .interfaces import StrategizedProperty
from .path_registry import PathRegistry
from .. import event
from .. import exc as sa_exc
from .. import inspection
from .. import log
from .. import schema
from .. import sql
from .. import util
from ..sql import base as sql_base
from ..sql import coercions
from ..sql import expression
from ..sql import operators
from ..sql import roles
from ..sql import util as sql_util
from ..sql import visitors
from ..sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from ..util import HasMemoized
_mapper_registries = weakref.WeakKeyDictionary()
_legacy_registry = None
def _all_registries():
with _CONFIGURE_MUTEX:
return set(_mapper_registries)
def _unconfigured_mappers():
for reg in _all_registries():
for mapper in reg._mappers_to_configure():
yield mapper
_already_compiling = False
# a constant returned by _get_attr_by_column to indicate
# this mapper is not handling an attribute for a particular
# column
NO_ATTRIBUTE = util.symbol("NO_ATTRIBUTE")
# lock used to synchronize the "mapper configure" step
_CONFIGURE_MUTEX = util.threading.RLock()
@inspection._self_inspects
@log.class_logger
class Mapper(
ORMFromClauseRole,
ORMEntityColumnsClauseRole,
sql_base.MemoizedHasCacheKey,
InspectionAttr,
):
"""Defines an association between a Python class and a database table or
other relational structure, so that ORM operations against the class may
proceed.
The :class:`_orm.Mapper` object is instantiated using mapping methods
present on the :class:`_orm.registry` object. For information
about instantiating new :class:`_orm.Mapper` objects, see
:ref:`orm_mapping_classes_toplevel`.
"""
_dispose_called = False
_ready_for_configure = False
@util.deprecated_params(
non_primary=(
"1.3",
"The :paramref:`.mapper.non_primary` parameter is deprecated, "
"and will be removed in a future release. The functionality "
"of non primary mappers is now better suited using the "
":class:`.AliasedClass` construct, which can also be used "
"as the target of a :func:`_orm.relationship` in 1.3.",
),
)
def __init__(
self,
class_,
local_table=None,
properties=None,
primary_key=None,
non_primary=False,
inherits=None,
inherit_condition=None,
inherit_foreign_keys=None,
always_refresh=False,
version_id_col=None,
version_id_generator=None,
polymorphic_on=None,
_polymorphic_map=None,
polymorphic_identity=None,
concrete=False,
with_polymorphic=None,
polymorphic_load=None,
allow_partial_pks=True,
batch=True,
column_prefix=None,
include_properties=None,
exclude_properties=None,
passive_updates=True,
passive_deletes=False,
confirm_deleted_rows=True,
eager_defaults=False,
legacy_is_orphan=False,
_compiled_cache_size=100,
):
r"""Direct constructor for a new :class:`_orm.Mapper` object.
The :func:`_orm.mapper` function is normally invoked through the
use of the :class:`_orm.registry` object through either the
:ref:`Declarative <orm_declarative_mapping>` or
:ref:`Imperative <orm_imperative_mapping>` mapping styles.
.. versionchanged:: 1.4 The :func:`_orm.mapper` function should not
be called directly for classical mapping; for a classical mapping
configuration, use the :meth:`_orm.registry.map_imperatively`
method. The :func:`_orm.mapper` function may become private in a
future release.
Parameters documented below may be passed to either the
:meth:`_orm.registry.map_imperatively` method, or may be passed in the
``__mapper_args__`` declarative class attribute described at
:ref:`orm_declarative_mapper_options`.
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`_schema.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the
:class:`_schema.Table`
produced as a result of the ``__tablename__``
and :class:`_schema.Column` arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`_query.Query.populate_existing`.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`_schema.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param confirm_deleted_rows: defaults to True; when a DELETE occurs
          of one or more rows based on specific primary keys, a warning is
emitted when the number of rows matched does not equal the number
of rows expected. This parameter may be set to False to handle the
case where database ON DELETE CASCADE rules may be deleting some of
those rows automatically. The warning may be changed to an
exception in a future release.
.. versionadded:: 0.9.4 - added
:paramref:`.mapper.confirm_deleted_rows` as well as conditional
matched row checking on delete.
:param eager_defaults: if True, the ORM will immediately fetch the
value of server-generated default values after an INSERT or UPDATE,
rather than leaving them as expired to be fetched on next access.
This can be used for event schemes where the server-generated values
are needed immediately before the flush completes. By default,
this scheme will emit an individual ``SELECT`` statement per row
inserted or updated, which note can add significant performance
overhead. However, if the
target database supports :term:`RETURNING`, the default values will
be returned inline with the INSERT or UPDATE statement, which can
greatly enhance performance for an application that needs frequent
access to just-generated server defaults.
.. seealso::
:ref:`orm_server_defaults`
.. versionchanged:: 0.9.0 The ``eager_defaults`` option can now
make use of :term:`RETURNING` for backends which support it.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
:param inherits: A mapped class or the corresponding
:class:`_orm.Mapper`
of one indicating a superclass to which this :class:`_orm.Mapper`
should *inherit* from. The mapped class here must be a subclass
of the other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
.. seealso::
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and
the columns present are missing a :class:`_schema.ForeignKey`
configuration, this parameter can be used to specify which columns
are "foreign". In most cases can be left as ``None``.
:param legacy_is_orphan: Boolean, defaults to ``False``.
When ``True``, specifies that "legacy" orphan consideration
is to be applied to objects mapped by this mapper, which means
that a pending (that is, not persistent) object is auto-expunged
from an owning :class:`.Session` only when it is de-associated
from *all* parents that specify a ``delete-orphan`` cascade towards
this mapper. The new default behavior is that the object is
auto-expunged when it is de-associated with *any* of its parents
that specify ``delete-orphan`` cascade. This behavior is more
consistent with that of a persistent object, and allows behavior to
be consistent in more scenarios independently of whether or not an
orphan object has been flushed yet or not.
See the change note and example at :ref:`legacy_is_orphan_addition`
for more detail on this change.
:param non_primary: Specify that this :class:`_orm.Mapper`
is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`_orm.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
.. seealso::
:ref:`relationship_aliased_class` - the new pattern that removes
the need for the :paramref:`_orm.Mapper.non_primary` flag.
:param passive_deletes: Indicates DELETE behavior of foreign key
columns when a joined-table inheritance entity is being deleted.
Defaults to ``False`` for a base mapper; for an inheriting mapper,
defaults to ``False`` unless the value is set to ``True``
on the superclass mapper.
When ``True``, it is assumed that ON DELETE CASCADE is configured
on the foreign key relationships that link this mapper's table
to its superclass table, so that when the unit of work attempts
to delete the entity, it need only emit a DELETE statement for the
superclass table, and not this table.
When ``False``, a DELETE statement is emitted for this mapper's
table individually. If the primary key attributes local to this
table are unloaded, then a SELECT must be emitted in order to
validate these attributes; note that the primary key columns
of a joined-table subclass are not part of the "primary key" of
the object as a whole.
Note that a value of ``True`` is **always** forced onto the
subclass mappers; that is, it's not possible for a superclass
to specify passive_deletes without this taking effect for
all subclass mappers.
.. versionadded:: 1.1
.. seealso::
:ref:`passive_deletes` - description of similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_updates` - supporting ON UPDATE
CASCADE for joined-table inheritance mappers
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table
inheritance mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The unit of work process will
emit an UPDATE statement for the dependent columns during a
primary key change.
.. seealso::
:ref:`passive_updates` - description of a similar feature as
used with :func:`_orm.relationship`
:paramref:`.mapper.passive_deletes` - supporting ON DELETE
CASCADE for joined-table inheritance mappers
:param polymorphic_load: Specifies "polymorphic loading" behavior
for a subclass in an inheritance hierarchy (joined and single
table inheritance only). Valid values are:
* "'inline'" - specifies this class should be part of the
"with_polymorphic" mappers, e.g. its columns will be included
in a SELECT query against the base.
* "'selectin'" - specifies that when instances of this class
are loaded, an additional SELECT will be emitted to retrieve
the columns specific to this subclass. The SELECT uses
IN to fetch multiple subclasses at once.
.. versionadded:: 1.2
.. seealso::
:ref:`with_polymorphic_mapper_config`
:ref:`polymorphic_selectin`
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`_schema.Column` object that's
present in the mapped :class:`_schema.Table`::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":discriminator,
"polymorphic_identity":"employee"
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee"),
"polymorphic_identity":"employee"
}
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
employee_type = column_property(
case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee")
)
__mapper_args__ = {
"polymorphic_on":employee_type,
"polymorphic_identity":"employee"
}
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`_schema.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
.. warning::
Currently, **only one discriminator column may be set**, typically
on the base-most class in the hierarchy. "Cascading" polymorphic
columns are not yet supported.
.. seealso::
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that
:class:`_schema.Column`
objects present in
the mapped :class:`_schema.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
:param primary_key: A list of :class:`_schema.Column`
objects which define
the primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
:param version_id_col: A :class:`_schema.Column`
that will be used to keep a running version id of rows
in the table. This is used to detect concurrent updates or
the presence of stale data in a flush. The methodology is to
detect if an UPDATE statement does not match the last known
version id, a
:class:`~sqlalchemy.orm.exc.StaleDataError` exception is
thrown.
By default, the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies an alternative version
generator.
.. seealso::
:ref:`mapper_version_counter` - discussion of version counting
and rationale.
:param version_id_generator: Define how new version ids should
be generated. Defaults to ``None``, which indicates that
a simple integer counting scheme be employed. To provide a custom
versioning scheme, provide a callable function of the form::
def generate_version(version):
return next_version
Alternatively, server-side versioning functions such as triggers,
or programmatic versioning schemes outside of the version id
generator may be used, by specifying the value ``False``.
Please see :ref:`server_side_version_counter` for a discussion
of important points when using this option.
.. versionadded:: 0.9.0 ``version_id_generator`` supports
server-side version number generation.
.. seealso::
:ref:`custom_version_counter`
:ref:`server_side_version_counter`
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
.. seealso::
:ref:`with_polymorphic` - discussion of polymorphic querying
techniques.
"""
self.class_ = util.assert_arg_type(class_, type, "class_")
self._sort_key = "%s.%s" % (
self.class_.__module__,
self.class_.__name__,
)
self.class_manager = None
self._primary_key_argument = util.to_list(primary_key)
self.non_primary = non_primary
self.always_refresh = always_refresh
if isinstance(version_id_col, MapperProperty):
self.version_id_prop = version_id_col
self.version_id_col = None
else:
self.version_id_col = version_id_col
if version_id_generator is False:
self.version_id_generator = False
elif version_id_generator is None:
self.version_id_generator = lambda x: (x or 0) + 1
else:
self.version_id_generator = version_id_generator
self.concrete = concrete
self.single = False
self.inherits = inherits
if local_table is not None:
self.local_table = coercions.expect(
roles.StrictFromClauseRole, local_table
)
else:
self.local_table = None
self.inherit_condition = inherit_condition
self.inherit_foreign_keys = inherit_foreign_keys
self._init_properties = properties or {}
self._delete_orphans = []
self.batch = batch
self.eager_defaults = eager_defaults
self.column_prefix = column_prefix
self.polymorphic_on = (
coercions.expect(
roles.ColumnArgumentOrKeyRole,
polymorphic_on,
argname="polymorphic_on",
)
if polymorphic_on is not None
else None
)
self._dependency_processors = []
self.validators = util.EMPTY_DICT
self.passive_updates = passive_updates
self.passive_deletes = passive_deletes
self.legacy_is_orphan = legacy_is_orphan
self._clause_adapter = None
self._requires_row_aliasing = False
self._inherits_equated_pairs = None
self._memoized_values = {}
self._compiled_cache_size = _compiled_cache_size
self._reconstructor = None
self.allow_partial_pks = allow_partial_pks
if self.inherits and not self.concrete:
self.confirm_deleted_rows = False
else:
self.confirm_deleted_rows = confirm_deleted_rows
self._set_with_polymorphic(with_polymorphic)
self.polymorphic_load = polymorphic_load
# our 'polymorphic identity', a string name that when located in a
# result set row indicates this Mapper should be used to construct
# the object instance for that row.
self.polymorphic_identity = polymorphic_identity
# a dictionary of 'polymorphic identity' names, associating those
# names with Mappers that will be used to construct object instances
# upon a select operation.
if _polymorphic_map is None:
self.polymorphic_map = {}
else:
self.polymorphic_map = _polymorphic_map
if include_properties is not None:
self.include_properties = util.to_set(include_properties)
else:
self.include_properties = None
if exclude_properties:
self.exclude_properties = util.to_set(exclude_properties)
else:
self.exclude_properties = None
# prevent this mapper from being constructed
# while a configure_mappers() is occurring (and defer a
# configure_mappers() until construction succeeds)
with _CONFIGURE_MUTEX:
self.dispatch._events._new_mapper_instance(class_, self)
self._configure_inheritance()
self._configure_class_instrumentation()
self._configure_properties()
self._configure_polymorphic_setter()
self._configure_pks()
self.registry._flag_new_mapper(self)
self._log("constructed")
self._expire_memoizations()
# major attributes initialized at the classlevel so that
# they can be Sphinx-documented.
is_mapper = True
"""Part of the inspection API."""
represents_outer_join = False
@property
def mapper(self):
"""Part of the inspection API.
Returns self.
"""
return self
def _gen_cache_key(self, anon_map, bindparams):
return (self,)
@property
def entity(self):
r"""Part of the inspection API.
Returns self.class\_.
"""
return self.class_
local_table = None
"""The :class:`_expression.Selectable` which this :class:`_orm.Mapper`
manages.
Typically is an instance of :class:`_schema.Table` or
:class:`_expression.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`_orm.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`_orm.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be ``None``.
.. seealso::
:attr:`_orm.Mapper.persist_selectable`.
"""
persist_selectable = None
"""The :class:`_expression.Selectable` to which this :class:`_orm.Mapper`
is mapped.
Typically an instance of :class:`_schema.Table`,
:class:`_expression.Join`, or :class:`_expression.Alias`.
The :attr:`_orm.Mapper.persist_selectable` is separate from
:attr:`_orm.Mapper.selectable` in that the former represents columns
that are mapped on this class or its superclasses, whereas the
latter may be a "polymorphic" selectable that contains additional columns
which are in fact mapped on subclasses only.
"persist selectable" is the "thing the mapper writes to" and
"selectable" is the "thing the mapper selects from".
:attr:`_orm.Mapper.persist_selectable` is also separate from
:attr:`_orm.Mapper.local_table`, which represents the set of columns that
are locally mapped on this class directly.
.. seealso::
:attr:`_orm.Mapper.selectable`.
:attr:`_orm.Mapper.local_table`.
"""
inherits = None
"""References the :class:`_orm.Mapper` which this :class:`_orm.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
configured = False
"""Represent ``True`` if this :class:`_orm.Mapper` has been configured.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
.. seealso::
:func:`.configure_mappers`.
"""
concrete = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
tables = None
"""An iterable containing the collection of :class:`_schema.Table` objects
which this :class:`_orm.Mapper` is aware of.
If the mapper is mapped to a :class:`_expression.Join`, or an
:class:`_expression.Alias`
representing a :class:`_expression.Select`, the individual
:class:`_schema.Table`
objects that comprise the full construct will be represented here.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
primary_key = None
"""An iterable containing the collection of :class:`_schema.Column`
objects
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`_orm.Mapper`.
This list is against the selectable in
:attr:`_orm.Mapper.persist_selectable`.
In the case of inheriting mappers, some columns may be managed by a
superclass mapper. For example, in the case of a
:class:`_expression.Join`, the
primary key is determined by all of the primary key columns across all
tables referenced by the :class:`_expression.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`_orm.Mapper`
features a ``primary_key`` argument that can override what the
:class:`_orm.Mapper` considers as primary key columns.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_ = None
"""The Python class which this :class:`_orm.Mapper` maps.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
class_manager = None
"""The :class:`.ClassManager` which maintains event listeners
and class-bound descriptors for this :class:`_orm.Mapper`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
single = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a single table
inheritance mapper.
:attr:`_orm.Mapper.local_table` will be ``None`` if this flag is set.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
non_primary = None
"""Represent ``True`` if this :class:`_orm.Mapper` is a "non-primary"
mapper, e.g. a mapper that is used only to select rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_on = None
"""The :class:`_schema.Column` or SQL expression specified as the
``polymorphic_on`` argument
for this :class:`_orm.Mapper`, within an inheritance scenario.
This attribute is normally a :class:`_schema.Column` instance but
may also be an expression, such as one derived from
:func:`.cast`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_map = None
"""A mapping of "polymorphic identity" identifiers mapped to
:class:`_orm.Mapper` instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`_orm.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
polymorphic_identity = None
"""Represent an identifier which is matched against the
:attr:`_orm.Mapper.polymorphic_on` column during result row loading.
Used only with inheritance, this object can be of any type which is
comparable to the type of column represented by
:attr:`_orm.Mapper.polymorphic_on`.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
base_mapper = None
"""The base-most :class:`_orm.Mapper` in an inheritance chain.
In a non-inheriting scenario, this attribute will always be this
:class:`_orm.Mapper`. In an inheritance scenario, it references
the :class:`_orm.Mapper` which is parent to all other :class:`_orm.Mapper`
objects in the inheritance chain.
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
columns = None
"""A collection of :class:`_schema.Column` or other scalar expression
objects maintained by this :class:`_orm.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
any :class:`_schema.Table` object,
except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
:class:`_schema.Column` itself. Additionally, scalar expressions mapped
by :func:`.column_property` are also present here.
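    For example, assuming a hypothetical ``User`` class whose ``name``
    attribute is mapped to a same-named column, the underlying
    :class:`_schema.Column` may be retrieved by attribute name::

        from sqlalchemy import inspect

        # "User" is a hypothetical mapped class, not defined in this module
        name_col = inspect(User).columns["name"]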
This is a *read only* attribute determined during mapper construction.
Behavior is undefined if directly modified.
"""
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`_orm.validates` decorator.
    The dictionary contains string attribute names as keys, each mapped to
    a tuple of the validation method and the options passed to
    :func:`_orm.validates`.
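    For example, given a hypothetical mapped class ``User`` that decorates a
    method with ``@validates("email")``, the entry may be retrieved as::

        from sqlalchemy import inspect

        # "User" is a hypothetical mapped class, not defined in this module
        method, opts = inspect(User).validators["email"]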
"""
c = None
"""A synonym for :attr:`_orm.Mapper.columns`."""
@property
@util.deprecated("1.3", "Use .persist_selectable")
def mapped_table(self):
return self.persist_selectable
@util.memoized_property
def _path_registry(self):
return PathRegistry.per_mapper(self)
def _configure_inheritance(self):
"""Configure settings related to inheriting and/or inherited mappers
being present."""
# a set of all mappers which inherit from this one.
self._inheriting_mappers = util.WeakSequence()
if self.inherits:
if isinstance(self.inherits, type):
self.inherits = class_mapper(self.inherits, configure=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'"
% (self.class_.__name__, self.inherits.class_.__name__)
)
self.dispatch._update(self.inherits.dispatch)
if self.non_primary != self.inherits.non_primary:
                np = "primary" if not self.non_primary else "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper"
% (np, self.class_.__name__, np)
)
# inherit_condition is optional.
if self.local_table is None:
self.local_table = self.inherits.local_table
self.persist_selectable = self.inherits.persist_selectable
self.single = True
elif self.local_table is not self.inherits.local_table:
if self.concrete:
self.persist_selectable = self.local_table
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
else:
if self.inherit_condition is None:
# figure out inherit condition from our table to the
# immediate table of the inherited mapper, not its
# full table which could pull in other stuff we don't
# want (allows test/inheritance.InheritTest4 to pass)
try:
self.inherit_condition = sql_util.join_condition(
self.inherits.local_table, self.local_table
)
except sa_exc.NoForeignKeysError as nfe:
assert self.inherits.local_table is not None
assert self.local_table is not None
util.raise_(
sa_exc.NoForeignKeysError(
"Can't determine the inherit condition "
"between inherited table '%s' and "
"inheriting "
"table '%s'; tables have no "
"foreign key relationships established. "
"Please ensure the inheriting table has "
"a foreign key relationship to the "
"inherited "
"table, or provide an "
"'on clause' using "
"the 'inherit_condition' mapper argument."
% (
self.inherits.local_table.description,
self.local_table.description,
)
),
replace_context=nfe,
)
except sa_exc.AmbiguousForeignKeysError as afe:
assert self.inherits.local_table is not None
assert self.local_table is not None
util.raise_(
sa_exc.AmbiguousForeignKeysError(
"Can't determine the inherit condition "
"between inherited table '%s' and "
"inheriting "
"table '%s'; tables have more than one "
"foreign key relationship established. "
"Please specify the 'on clause' using "
"the 'inherit_condition' mapper argument."
% (
self.inherits.local_table.description,
self.local_table.description,
)
),
replace_context=afe,
)
self.persist_selectable = sql.join(
self.inherits.persist_selectable,
self.local_table,
self.inherit_condition,
)
fks = util.to_set(self.inherit_foreign_keys)
self._inherits_equated_pairs = sql_util.criterion_as_pairs(
self.persist_selectable.onclause,
consider_as_foreign_keys=fks,
)
else:
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None and not self.concrete:
self._identity_class = self.inherits._identity_class
else:
self._identity_class = self.class_
if self.version_id_col is None:
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
elif (
self.inherits.version_id_col is not None
and self.version_id_col is not self.inherits.version_id_col
):
util.warn(
"Inheriting version_id_col '%s' does not match inherited "
"version_id_col '%s' and will not automatically populate "
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning."
% (
self.version_id_col.description,
self.inherits.version_id_col.description,
)
)
self.polymorphic_map = self.inherits.polymorphic_map
self.batch = self.inherits.batch
self.inherits._inheriting_mappers.append(self)
self.base_mapper = self.inherits.base_mapper
self.passive_updates = self.inherits.passive_updates
self.passive_deletes = (
self.inherits.passive_deletes or self.passive_deletes
)
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
if self.polymorphic_identity in self.polymorphic_map:
util.warn(
"Reassigning polymorphic association for identity %r "
"from %r to %r: Check for duplicate use of %r as "
"value for polymorphic_identity."
% (
self.polymorphic_identity,
self.polymorphic_map[self.polymorphic_identity],
self,
self.polymorphic_identity,
)
)
self.polymorphic_map[self.polymorphic_identity] = self
if self.polymorphic_load and self.concrete:
raise sa_exc.ArgumentError(
"polymorphic_load is not currently supported "
"with concrete table inheritance"
)
if self.polymorphic_load == "inline":
self.inherits._add_with_polymorphic_subclass(self)
elif self.polymorphic_load == "selectin":
pass
elif self.polymorphic_load is not None:
raise sa_exc.ArgumentError(
"unknown argument for polymorphic_load: %r"
% self.polymorphic_load
)
else:
self._all_tables = set()
self.base_mapper = self
self.persist_selectable = self.local_table
if self.polymorphic_identity is not None:
self.polymorphic_map[self.polymorphic_identity] = self
self._identity_class = self.class_
if self.persist_selectable is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a persist_selectable specified."
% self
)
def _set_with_polymorphic(self, with_polymorphic):
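        # Normalize the incoming argument into a (spec, selectable) pair:
        # "*" selects all subclass mappers, a sequence of classes/mappers
        # selects specific ones, and a (classes, selectable) two-tuple also
        # supplies an explicit selectable.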
if with_polymorphic == "*":
self.with_polymorphic = ("*", None)
elif isinstance(with_polymorphic, (tuple, list)):
if isinstance(
with_polymorphic[0], util.string_types + (tuple, list)
):
self.with_polymorphic = with_polymorphic
else:
self.with_polymorphic = (with_polymorphic, None)
elif with_polymorphic is not None:
raise sa_exc.ArgumentError("Invalid setting for with_polymorphic")
else:
self.with_polymorphic = None
if self.with_polymorphic and self.with_polymorphic[1] is not None:
self.with_polymorphic = (
self.with_polymorphic[0],
coercions.expect(
roles.StrictFromClauseRole,
self.with_polymorphic[1],
allow_select=True,
),
)
if self.configured:
self._expire_memoizations()
def _add_with_polymorphic_subclass(self, mapper):
subcl = mapper.class_
if self.with_polymorphic is None:
self._set_with_polymorphic((subcl,))
elif self.with_polymorphic[0] != "*":
self._set_with_polymorphic(
(self.with_polymorphic[0] + (subcl,), self.with_polymorphic[1])
)
def _set_concrete_base(self, mapper):
"""Set the given :class:`_orm.Mapper` as the 'inherits' for this
:class:`_orm.Mapper`, assuming this :class:`_orm.Mapper` is concrete
and does not already have an inherits."""
assert self.concrete
assert not self.inherits
assert isinstance(mapper, Mapper)
self.inherits = mapper
self.inherits.polymorphic_map.update(self.polymorphic_map)
self.polymorphic_map = self.inherits.polymorphic_map
for mapper in self.iterate_to_root():
if mapper.polymorphic_on is not None:
mapper._requires_row_aliasing = True
self.batch = self.inherits.batch
for mp in self.self_and_descendants:
mp.base_mapper = self.inherits.base_mapper
self.inherits._inheriting_mappers.append(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
self._configure_polymorphic_setter(True)
def _configure_class_instrumentation(self):
"""If this mapper is to be a primary mapper (i.e. the
non_primary flag is not set), associate this Mapper with the
given class and entity name.
Subsequent calls to ``class_mapper()`` for the ``class_`` / ``entity``
name combination will return this mapper. Also decorate the
`__init__` method on the mapped class to include optional
auto-session attachment logic.
"""
# we expect that declarative has applied the class manager
# already and set up a registry. if this is None,
# we will emit a deprecation warning below when we also see that
# it has no registry.
manager = attributes.manager_of_class(self.class_)
if self.non_primary:
if not manager or not manager.is_mapped:
raise sa_exc.InvalidRequestError(
"Class %s has no primary mapper configured. Configure "
"a primary mapper first before setting up a non primary "
"Mapper." % self.class_
)
self.class_manager = manager
self.registry = manager.registry
self._identity_class = manager.mapper._identity_class
manager.registry._add_non_primary_mapper(self)
return
if manager is not None:
assert manager.class_ is self.class_
if manager.is_mapped:
# changed in #7579:
# this message is defined in two places as of this change,
# also in decl_api -> _add_manager(). in 2.0, this codepath
# is removed as any calls to mapper() / Mapper without
# the registry setting up first will be rejected.
raise sa_exc.ArgumentError(
"Class '%s' already has a primary mapper defined. "
% self.class_
)
# else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
self.dispatch.instrument_class(self, self.class_)
# this invokes the class_instrument event and sets up
# the __init__ method. documented behavior is that this must
# occur after the instrument_class event above.
# yes two events with the same two words reversed and different APIs.
# :(
manager = instrumentation.register_class(
self.class_,
mapper=self,
expired_attribute_loader=util.partial(
loading.load_scalar_attributes, self
),
# finalize flag means instrument the __init__ method
# and call the class_instrument event
finalize=True,
)
if not manager.registry:
util.warn_deprecated_20(
"Calling the mapper() function directly outside of a "
"declarative registry is deprecated."
" Please use the sqlalchemy.orm.registry.map_imperatively() "
"function for a classical mapping."
)
assert _legacy_registry is not None
_legacy_registry._add_manager(manager)
self.class_manager = manager
self.registry = manager.registry
# The remaining members can be added by any mapper,
        # entity_name None or not.
if manager.mapper is None:
return
event.listen(manager, "init", _event_on_init, raw=True)
for key, method in util.iterate_attributes(self.class_):
if key == "__init__" and hasattr(method, "_sa_original_init"):
method = method._sa_original_init
if hasattr(method, "__func__"):
method = method.__func__
if callable(method):
if hasattr(method, "__sa_reconstructor__"):
self._reconstructor = method
event.listen(manager, "load", _event_on_load, raw=True)
elif hasattr(method, "__sa_validators__"):
validation_opts = method.__sa_validation_opts__
for name in method.__sa_validators__:
if name in self.validators:
raise sa_exc.InvalidRequestError(
"A validation function for mapped "
"attribute %r on mapper %s already exists."
% (name, self)
)
self.validators = self.validators.union(
{name: (method, validation_opts)}
)
def _set_dispose_flags(self):
self.configured = True
self._ready_for_configure = True
self._dispose_called = True
self.__dict__.pop("_configure_failed", None)
def _configure_pks(self):
self.tables = sql_util.find_tables(self.persist_selectable)
self._pks_by_table = {}
self._cols_by_table = {}
all_cols = util.column_set(
chain(*[col.proxy_set for col in self._columntoproperty])
)
pk_cols = util.column_set(c for c in all_cols if c.primary_key)
# identify primary key columns which are also mapped by this mapper.
tables = set(self.tables + [self.persist_selectable])
self._all_tables.update(tables)
for t in tables:
if t.primary_key and pk_cols.issuperset(t.primary_key):
# ordering is important since it determines the ordering of
# mapper.primary_key (and therefore query.get())
self._pks_by_table[t] = util.ordered_column_set(
t.primary_key
).intersection(pk_cols)
self._cols_by_table[t] = util.ordered_column_set(t.c).intersection(
all_cols
)
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
# otherwise, see that we got a full PK for the mapped table
elif (
self.persist_selectable not in self._pks_by_table
or len(self._pks_by_table[self.persist_selectable]) == 0
):
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
elif self.local_table not in self._pks_by_table and isinstance(
self.local_table, schema.Table
):
util.warn(
"Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
% self.local_table.description
)
if (
self.inherits
and not self.concrete
and not self._primary_key_argument
):
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or persist_selectable pks
if self._primary_key_argument:
primary_key = [
self.persist_selectable.corresponding_column(c)
for c in self._primary_key_argument
]
else:
# if heuristically determined PKs, reduce to the minimal set
# of columns by eliminating FK->PK pairs for a multi-table
# expression. May over-reduce for some kinds of UNIONs
# / CTEs; use explicit PK argument for these special cases
primary_key = sql_util.reduce_columns(
self._pks_by_table[self.persist_selectable],
ignore_nonexistent_tables=True,
)
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'"
% (self, self.persist_selectable.description)
)
self.primary_key = tuple(primary_key)
self._log("Identified primary key columns: %s", primary_key)
# determine cols that aren't expressed within our tables; mark these
# as "read only" properties which are refreshed upon INSERT/UPDATE
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if self._columntoproperty[col] not in self._identity_key_props
and (
not hasattr(col, "table")
or col.table not in self._cols_by_table
)
)
def _configure_properties(self):
# Column and other ClauseElement objects which are mapped
# TODO: technically this should be a DedupeColumnCollection
# however DCC needs changes and more tests to fully cover
# storing columns under a separate key name
self.columns = self.c = sql_base.ColumnCollection()
# object attribute names mapped to MapperProperty objects
self._props = util.OrderedDict()
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
for key, prop in self._init_properties.items():
self._configure_property(key, prop, False)
# pull properties from the inherited mapper if any.
if self.inherits:
for key, prop in self.inherits._props.items():
if key not in self._props and not self._should_exclude(
key, key, local=False, column=None
):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
# for those columns which don't already map to a property
for column in self.persist_selectable.columns:
if column in self._columntoproperty:
continue
column_key = (self.column_prefix or "") + column.key
if self._should_exclude(
column.key,
column_key,
local=self.local_table.c.contains_column(column),
column=column,
):
continue
# adjust the "key" used for this column to that
# of the inheriting mapper
for mapper in self.iterate_to_root():
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(
column_key, column, init=False, setparent=True
)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
setter = False
if self.polymorphic_on is not None:
setter = True
if isinstance(self.polymorphic_on, util.string_types):
# polymorphic_on specified as a string - link
# it to mapped ColumnProperty
try:
self.polymorphic_on = self._props[self.polymorphic_on]
except KeyError as err:
util.raise_(
sa_exc.ArgumentError(
"Can't determine polymorphic_on "
"value '%s' - no attribute is "
"mapped to this name." % self.polymorphic_on
),
replace_context=err,
)
if self.polymorphic_on in self._columntoproperty:
# polymorphic_on is a column that is already mapped
# to a ColumnProperty
prop = self._columntoproperty[self.polymorphic_on]
elif isinstance(self.polymorphic_on, MapperProperty):
# polymorphic_on is directly a MapperProperty,
# ensure it's a ColumnProperty
if not isinstance(
self.polymorphic_on, properties.ColumnProperty
):
raise sa_exc.ArgumentError(
"Only direct column-mapped "
"property or SQL expression "
"can be passed for polymorphic_on"
)
prop = self.polymorphic_on
else:
# polymorphic_on is a Column or SQL expression and
# doesn't appear to be mapped. this means it can be 1.
# only present in the with_polymorphic selectable or
# 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's persist_selectable
col = self.persist_selectable.corresponding_column(
self.polymorphic_on
)
if col is None:
                    # polymorphic_on doesn't derive from any
                    # column/expression present in the mapped
                    # table.  we will make a "hidden" ColumnProperty
# for it. Just check that if it's directly a
# schema.Column and we have with_polymorphic, it's
# likely a user error if the schema.Column isn't
# represented somehow in either persist_selectable or
# with_polymorphic. Otherwise as of 0.7.4 we
# just go with it and assume the user wants it
# that way (i.e. a CASE statement)
setter = False
instrument = False
col = self.polymorphic_on
if isinstance(col, schema.Column) and (
self.with_polymorphic is None
or self.with_polymorphic[1].corresponding_column(col)
is None
):
raise sa_exc.InvalidRequestError(
"Could not map polymorphic_on column "
"'%s' to the mapped table - polymorphic "
"loads will not function properly"
% col.description
)
else:
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
                    # with_polymorphic selectable, as when using
# polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, "key", None)
if key:
if self._should_exclude(col.key, col.key, False, col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the "
"discriminator column %r" % col.key
)
else:
self.polymorphic_on = col = col.label("_sa_polymorphic_on")
key = col.key
prop = properties.ColumnProperty(col, _instrument=instrument)
self._configure_property(key, prop, init=init, setparent=True)
# the actual polymorphic_on should be the first public-facing
# column in the property
self.polymorphic_on = prop.columns[0]
polymorphic_key = prop.key
else:
# no polymorphic_on was set.
# check inheriting mappers for one.
for mapper in self.iterate_to_root():
# determine if polymorphic_on of the parent
# should be propagated here. If the col
# is present in our mapped table, or if our mapped
# table is the same as the parent (i.e. single table
# inheritance), we can use it
if mapper.polymorphic_on is not None:
if self.persist_selectable is mapper.persist_selectable:
self.polymorphic_on = mapper.polymorphic_on
else:
self.polymorphic_on = (
self.persist_selectable
).corresponding_column(mapper.polymorphic_on)
# we can use the parent mapper's _set_polymorphic_identity
# directly; it ensures the polymorphic_identity of the
                    # instance's mapper is used, so it is portable to subclasses.
if self.polymorphic_on is not None:
self._set_polymorphic_identity = (
mapper._set_polymorphic_identity
)
self._validate_polymorphic_identity = (
mapper._validate_polymorphic_identity
)
else:
self._set_polymorphic_identity = None
return
if setter:
def _set_polymorphic_identity(state):
dict_ = state.dict
state.get_impl(polymorphic_key).set(
state,
dict_,
state.manager.mapper.polymorphic_identity,
None,
)
def _validate_polymorphic_identity(mapper, state, dict_):
if (
polymorphic_key in dict_
and dict_[polymorphic_key]
not in mapper._acceptable_polymorphic_identities
):
util.warn_limited(
"Flushing object %s with "
"incompatible polymorphic identity %r; the "
"object may not refresh and/or load correctly",
(state_str(state), dict_[polymorphic_key]),
)
self._set_polymorphic_identity = _set_polymorphic_identity
self._validate_polymorphic_identity = (
_validate_polymorphic_identity
)
else:
self._set_polymorphic_identity = None
_validate_polymorphic_identity = None
@HasMemoized.memoized_attribute
def _version_id_prop(self):
if self.version_id_col is not None:
return self._columntoproperty[self.version_id_col]
else:
return None
@HasMemoized.memoized_attribute
def _acceptable_polymorphic_identities(self):
identities = set()
stack = deque([self])
while stack:
item = stack.popleft()
if item.persist_selectable is self.persist_selectable:
identities.add(item.polymorphic_identity)
stack.extend(item._inheriting_mappers)
return identities
@HasMemoized.memoized_attribute
def _prop_set(self):
return frozenset(self._props.values())
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _adapt_inherited_property(self, key, prop, init):
descriptor_props = util.preloaded.orm_descriptor_props
if not self.concrete:
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
# determine if the class implements this attribute; if not,
# or if it is implemented by the attribute that is handling the
# given superclass-mapped property, then we need to report that we
# can't use this at the instance level since we are a concrete
# mapper and we don't map this. don't trip user-defined
# descriptors that might have side effects when invoked.
implementing_attribute = self.class_manager._get_class_attr_mro(
key, prop
)
if implementing_attribute is prop or (
isinstance(
implementing_attribute, attributes.InstrumentedAttribute
)
and implementing_attribute._parententity is prop.parent
):
self._configure_property(
key,
descriptor_props.ConcreteInheritedProperty(),
init=init,
setparent=True,
)
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _configure_property(self, key, prop, init=True, setparent=True):
descriptor_props = util.preloaded.orm_descriptor_props
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
prop = self._property_from_column(key, prop)
if isinstance(prop, properties.ColumnProperty):
col = self.persist_selectable.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
for m in self.inherits.iterate_to_root():
col = m.local_table.corresponding_column(prop.columns[0])
if col is not None:
for m2 in path:
m2.persist_selectable._refresh_for_new_column(col)
col = self.persist_selectable.corresponding_column(
prop.columns[0]
)
break
path.append(m)
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, "_readonly_props") and (
not hasattr(col, "table")
or col.table not in self._cols_by_table
):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if (
hasattr(self, "_cols_by_table")
and col.table in self._cols_by_table
and col not in self._cols_by_table[col.table]
):
self._cols_by_table[col.table].add(col)
# if this properties.ColumnProperty represents the "polymorphic
# discriminator" column, mark it. We'll need this when rendering
# columns in SELECT statements.
if not hasattr(prop, "_is_polymorphic_discriminator"):
prop._is_polymorphic_discriminator = (
col is self.polymorphic_on
or prop.columns[0] is self.polymorphic_on
)
if isinstance(col, expression.Label):
# new in 1.4, get column property against expressions
# to be addressable in subqueries
col.key = col._tq_key_label = key
self.columns.add(col, key)
for col in prop.columns + prop._orig_columns:
for col in col.proxy_set:
self._columntoproperty[col] = prop
prop.key = key
if setparent:
prop.set_parent(self, init)
if key in self._props and getattr(
self._props[key], "_mapped_by_synonym", False
):
syn = self._props[key]._mapped_by_synonym
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" % (syn, key, key, syn)
)
if (
key in self._props
and not isinstance(prop, properties.ColumnProperty)
and not isinstance(
self._props[key],
(
properties.ColumnProperty,
descriptor_props.ConcreteInheritedProperty,
),
)
):
util.warn(
"Property %s on %s being replaced with new "
"property %s; the old property will be discarded"
% (self._props[key], self, prop)
)
oldprop = self._props[key]
self._path_registry.pop(oldprop, None)
self._props[key] = prop
if not self.non_primary:
prop.instrument_class(self)
for mapper in self._inheriting_mappers:
mapper._adapt_inherited_property(key, prop, init)
if init:
prop.init()
prop.post_instrument_class(self)
if self.configured:
self._expire_memoizations()
@util.preload_module("sqlalchemy.orm.descriptor_props")
def _property_from_column(self, key, prop):
"""generate/update a :class:`.ColumnProperty` given a
:class:`_schema.Column` object."""
descriptor_props = util.preloaded.orm_descriptor_props
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
assert isinstance(column, expression.ColumnElement)
prop = self._props.get(key, None)
if isinstance(prop, properties.ColumnProperty):
if (
(
not self._inherits_equated_pairs
or (prop.columns[0], column)
not in self._inherits_equated_pairs
)
and not prop.columns[0].shares_lineage(column)
and prop.columns[0] is not self.version_id_col
and column is not self.version_id_col
):
warn_only = prop.parent is not self
msg = (
"Implicitly combining column %s with column "
"%s under attribute '%s'. Please configure one "
"or more attributes for these same-named columns "
"explicitly." % (prop.columns[-1], column, key)
)
if warn_only:
util.warn(msg)
else:
raise sa_exc.InvalidRequestError(msg)
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
self._log(
"inserting column to existing list "
"in properties.ColumnProperty %s" % (key)
)
return prop
elif prop is None or isinstance(
prop, descriptor_props.ConcreteInheritedProperty
):
mapped_column = []
for c in columns:
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
mc = self.local_table.corresponding_column(c)
if mc is not None:
# if the column is in the local table but not the
# mapped table, this corresponds to adding a
# column after the fact to the local table.
# [ticket:1523]
self.persist_selectable._refresh_for_new_column(mc)
mc = self.persist_selectable.corresponding_column(c)
if mc is None:
raise sa_exc.ArgumentError(
"When configuring property '%s' on %s, "
"column '%s' is not represented in the mapper's "
"table. Use the `column_property()` function to "
"force this column to be mapped as a read-only "
"attribute." % (key, self, c)
)
mapped_column.append(mc)
return properties.ColumnProperty(*mapped_column)
else:
raise sa_exc.ArgumentError(
"WARNING: when configuring property '%s' on %s, "
"column '%s' conflicts with property '%r'. "
"To resolve this, map the column to the class under a "
"different name in the 'properties' dictionary. Or, "
"to remove all awareness of the column entirely "
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." % (key, self, column.key, prop)
)
def _check_configure(self):
if self.registry._new_mappers:
_configure_registries({self.registry}, cascade=True)
def _post_configure_properties(self):
"""Call the ``init()`` method on all ``MapperProperties``
attached to this mapper.
This is a deferred configuration step which is intended
to execute once all mappers have been constructed.
"""
self._log("_post_configure_properties() started")
        for key, prop in list(self._props.items()):
self._log("initialize prop %s", key)
if prop.parent is self and not prop._configure_started:
prop.init()
if prop._configure_finished:
prop.post_instrument_class(self)
self._log("_post_configure_properties() complete")
self.configured = True
def add_properties(self, dict_of_properties):
"""Add the given dictionary of properties to this mapper,
using `add_property`.
"""
for key, value in dict_of_properties.items():
self.add_property(key, value)
def add_property(self, key, prop):
"""Add an individual MapperProperty to this mapper.
If the mapper has not been configured yet, just adds the
property to the initial properties dictionary sent to the
constructor. If this Mapper has already been configured, then
the given MapperProperty is configured immediately.
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.configured)
def _expire_memoizations(self):
for mapper in self.iterate_to_root():
mapper._reset_memoizations()
@property
def _log_desc(self):
        return (
            "("
            + self.class_.__name__
            + "|"
            + (
                self.local_table.description
                if self.local_table is not None
                else str(self.local_table)
            )
            + ("|non-primary" if self.non_primary else "")
            + ")"
        )
def _log(self, msg, *args):
self.logger.info("%s " + msg, *((self._log_desc,) + args))
def _log_debug(self, msg, *args):
self.logger.debug("%s " + msg, *((self._log_desc,) + args))
def __repr__(self):
return "<Mapper at 0x%x; %s>" % (id(self), self.class_.__name__)
def __str__(self):
return "mapped class %s%s->%s" % (
self.class_.__name__,
            " (non-primary)" if self.non_primary else "",
self.local_table.description
if self.local_table is not None
else self.persist_selectable.description,
)
def _is_orphan(self, state):
orphan_possible = False
for mapper in self.iterate_to_root():
for (key, cls) in mapper._delete_orphans:
orphan_possible = True
has_parent = attributes.manager_of_class(cls).has_parent(
state, key, optimistic=state.has_identity
)
if self.legacy_is_orphan and has_parent:
return False
elif not self.legacy_is_orphan and not has_parent:
return True
if self.legacy_is_orphan:
return orphan_possible
else:
return False
def has_property(self, key):
return key in self._props
def get_property(self, key, _configure_mappers=True):
"""return a MapperProperty associated with the given key."""
if _configure_mappers:
self._check_configure()
try:
return self._props[key]
except KeyError as err:
util.raise_(
sa_exc.InvalidRequestError(
"Mapper '%s' has no property '%s'" % (self, key)
),
replace_context=err,
)
def get_property_by_column(self, column):
"""Given a :class:`_schema.Column` object, return the
:class:`.MapperProperty` which maps this column."""
return self._columntoproperty[column]
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
self._check_configure()
return iter(self._props.values())
def _mappers_from_spec(self, spec, selectable):
"""given a with_polymorphic() argument, return the set of mappers it
represents.
Trims the list of mappers to just those represented within the given
selectable, if present. This helps some more legacy-ish mappings.
"""
if spec == "*":
mappers = list(self.self_and_descendants)
elif spec:
mappers = set()
for m in util.to_list(spec):
m = _class_to_mapper(m)
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" % (m, self)
)
if selectable is None:
mappers.update(m.iterate_to_root())
else:
mappers.add(m)
mappers = [m for m in self.self_and_descendants if m in mappers]
else:
mappers = []
if selectable is not None:
tables = set(
sql_util.find_tables(selectable, include_aliases=True)
)
mappers = [m for m in mappers if m.local_table in tables]
return mappers
def _selectable_from_mappers(self, mappers, innerjoin):
"""given a list of mappers (assumed to be within this mapper's
        inheritance hierarchy), construct an outerjoin amongst those mappers'
mapped tables.
"""
from_obj = self.persist_selectable
for m in mappers:
if m is self:
continue
if m.concrete:
raise sa_exc.InvalidRequestError(
"'with_polymorphic()' requires 'selectable' argument "
"when concrete-inheriting mappers are used."
)
elif not m.single:
if innerjoin:
from_obj = from_obj.join(
m.local_table, m.inherit_condition
)
else:
from_obj = from_obj.outerjoin(
m.local_table, m.inherit_condition
)
return from_obj
@HasMemoized.memoized_attribute
def _single_table_criterion(self):
if self.single and self.inherits and self.polymorphic_on is not None:
return self.polymorphic_on._annotate({"parentmapper": self}).in_(
m.polymorphic_identity for m in self.self_and_descendants
)
else:
return None
@HasMemoized.memoized_attribute
def _with_polymorphic_mappers(self):
self._check_configure()
if not self.with_polymorphic:
return []
return self._mappers_from_spec(*self.with_polymorphic)
@HasMemoized.memoized_attribute
def _post_inspect(self):
"""This hook is invoked by attribute inspection.
E.g. when Query calls:
coercions.expect(roles.ColumnsClauseRole, ent, keep_inspect=True)
        This allows the inspection process to run a configure-mappers hook.
"""
self._check_configure()
@HasMemoized.memoized_attribute
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.persist_selectable
spec, selectable = self.with_polymorphic
if selectable is not None:
return selectable
else:
return self._selectable_from_mappers(
self._mappers_from_spec(spec, selectable), False
)
with_polymorphic_mappers = _with_polymorphic_mappers
"""The list of :class:`_orm.Mapper` objects included in the
default "polymorphic" query.
"""
@HasMemoized.memoized_attribute
def _insert_cols_evaluating_none(self):
return dict(
(
table,
frozenset(
col for col in columns if col.type.should_evaluate_none
),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_attribute
def _insert_cols_as_none(self):
return dict(
(
table,
frozenset(
col.key
for col in columns
if not col.primary_key
and not col.server_default
and not col.default
and not col.type.should_evaluate_none
),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_attribute
def _propkey_to_col(self):
return dict(
(
table,
dict(
(self._columntoproperty[col].key, col) for col in columns
),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_attribute
def _pk_keys_by_table(self):
return dict(
(table, frozenset([col.key for col in pks]))
for table, pks in self._pks_by_table.items()
)
@HasMemoized.memoized_attribute
def _pk_attr_keys_by_table(self):
return dict(
(
table,
frozenset([self._columntoproperty[col].key for col in pks]),
)
for table, pks in self._pks_by_table.items()
)
@HasMemoized.memoized_attribute
def _server_default_cols(self):
return dict(
(
table,
frozenset(
[
col.key
for col in columns
if col.server_default is not None
]
),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_attribute
def _server_default_plus_onupdate_propkeys(self):
result = set()
for table, columns in self._cols_by_table.items():
for col in columns:
if (
col.server_default is not None
or col.server_onupdate is not None
) and col in self._columntoproperty:
result.add(self._columntoproperty[col].key)
return result
@HasMemoized.memoized_attribute
def _server_onupdate_default_cols(self):
return dict(
(
table,
frozenset(
[
col.key
for col in columns
if col.server_onupdate is not None
]
),
)
for table, columns in self._cols_by_table.items()
)
@HasMemoized.memoized_instancemethod
def __clause_element__(self):
annotations = {
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
}
if self.persist_selectable is not self.local_table:
# joined table inheritance, with polymorphic selectable,
# etc.
annotations["dml_table"] = self.local_table._annotate(
{
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
}
)._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
return self.selectable._annotate(annotations)._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
@util.memoized_property
def select_identity_token(self):
return (
expression.null()
._annotate(
{
"entity_namespace": self,
"parententity": self,
"parentmapper": self,
"identity_token": True,
}
)
._set_propagate_attrs(
{"compile_state_plugin": "orm", "plugin_subject": self}
)
)
@property
def selectable(self):
"""The :class:`_schema.FromClause` construct this
:class:`_orm.Mapper` selects from by default.
Normally, this is equivalent to :attr:`.persist_selectable`, unless
the ``with_polymorphic`` feature is in use, in which case the
full "polymorphic" selectable is returned.
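        For example, for a hypothetical single-table ``User`` mapping that
        does not use ``with_polymorphic``, both attributes reference the
        same :class:`_schema.Table`::

            from sqlalchemy import inspect

            # "User" is a hypothetical mapped class, not defined here
            insp = inspect(User)
            insp.selectable is insp.persist_selectable   # True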
"""
return self._with_polymorphic_selectable
def _with_polymorphic_args(
self, spec=None, selectable=False, innerjoin=False
):
if selectable not in (None, False):
selectable = coercions.expect(
roles.StrictFromClauseRole, selectable, allow_select=True
)
if self.with_polymorphic:
if not spec:
spec = self.with_polymorphic[0]
if selectable is False:
selectable = self.with_polymorphic[1]
elif selectable is False:
selectable = None
mappers = self._mappers_from_spec(spec, selectable)
if selectable is not None:
return mappers, selectable
else:
return mappers, self._selectable_from_mappers(mappers, innerjoin)
@HasMemoized.memoized_attribute
def _polymorphic_properties(self):
return list(
self._iterate_polymorphic_properties(
self._with_polymorphic_mappers
)
)
@property
def _all_column_expressions(self):
poly_properties = self._polymorphic_properties
adapter = self._polymorphic_adapter
return [
adapter.columns[prop.columns[0]] if adapter else prop.columns[0]
for prop in poly_properties
if isinstance(prop, properties.ColumnProperty)
and prop._renders_in_subqueries
]
def _columns_plus_keys(self, polymorphic_mappers=()):
if polymorphic_mappers:
poly_properties = self._iterate_polymorphic_properties(
polymorphic_mappers
)
else:
poly_properties = self._polymorphic_properties
return [
(prop.key, prop.columns[0])
for prop in poly_properties
if isinstance(prop, properties.ColumnProperty)
]
@HasMemoized.memoized_attribute
def _polymorphic_adapter(self):
if self.with_polymorphic:
return sql_util.ColumnAdapter(
self.selectable, equivalents=self._equivalent_columns
)
else:
return None
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
if mappers is None:
mappers = self._with_polymorphic_mappers
if not mappers:
for c in self.iterate_properties:
yield c
else:
# in the polymorphic case, filter out discriminator columns
# from other mappers, as these are sometimes dependent on that
# mapper's polymorphic selectable (which we don't want rendered)
for c in util.unique_list(
chain(
*[
list(mapper.iterate_properties)
for mapper in [self] + mappers
]
)
):
if getattr(c, "_is_polymorphic_discriminator", False) and (
self.polymorphic_on is None
or c.columns[0] is not self.polymorphic_on
):
continue
yield c
@HasMemoized.memoized_attribute
def attrs(self):
"""A namespace of all :class:`.MapperProperty` objects
        associated with this mapper.
This is an object that provides each property based on
its key name. For instance, the mapper for a
        ``User`` class which has a ``User.name`` attribute would
provide ``mapper.attrs.name``, which would be the
:class:`.ColumnProperty` representing the ``name``
column. The namespace object can also be iterated,
which would yield each :class:`.MapperProperty`.
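        For example, a sketch assuming a hypothetical mapped ``User`` class
        with a ``name`` attribute::

            from sqlalchemy import inspect

            # "User" is a hypothetical mapped class, not defined here
            insp = inspect(User)
            insp.attrs["name"]   # ColumnProperty for the "name" attribute
            list(insp.attrs)     # every MapperProperty, in order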
:class:`_orm.Mapper` has several pre-filtered views
of this attribute which limit the types of properties
returned, including :attr:`.synonyms`, :attr:`.column_attrs`,
:attr:`.relationships`, and :attr:`.composites`.
.. warning::
The :attr:`_orm.Mapper.attrs` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.attrs[somename]`` over
``getattr(mapper.attrs, somename)`` to avoid name collisions.
.. seealso::
:attr:`_orm.Mapper.all_orm_descriptors`
"""
self._check_configure()
return util.ImmutableProperties(self._props)
@HasMemoized.memoized_attribute
def all_orm_descriptors(self):
"""A namespace of all :class:`.InspectionAttr` attributes associated
with the mapped class.
These attributes are in all cases Python :term:`descriptors`
associated with the mapped class or its superclasses.
This namespace includes attributes that are mapped to the class
as well as attributes declared by extension modules.
It includes any Python descriptor type that inherits from
:class:`.InspectionAttr`. This includes
:class:`.QueryableAttribute`, as well as extension types such as
:class:`.hybrid_property`, :class:`.hybrid_method` and
:class:`.AssociationProxy`.
To distinguish between mapped attributes and extension attributes,
the attribute :attr:`.InspectionAttr.extension_type` will refer
to a constant that distinguishes between different extension types.
The sorting of the attributes is based on the following rules:
1. Iterate through the class and its superclasses in order from
subclass to superclass (i.e. iterate through ``cls.__mro__``)
2. For each class, yield the attributes in the order in which they
appear in ``__dict__``, with the exception of those in step
3 below. In Python 3.6 and above this ordering will be the
same as that of the class' construction, with the exception
of attributes that were added after the fact by the application
or the mapper.
3. If a certain attribute key is also in the superclass ``__dict__``,
then it's included in the iteration for that class, and not the
class in which it first appeared.
The above process produces an ordering that is deterministic in terms
of the order in which attributes were assigned to the class.
.. versionchanged:: 1.3.19 ensured deterministic ordering for
:meth:`_orm.Mapper.all_orm_descriptors`.
When dealing with a :class:`.QueryableAttribute`, the
:attr:`.QueryableAttribute.property` attribute refers to the
:class:`.MapperProperty` property, which is what you get when
referring to the collection of mapped properties via
:attr:`_orm.Mapper.attrs`.
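        For example, assuming a hypothetical ``User`` class with a mapped
        ``name`` column and a ``full_name`` hybrid property::

            from sqlalchemy import inspect

            # "User" is a hypothetical mapped class, not defined here
            descriptors = inspect(User).all_orm_descriptors
            descriptors["name"]        # InstrumentedAttribute for "name"
            descriptors["full_name"]   # the hybrid_property descriptor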
.. warning::
The :attr:`_orm.Mapper.all_orm_descriptors`
accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.all_orm_descriptors[somename]`` over
``getattr(mapper.all_orm_descriptors, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs`
"""
return util.ImmutableProperties(
dict(self.class_manager._all_sqla_attributes())
)
@HasMemoized.memoized_attribute
@util.preload_module("sqlalchemy.orm.descriptor_props")
def synonyms(self):
"""Return a namespace of all :class:`.SynonymProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
descriptor_props = util.preloaded.orm_descriptor_props
return self._filter_properties(descriptor_props.SynonymProperty)
@property
def entity_namespace(self):
return self.class_
@HasMemoized.memoized_attribute
def column_attrs(self):
"""Return a namespace of all :class:`.ColumnProperty`
properties maintained by this :class:`_orm.Mapper`.
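        For example, for a hypothetical ``User`` class mapped with ``id``
        and ``name`` columns::

            from sqlalchemy import inspect

            # "User" is a hypothetical mapped class, not defined here
            [attr.key for attr in inspect(User).column_attrs]
            # -> ['id', 'name']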
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(properties.ColumnProperty)
@util.preload_module("sqlalchemy.orm.relationships")
@HasMemoized.memoized_attribute
def relationships(self):
"""A namespace of all :class:`.RelationshipProperty` properties
maintained by this :class:`_orm.Mapper`.
.. warning::
the :attr:`_orm.Mapper.relationships` accessor namespace is an
instance of :class:`.OrderedProperties`. This is
a dictionary-like object which includes a small number of
named methods such as :meth:`.OrderedProperties.items`
and :meth:`.OrderedProperties.values`. When
accessing attributes dynamically, favor using the dict-access
scheme, e.g. ``mapper.relationships[somename]`` over
``getattr(mapper.relationships, somename)`` to avoid name
collisions.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(
util.preloaded.orm_relationships.RelationshipProperty
)
@HasMemoized.memoized_attribute
@util.preload_module("sqlalchemy.orm.descriptor_props")
def composites(self):
"""Return a namespace of all :class:`.CompositeProperty`
properties maintained by this :class:`_orm.Mapper`.
.. seealso::
:attr:`_orm.Mapper.attrs` - namespace of all
:class:`.MapperProperty`
objects.
"""
return self._filter_properties(
util.preloaded.orm_descriptor_props.CompositeProperty
)
def _filter_properties(self, type_):
self._check_configure()
return util.ImmutableProperties(
util.OrderedDict(
(k, v) for k, v in self._props.items() if isinstance(v, type_)
)
)
@HasMemoized.memoized_attribute
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
by primary key.
"""
params = [
(
primary_key,
sql.bindparam("pk_%d" % idx, type_=primary_key.type),
)
for idx, primary_key in enumerate(self.primary_key, 1)
]
return (
sql.and_(*[k == v for (k, v) in params]),
util.column_dict(params),
)
@HasMemoized.memoized_attribute
def _equivalent_columns(self):
"""Create a map of all equivalent columns, based on
the determination of column pairs that are equated to
one another based on inherit condition. This is designed
to work with the queries that util.polymorphic_union
comes up with, which often don't include the columns from
the base table directly (including the subclass table columns
only).
The resulting structure is a dictionary of columns mapped
to lists of equivalent columns, e.g.::
{
tablea.col1:
{tableb.col1, tablec.col1},
tablea.col2:
{tabled.col2}
}
"""
result = util.column_dict()
def visit_binary(binary):
if binary.operator == operators.eq:
if binary.left in result:
result[binary.left].add(binary.right)
else:
result[binary.left] = util.column_set((binary.right,))
if binary.right in result:
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {}, {"binary": visit_binary}
)
return result
def _is_userland_descriptor(self, assigned_name, obj):
if isinstance(
obj,
(
_MappedAttribute,
instrumentation.ClassManager,
expression.ColumnElement,
),
):
return False
else:
return assigned_name not in self._dataclass_fields
@HasMemoized.memoized_attribute
def _dataclass_fields(self):
return [f.name for f in util.dataclass_fields(self.class_)]
    def _should_exclude(self, name, assigned_name, local, column):
        """determine whether a particular property, which would otherwise be
        implicitly present on the class, should instead be excluded.
        Implicitly-present properties occur when properties are propagated
        from an inherited class, or are applied from the columns present in
        the mapped table.
"""
# check for class-bound attributes and/or descriptors,
# either local or from an inherited class
# ignore dataclass field default values
if local:
if self.class_.__dict__.get(
assigned_name, None
) is not None and self._is_userland_descriptor(
assigned_name, self.class_.__dict__[assigned_name]
):
return True
else:
attr = self.class_manager._get_class_attr_mro(assigned_name, None)
if attr is not None and self._is_userland_descriptor(
assigned_name, attr
):
return True
if (
self.include_properties is not None
and name not in self.include_properties
and (column is None or column not in self.include_properties)
):
self._log("not including property %s" % (name))
return True
if self.exclude_properties is not None and (
name in self.exclude_properties
or (column is not None and column in self.exclude_properties)
):
self._log("excluding property %s" % (name))
return True
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
        common inherited parent with this mapper."""
return self.base_mapper is other.base_mapper
def is_sibling(self, other):
"""return true if the other mapper is an inheriting sibling to this
        one, i.e. shares a common parent but is on a different branch.
"""
return (
self.base_mapper is other.base_mapper
and not self.isa(other)
and not other.isa(self)
)
def _canload(self, state, allow_subtypes):
s = self.primary_mapper()
if self.polymorphic_on is not None or allow_subtypes:
return _state_mapper(state).isa(s)
else:
return _state_mapper(state) is s
    def isa(self, other):
        """Return True if this mapper inherits from the given mapper."""
m = self
while m and m is not other:
m = m.inherits
return bool(m)
def iterate_to_root(self):
m = self
while m:
yield m
m = m.inherits
@HasMemoized.memoized_attribute
def self_and_descendants(self):
"""The collection including this mapper and all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
"""
descendants = []
stack = deque([self])
while stack:
item = stack.popleft()
descendants.append(item)
stack.extend(item._inheriting_mappers)
return util.WeakSequence(descendants)
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
This includes not just the immediately inheriting mappers but
all their inheriting mappers as well.
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
"""
return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
(class)."""
return self.class_manager.mapper
@property
def primary_base_mapper(self):
return self.class_manager.mapper.base_mapper
def _result_has_identity_key(self, result, adapter=None):
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
rk = result.keys()
for col in pk_cols:
if col not in rk:
return False
else:
return True
def identity_key_from_row(self, row, identity_token=None, adapter=None):
"""Return an identity-map key for use in storing/retrieving an
item from the identity map.
:param row: A :class:`.Row` instance. The columns which are
mapped by this :class:`_orm.Mapper` should be locatable in the row,
preferably via the :class:`_schema.Column`
object directly (as is the case
when a :func:`_expression.select` construct is executed), or
via string names of the form ``<tablename>_<colname>``.
"""
pk_cols = self.primary_key
if adapter:
pk_cols = [adapter.columns[c] for c in pk_cols]
return (
self._identity_class,
tuple(row[column] for column in pk_cols),
identity_token,
)
def identity_key_from_primary_key(self, primary_key, identity_token=None):
"""Return an identity-map key for use in storing/retrieving an
item from an identity map.
:param primary_key: A list of values indicating the identifier.
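        E.g., a sketch assuming a hypothetical ``User`` mapper whose single
        primary key value is ``5``::

            # "mapper" refers to the hypothetical User mapper
            mapper.identity_key_from_primary_key([5])
            # -> (User, (5,), None)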
"""
return self._identity_class, tuple(primary_key), identity_token
def identity_key_from_instance(self, instance):
"""Return the identity key for the given instance, based on
its primary key attributes.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
This value is typically also found on the instance state under the
attribute name `key`.
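        E.g., a sketch assuming a hypothetical persistent ``User`` instance
        whose single primary key value is ``5``::

            # "some_user" is a hypothetical persistent User instance
            mapper.identity_key_from_instance(some_user)
            # -> (User, (5,), None)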
"""
state = attributes.instance_state(instance)
return self._identity_key_from_state(state, attributes.PASSIVE_OFF)
def _identity_key_from_state(
self, state, passive=attributes.PASSIVE_RETURN_NO_VALUE
):
dict_ = state.dict
manager = state.manager
return (
self._identity_class,
tuple(
[
manager[prop.key].impl.get(state, dict_, passive)
for prop in self._identity_key_props
]
),
state.identity_token,
)
def primary_key_from_instance(self, instance):
"""Return the list of primary key values for the given
instance.
If the instance's state is expired, calling this method
will result in a database check to see if the object has been deleted.
If the row no longer exists,
:class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
"""
state = attributes.instance_state(instance)
identity_key = self._identity_key_from_state(
state, attributes.PASSIVE_OFF
)
return identity_key[1]
@HasMemoized.memoized_attribute
def _persistent_sortkey_fn(self):
key_fns = [col.type.sort_key_function for col in self.primary_key]
if set(key_fns).difference([None]):
def key(state):
return tuple(
key_fn(val) if key_fn is not None else val
for key_fn, val in zip(key_fns, state.key[1])
)
else:
def key(state):
return state.key[1]
return key
@HasMemoized.memoized_attribute
def _identity_key_props(self):
return [self._columntoproperty[col] for col in self.primary_key]
@HasMemoized.memoized_attribute
def _all_pk_cols(self):
collection = set()
for table in self.tables:
collection.update(self._pks_by_table[table])
return collection
@HasMemoized.memoized_attribute
def _should_undefer_in_wildcard(self):
cols = set(self.primary_key)
if self.polymorphic_on is not None:
cols.add(self.polymorphic_on)
return cols
@HasMemoized.memoized_attribute
def _primary_key_propkeys(self):
return {self._columntoproperty[col].key for col in self._all_pk_cols}
def _get_state_attr_by_column(
self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NO_VALUE
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
def _set_committed_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set_committed_value(state, dict_, value)
def _set_state_attr_by_column(self, state, dict_, column, value):
prop = self._columntoproperty[column]
state.manager[prop.key].impl.set(state, dict_, value, None)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(
state, dict_, column, passive=attributes.PASSIVE_OFF
)
def _get_committed_state_attr_by_column(
self, state, dict_, column, passive=attributes.PASSIVE_RETURN_NO_VALUE
):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get_committed_value(
state, dict_, passive=passive
)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
col_attribute_names = set(attribute_names).intersection(
state.mapper.column_attrs.keys()
)
tables = set(
chain(
*[
sql_util.find_tables(c, check_columns=True)
for key in col_attribute_names
for c in props[key].columns
]
)
)
if self.base_mapper.local_table in tables:
return None
def visit_binary(binary):
leftcol = binary.left
rightcol = binary.right
if leftcol is None or rightcol is None:
return
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state,
state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if leftval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.left = sql.bindparam(
None, leftval, type_=binary.right.type
)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state,
state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE,
)
if rightval in orm_util._none_set:
raise _OptGetColumnsNotAvailable()
binary.right = sql.bindparam(
None, rightval, type_=binary.right.type
)
allconds = []
start = False
# as of #7507, from the lowest base table on upwards,
# we include all intermediary tables.
for mapper in reversed(list(self.iterate_to_root())):
if mapper.local_table in tables:
start = True
elif not isinstance(mapper.local_table, expression.TableClause):
return None
if start and not mapper.single:
allconds.append(mapper.inherit_condition)
tables.add(mapper.local_table)
# only the bottom table needs its criteria to be altered to fit
# the primary key ident - the rest of the tables upwards to the
# descendant-most class should all be present and joined to each
# other.
try:
allconds[0] = visitors.cloned_traverse(
allconds[0], {}, {"binary": visit_binary}
)
except _OptGetColumnsNotAvailable:
return None
cond = sql.and_(*allconds)
cols = []
for key in col_attribute_names:
cols.extend(props[key].columns)
return (
sql.select(*cols)
.where(cond)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
)
def _iterate_to_target_viawpoly(self, mapper):
if self.isa(mapper):
prev = self
for m in self.iterate_to_root():
yield m
if m is not prev and prev not in m._with_polymorphic_mappers:
break
prev = m
if m is mapper:
break
def _should_selectin_load(self, enabled_via_opt, polymorphic_from):
if not enabled_via_opt:
# common case, takes place for all polymorphic loads
mapper = polymorphic_from
for m in self._iterate_to_target_viawpoly(mapper):
if m.polymorphic_load == "selectin":
return m
else:
# uncommon case, selectin load options were used
enabled_via_opt = set(enabled_via_opt)
enabled_via_opt_mappers = {e.mapper: e for e in enabled_via_opt}
for entity in enabled_via_opt.union([polymorphic_from]):
mapper = entity.mapper
for m in self._iterate_to_target_viawpoly(mapper):
if (
m.polymorphic_load == "selectin"
or m in enabled_via_opt_mappers
):
return enabled_via_opt_mappers.get(m, m)
return None
@util.preload_module("sqlalchemy.orm.strategy_options")
def _subclass_load_via_in(self, entity):
"""Assemble a that can load the columns local to
this subclass as a SELECT with IN.
"""
strategy_options = util.preloaded.orm_strategy_options
assert self.inherits
if self.polymorphic_on is not None:
polymorphic_prop = self._columntoproperty[self.polymorphic_on]
keep_props = set([polymorphic_prop] + self._identity_key_props)
else:
keep_props = set(self._identity_key_props)
disable_opt = strategy_options.Load(entity)
enable_opt = strategy_options.Load(entity)
for prop in self.attrs:
if prop.parent is self or prop in keep_props:
# "enable" options, to turn on the properties that we want to
# load by default (subject to options from the query)
if not isinstance(prop, StrategizedProperty):
continue
enable_opt.set_generic_strategy(
# convert string name to an attribute before passing
# to loader strategy
(getattr(entity.entity_namespace, prop.key),),
dict(prop.strategy_key),
)
else:
# "disable" options, to turn off the properties from the
# superclass that we *don't* want to load, applied after
# the options from the query to override them
disable_opt.set_generic_strategy(
# convert string name to an attribute before passing
# to loader strategy
(getattr(entity.entity_namespace, prop.key),),
{"do_nothing": True},
)
primary_key = [
sql_util._deep_annotate(pk, {"_orm_adapt": True})
for pk in self.primary_key
]
if len(primary_key) > 1:
in_expr = sql.tuple_(*primary_key)
else:
in_expr = primary_key[0]
if entity.is_aliased_class:
assert entity.mapper is self
q = sql.select(entity).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
in_expr = entity._adapter.traverse(in_expr)
primary_key = [entity._adapter.traverse(k) for k in primary_key]
q = q.where(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
).order_by(*primary_key)
else:
q = sql.select(self).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
)
q = q.where(
in_expr.in_(sql.bindparam("primary_keys", expanding=True))
).order_by(*primary_key)
return q, enable_opt, disable_opt
@HasMemoized.memoized_attribute
def _subclass_load_via_in_mapper(self):
return self._subclass_load_via_in(self)
def cascade_iterator(self, type_, state, halt_on=None):
r"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
:param type\_:
The name of the cascade rule (i.e. ``"save-update"``, ``"delete"``,
etc.).
.. note:: the ``"all"`` cascade is not accepted here. For a generic
object traversal function, see :ref:`faq_walk_objects`.
:param state:
          The lead InstanceState. Child items will be processed per
the relationships defined for this object's mapper.
        :return: the method yields individual ``(object, mapper, state, dict)`` tuples.
.. seealso::
:ref:`unitofwork_cascades`
:ref:`faq_walk_objects` - illustrates a generic function to
traverse all objects without relying on cascades.
"""
visited_states = set()
prp, mpp = object(), object()
assert state.mapper.isa(self)
visitables = deque(
[(deque(state.mapper._props.values()), prp, state, state.dict)]
)
while visitables:
iterator, item_type, parent_state, parent_dict = visitables[-1]
if not iterator:
visitables.pop()
continue
if item_type is prp:
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(
prop.cascade_iterator(
type_,
parent_state,
parent_dict,
visited_states,
halt_on,
)
)
if queue:
visitables.append((queue, mpp, None, None))
elif item_type is mpp:
(
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
) = iterator.popleft()
yield (
instance,
instance_mapper,
corresponding_state,
corresponding_dict,
)
visitables.append(
(
deque(instance_mapper._props.values()),
prp,
corresponding_state,
corresponding_dict,
)
)
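    # Usage sketch (illustrative only; ``some_object`` stands for any
    # persistent instance and is not defined here):
    #
    #     from sqlalchemy import inspect
    #
    #     state = inspect(some_object)
    #     for obj, obj_mapper, obj_state, obj_dict in \
    #             state.mapper.cascade_iterator("save-update", state):
    #         ...  # each reachable child object, with its mapper and state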
@HasMemoized.memoized_attribute
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
@HasMemoized.memoized_attribute
def _sorted_tables(self):
table_to_mapper = {}
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
extra_dependencies = []
for table, mapper in table_to_mapper.items():
super_ = mapper.inherits
if super_:
extra_dependencies.extend(
[(super_table, table) for super_table in super_.tables]
)
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
# for two tables that are related by inheritance.
# while that dependency may be important, it's technically
# not what we mean to sort on here.
parent = table_to_mapper.get(fk.parent.table)
dep = table_to_mapper.get(fk.column.table)
if (
parent is not None
and dep is not None
and dep is not parent
and dep.inherit_condition is not None
):
cols = set(sql_util._find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(
sql_util._find_columns(parent.inherit_condition)
)
return fk.parent not in cols and fk.column not in cols
else:
return fk.parent not in cols
return False
sorted_ = sql_util.sort_tables(
table_to_mapper,
skip_fn=skip,
extra_dependencies=extra_dependencies,
)
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
return ret
def _memo(self, key, callable_):
if key in self._memoized_values:
return self._memoized_values[key]
else:
self._memoized_values[key] = value = callable_()
return value
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
for table in self._sorted_tables:
cols = set(table.c)
for m in self.iterate_to_root():
if m._inherits_equated_pairs and cols.intersection(
util.reduce(
set.union,
[l.proxy_set for l, r in m._inherits_equated_pairs],
)
):
result[table].append((m, m._inherits_equated_pairs))
return result
class _OptGetColumnsNotAvailable(Exception):
pass
def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far across all :class:`_orm.registry`
collections.
The configure step is used to reconcile and initialize the
:func:`_orm.relationship` linkages between mapped classes, as well as to
invoke configuration events such as the
:meth:`_orm.MapperEvents.before_configured` and
:meth:`_orm.MapperEvents.after_configured`, which may be used by ORM
extensions or user-defined extension hooks.
Mapper configuration is normally invoked automatically, the first time
mappings from a particular :class:`_orm.registry` are used, as well as
whenever mappings are used and additional not-yet-configured mappers have
    been constructed. The automatic configuration process, however, is local only
to the :class:`_orm.registry` involving the target mapper and any related
:class:`_orm.registry` objects which it may depend on; this is
equivalent to invoking the :meth:`_orm.registry.configure` method
on a particular :class:`_orm.registry`.
By contrast, the :func:`_orm.configure_mappers` function will invoke the
configuration process on all :class:`_orm.registry` objects that
exist in memory, and may be useful for scenarios where many individual
:class:`_orm.registry` objects that are nonetheless interrelated are
in use.
.. versionchanged:: 1.4
As of SQLAlchemy 1.4.0b2, this function works on a
per-:class:`_orm.registry` basis, locating all :class:`_orm.registry`
objects present and invoking the :meth:`_orm.registry.configure` method
on each. The :meth:`_orm.registry.configure` method may be preferred to
limit the configuration of mappers to those local to a particular
:class:`_orm.registry` and/or declarative base class.
Points at which automatic configuration is invoked include when a mapped
class is instantiated into an instance, as well as when ORM queries
are emitted using :meth:`.Session.query` or :meth:`_orm.Session.execute`
with an ORM-enabled statement.
The mapper configure process, whether invoked by
:func:`_orm.configure_mappers` or from :meth:`_orm.registry.configure`,
provides several event hooks that can be used to augment the mapper
configuration step. These hooks include:
* :meth:`.MapperEvents.before_configured` - called once before
:func:`.configure_mappers` or :meth:`_orm.registry.configure` does any
work; this can be used to establish additional options, properties, or
related mappings before the operation proceeds.
* :meth:`.MapperEvents.mapper_configured` - called as each individual
:class:`_orm.Mapper` is configured within the process; will include all
mapper state except for backrefs set up by other mappers that are still
to be configured.
* :meth:`.MapperEvents.after_configured` - called once after
:func:`.configure_mappers` or :meth:`_orm.registry.configure` is
complete; at this stage, all :class:`_orm.Mapper` objects that fall
within the scope of the configuration operation will be fully configured.
Note that the calling application may still have other mappings that
haven't been produced yet, such as if they are in modules as yet
unimported, and may also have mappings that are still to be configured,
if they are in other :class:`_orm.registry` collections not part of the
current scope of configuration.
"""
_configure_registries(_all_registries(), cascade=True)
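# Usage sketch (illustrative only): mapper configuration normally runs lazily,
# but an application may force it eagerly once all mapped classes have been
# declared:
#
#     from sqlalchemy.orm import configure_mappers
#
#     # ... declare all mapped classes / registries first ...
#     configure_mappers()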
def _configure_registries(registries, cascade):
for reg in registries:
if reg._new_mappers:
break
else:
return
with _CONFIGURE_MUTEX:
global _already_compiling
if _already_compiling:
return
_already_compiling = True
try:
# double-check inside mutex
for reg in registries:
if reg._new_mappers:
break
else:
return
Mapper.dispatch._for_class(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
_do_configure_registries(registries, cascade)
finally:
_already_compiling = False
Mapper.dispatch._for_class(Mapper).after_configured()
@util.preload_module("sqlalchemy.orm.decl_api")
def _do_configure_registries(registries, cascade):
registry = util.preloaded.orm_decl_api.registry
orig = set(registries)
for reg in registry._recurse_with_dependencies(registries):
has_skip = False
for mapper in reg._mappers_to_configure():
run_configure = None
for fn in mapper.dispatch.before_mapper_configured:
run_configure = fn(mapper, mapper.class_)
if run_configure is EXT_SKIP:
has_skip = True
break
if run_configure is EXT_SKIP:
continue
if getattr(mapper, "_configure_failed", False):
e = sa_exc.InvalidRequestError(
"One or more mappers failed to initialize - "
"can't proceed with initialization of other "
"mappers. Triggering mapper: '%s'. "
"Original exception was: %s"
% (mapper, mapper._configure_failed)
)
e._configure_failed = mapper._configure_failed
raise e
if not mapper.configured:
try:
mapper._post_configure_properties()
mapper._expire_memoizations()
mapper.dispatch.mapper_configured(mapper, mapper.class_)
except Exception:
exc = sys.exc_info()[1]
if not hasattr(exc, "_configure_failed"):
mapper._configure_failed = exc
raise
if not has_skip:
reg._new_mappers = False
if not cascade and reg._dependencies.difference(orig):
raise sa_exc.InvalidRequestError(
"configure was called with cascade=False but "
"additional registries remain"
)
@util.preload_module("sqlalchemy.orm.decl_api")
def _dispose_registries(registries, cascade):
registry = util.preloaded.orm_decl_api.registry
orig = set(registries)
for reg in registry._recurse_with_dependents(registries):
if not cascade and reg._dependents.difference(orig):
raise sa_exc.InvalidRequestError(
"Registry has dependent registries that are not disposed; "
"pass cascade=True to clear these also"
)
while reg._managers:
try:
manager, _ = reg._managers.popitem()
except KeyError:
# guard against race between while and popitem
pass
else:
reg._dispose_manager_and_mapper(manager)
reg._non_primary_mappers.clear()
reg._dependents.clear()
for dep in reg._dependencies:
dep._dependents.discard(reg)
reg._dependencies.clear()
# this wasn't done in the 1.3 clear_mappers() and in fact it
# was a bug, as it could cause configure_mappers() to invoke
# the "before_configured" event even though mappers had all been
# disposed.
reg._new_mappers = False
def reconstructor(fn):
"""Decorate a method as the 'reconstructor' hook.
Designates a single method as the "reconstructor", an ``__init__``-like
method that will be called by the ORM after the instance has been
loaded from the database or otherwise reconstituted.
The reconstructor will be invoked with no arguments. Scalar
(non-collection) database-mapped attributes of the instance will
be available for use within the function. Eagerly-loaded
collections are generally not yet available and will usually only
contain the first element. ORM state changes made to objects at
this stage will not be recorded for the next flush() operation, so
the activity within a reconstructor should be conservative.
.. seealso::
:ref:`mapping_constructors`
:meth:`.InstanceEvents.load`
"""
fn.__sa_reconstructor__ = True
return fn
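# Usage sketch (illustrative only; ``Base``, ``Column`` and ``MyMappedClass``
# are hypothetical and not part of this module):
#
#     from sqlalchemy.orm import reconstructor
#
#     class MyMappedClass(Base):
#         __tablename__ = "mytable"
#         id = Column(Integer, primary_key=True)
#
#         @reconstructor
#         def init_on_load(self):
#             # runs after a row has been loaded into this instance
#             self.cache = {}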
def validates(*names, **kw):
r"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
name of the attribute as well as a value to be assigned, or in the
case of a collection, the value to be added to the collection.
The function can then raise validation exceptions to halt the
process from continuing (where Python's built-in ``ValueError``
and ``AssertionError`` exceptions are reasonable choices), or can
modify or replace the value before proceeding. The function should
otherwise return the given value.
Note that a validator for a collection **cannot** issue a load of that
collection within the validation routine - this usage raises
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
:param include_backrefs: defaults to ``True``; if ``False``, the
validation function will not emit if the originator is an attribute
event related via a backref. This can be used for bi-directional
:func:`.validates` usage where only one validator should emit per
attribute operation.
.. versionadded:: 0.9.0
.. seealso::
:ref:`simple_validators` - usage examples for :func:`.validates`
"""
include_removes = kw.pop("include_removes", False)
include_backrefs = kw.pop("include_backrefs", True)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_validation_opts__ = {
"include_removes": include_removes,
"include_backrefs": include_backrefs,
}
return fn
return wrap
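# Usage sketch (illustrative only; the ``EmailAddress`` mapping and its
# columns are hypothetical):
#
#     from sqlalchemy.orm import validates
#
#     class EmailAddress(Base):
#         __tablename__ = "address"
#         id = Column(Integer, primary_key=True)
#         email = Column(String)
#
#         @validates("email")
#         def validate_email(self, key, value):
#             if "@" not in value:
#                 raise ValueError("failed simple email validation")
#             return value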
def _event_on_load(state, ctx):
instrumenting_mapper = state.manager.mapper
if instrumenting_mapper._reconstructor:
instrumenting_mapper._reconstructor(state.obj())
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.mapper
if instrumenting_mapper:
instrumenting_mapper._check_configure()
if instrumenting_mapper._set_polymorphic_identity:
instrumenting_mapper._set_polymorphic_identity(state)
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
__slots__ = ("mapper",)
def __init__(self, mapper):
self.mapper = mapper
def __missing__(self, column):
prop = self.mapper._props.get(column)
if prop:
raise orm_exc.UnmappedColumnError(
"Column '%s.%s' is not available, due to "
"conflicting property '%s':%r"
% (column.table.name, column.name, column.key, prop)
)
raise orm_exc.UnmappedColumnError(
"No column %s is configured on mapper %s..."
% (column, self.mapper)
)
|
py | b4130ce9f68df87da5128660cbfdc67b82b3f6c2 | import os
def get_html_theme_path():
"""Return the HTML theme path to set in Sphinx to use this theme"""
cur_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
return cur_dir
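# Typical conf.py wiring (sketch only; ``my_theme_package`` and the theme name
# are placeholders, not defined by this module):
#
#     import my_theme_package
#     html_theme = "my_theme"
#     html_theme_path = [my_theme_package.get_html_theme_path()]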
|
py | b4130d04b43c706ebb56a9d6ede2201a268db5d7 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for hparam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.training.python.training import hparam
from tensorflow.python.platform import test
class HParamsTest(test.TestCase):
def _assertDictEquals(self, d1, d2):
self.assertEqual(len(d1), len(d2))
for k, v in six.iteritems(d1):
self.assertTrue(k in d2, k)
self.assertEquals(v, d2[k], d2[k])
def testEmpty(self):
hparams = hparam.HParams()
self._assertDictEquals({}, hparams.values())
hparams.parse('')
self._assertDictEquals({}, hparams.values())
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('xyz=123')
def testSomeValues(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6')
self._assertDictEquals(
{'aaa': 1, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
expected_str = '[(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\')]'
self.assertEquals(expected_str, str(hparams.__str__()))
self.assertEquals(expected_str, str(hparams))
self.assertEquals(1, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse('aaa=12')
self._assertDictEquals(
{'aaa': 12, 'b': 2.0, 'c_c': 'relu6'}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse('c_c=relu4,b=-2.0e10')
self._assertDictEquals({'aaa': 12, 'b': -2.0e10, 'c_c': 'relu4'},
hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(-2.0e10, hparams.b)
self.assertEquals('relu4', hparams.c_c)
hparams.parse('c_c=,b=0,')
self._assertDictEquals({'aaa': 12, 'b': 0, 'c_c': ''}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(0.0, hparams.b)
self.assertEquals('', hparams.c_c)
hparams.parse('c_c=2.3",b=+2,')
self.assertEquals(2.0, hparams.b)
self.assertEquals('2.3"', hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=123')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=poipoi')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=1.0')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=12x')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=relu')
with self.assertRaisesRegexp(ValueError, 'Must not pass a list'):
hparams.parse('aaa=[123]')
self.assertEquals(12, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('2.3"', hparams.c_c)
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEquals(12, hparams2.aaa)
self.assertEquals(2.0, hparams2.b)
self.assertEquals('2.3"', hparams2.c_c)
def testBoolParsing(self):
for value in 'true', 'false', 'True', 'False', '1', '0':
for initial in False, True:
hparams = hparam.HParams(use_gpu=initial)
hparams.parse('use_gpu=' + value)
self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1'])
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
self.assertEquals(hparams.use_gpu, hparams2.use_gpu)
# Check that hparams2.use_gpu is a bool rather than an int.
# The assertEquals() call above won't catch this, since
# (0 == False) and (1 == True) in Python.
self.assertEquals(bool, type(hparams2.use_gpu))
def testBoolParsingFail(self):
hparams = hparam.HParams(use_gpu=True)
with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'):
hparams.parse('use_gpu=yep')
def testLists(self):
hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6'])
self._assertDictEquals({'aaa': [1], 'b': [2.0, 3.0], 'c_c': ['relu6']},
hparams.values())
self.assertEquals([1], hparams.aaa)
self.assertEquals([2.0, 3.0], hparams.b)
self.assertEquals(['relu6'], hparams.c_c)
hparams.parse('aaa=[12]')
self.assertEquals([12], hparams.aaa)
hparams.parse('aaa=[12,34,56]')
self.assertEquals([12, 34, 56], hparams.aaa)
hparams.parse('c_c=[relu4,relu12],b=[1.0]')
self.assertEquals(['relu4', 'relu12'], hparams.c_c)
self.assertEquals([1.0], hparams.b)
hparams.parse('c_c=[],aaa=[-34]')
self.assertEquals([-34], hparams.aaa)
self.assertEquals([], hparams.c_c)
hparams.parse('c_c=[_12,3\'4"],aaa=[+3]')
self.assertEquals([3], hparams.aaa)
self.assertEquals(['_12', '3\'4"'], hparams.c_c)
with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'):
hparams.parse('x=[123]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[poipoi]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('aaa=[1.0]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[12x]')
with self.assertRaisesRegexp(ValueError, 'Could not parse'):
hparams.parse('b=[relu]')
with self.assertRaisesRegexp(ValueError, 'Must pass a list'):
hparams.parse('aaa=123')
# Exports to proto.
hparam_def = hparams.to_proto()
# Imports from proto.
hparams2 = hparam.HParams(hparam_def=hparam_def)
# Verifies that all hparams are restored.
self.assertEquals([3], hparams2.aaa)
self.assertEquals([1.0], hparams2.b)
self.assertEquals(['_12', '3\'4"'], hparams2.c_c)
def testJson(self):
hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True)
self._assertDictEquals(
{'aaa': 1, 'b': 2.0, 'c_c': 'relu6', 'd': True}, hparams.values())
self.assertEquals(1, hparams.aaa)
self.assertEquals(2.0, hparams.b)
self.assertEquals('relu6', hparams.c_c)
hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}')
self._assertDictEquals(
{'aaa': 12, 'b': 3.0, 'c_c': 'relu4', 'd': False}, hparams.values())
self.assertEquals(12, hparams.aaa)
self.assertEquals(3.0, hparams.b)
self.assertEquals('relu4', hparams.c_c)
json_str = hparams.to_json()
hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False)
hparams2.parse_json(json_str)
self.assertEquals(12, hparams2.aaa)
self.assertEquals(3.0, hparams2.b)
self.assertEquals('relu4', hparams2.c_c)
self.assertEquals(False, hparams2.d)
def testNonProtoFails(self):
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=1.0)
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def='hello')
with self.assertRaisesRegexp(AssertionError, ''):
hparam.HParams(hparam_def=[1, 2, 3])
if __name__ == '__main__':
test.main()
|
py | b4130e48d535fdb76df202c42e25c4d93cb2c91a | """
This module contains useful functions to compute distances and errors on
circles and spheres.
"""
from __future__ import division
import numpy as np
def circ_dist(azimuth1, azimuth2, r=1.0):
"""
Returns the shortest distance between two points on a circle
Parameters
----------
azimuth1:
azimuth of point 1
azimuth2:
azimuth of point 2
r: optional
radius of the circle (Default 1)
"""
return np.arccos(np.cos(azimuth1 - azimuth2))
def great_circ_dist(r, colatitude1, azimuth1, colatitude2, azimuth2):
"""
calculate great circle distance for points located on a sphere
Parameters
----------
r: radius of the sphere
colatitude1: colatitude of point 1
azimuth1: azimuth of point 1
colatitude2: colatitude of point 2
azimuth2: azimuth of point 2
Returns
-------
float or ndarray
great-circle distance
"""
d_azimuth = np.abs(azimuth1 - azimuth2)
dist = r * np.arctan2(
np.sqrt(
(np.sin(colatitude2) * np.sin(d_azimuth)) ** 2
+ (
np.sin(colatitude1) * np.cos(colatitude2)
- np.cos(colatitude1) * np.sin(colatitude2) * np.cos(d_azimuth)
)
** 2
),
np.cos(colatitude1) * np.cos(colatitude2)
+ np.sin(colatitude1) * np.sin(colatitude2) * np.cos(d_azimuth),
)
return dist
def spher2cart(azimuth, colatitude=None, r=1, degrees=False):
"""
Convert a spherical point to cartesian coordinates.
Parameters
----------
    azimuth:
        azimuth
    colatitude: optional
        colatitude; defaults to pi/2 (i.e. a point in the XY plane)
    r: optional
        radius (Default 1)
    degrees: optional
        if True, ``azimuth`` and ``colatitude`` are interpreted as degrees
Returns
-------
ndarray
An ndarray containing the Cartesian coordinates of the points as its columns.
"""
if degrees:
azimuth = np.radians(azimuth)
if colatitude is not None:
colatitude = np.radians(colatitude)
if colatitude is None:
# default to XY plane
colatitude = np.pi / 2
if hasattr(azimuth, "__len__"):
colatitude = np.ones(len(azimuth)) * colatitude
# convert to cartesian
x = r * np.cos(azimuth) * np.sin(colatitude)
y = r * np.sin(azimuth) * np.sin(colatitude)
z = r * np.cos(colatitude)
return np.array([x, y, z])
def polar_distance(x1, x2):
"""
Given two arrays of numbers x1 and x2, pairs the cells that are the
closest and provides the pairing matrix index: x1(index(1,:)) should be as
close as possible to x2(index(2,:)). The function outputs the average of
the absolute value of the differences abs(x1(index(1,:))-x2(index(2,:))).
Parameters
----------
x1:
vector 1
x2:
vector 2
Returns
-------
d:
        the mean circular distance between the paired entries of x1 and x2
    index:
        the pairing indices, one (x1, x2) index pair per row
"""
x1 = np.reshape(x1, (1, -1), order="F")
x2 = np.reshape(x2, (1, -1), order="F")
N1 = x1.size
N2 = x2.size
diffmat = np.arccos(np.cos(x1 - np.reshape(x2, (-1, 1), order="F")))
min_N1_N2 = np.min([N1, N2])
index = np.zeros((min_N1_N2, 2), dtype=int)
if min_N1_N2 > 1:
for k in range(min_N1_N2):
d2 = np.min(diffmat, axis=0)
index2 = np.argmin(diffmat, axis=0)
index1 = np.argmin(d2)
index2 = index2[index1]
index[k, :] = [index1, index2]
diffmat[index2, :] = float("inf")
diffmat[:, index1] = float("inf")
d = np.mean(np.arccos(np.cos(x1[:, index[:, 0]] - x2[:, index[:, 1]])))
else:
d = np.min(diffmat)
index = np.argmin(diffmat)
if N1 == 1:
index = np.array([1, index])
else:
index = np.array([index, 1])
return d, index
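# Illustrative self-check (not part of the original module): exercises the
# helpers above on a few hand-picked points; the expected values follow
# directly from the formulas.
if __name__ == "__main__":
    # two azimuths half a turn apart are pi radians away on the unit circle
    print(circ_dist(0.0, np.pi))                                # ~3.1416
    # north pole to a point on the equator of the unit sphere
    print(great_circ_dist(1.0, 0.0, 0.0, np.pi / 2, 0.0))       # ~1.5708
    # azimuth pi/2 in the XY plane maps to the +y axis
    print(spher2cart(np.pi / 2))                                # ~[0, 1, 0]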
|
py | b4130eda5f7c068f66981cd7b1d5e1fc4d3751ca | from library import *
from collections import defaultdict
import sys
import numpy as np
if len(sys.argv) > 2:
n = int(sys.argv[2])
else:
n = None
i, o, t = load_npz(sys.argv[1])
t = t[:, 250000:280000]
normalize(t)
align_fft(t, 1000)
t = t[:, :-1000]
if 1:
t_parts = np.zeros((len(t) * 16, 2000))
for j, tra in enumerate(t):
off = 2800
for k in range(16):
if k < 1:
t_parts[j * 16 + k] = tra[off:off+2000]
off += 1465
if k % 4 == 3:
off += 308
align_fft(t_parts, 100)
t = t_parts
if 1:
t = t[:, 100:1600]
normalize(t)
align_fft(t, 200)
if 0:
t = t[:, 4500:6500]
normalize(t)
align_fft(t, 100)
if 0:
    t_avg = np.zeros((len(t) // 16, 1500))
for j in range(len(t_avg)):
t_avg[j] = np.mean(t[j*16:j*16+16], axis=0)
t = t_avg
d = defaultdict(list)
for j, inp in enumerate(i):
inp = "".join(chr(c) for c in inp)
for k in range(16):
d[inp].append(j*16+k)
tlist = []
for indices in d.values():
tlist.append(indices)
t1 = t[tlist[0][:12*16]]
t2 = t[tlist[1][:12*16]]
show_red_green(t1, t2)
|
py | b4130f3b119d757cc5075c29ad51647f138830f1 | from lib_sudoku import Sudoku_Solver
mylist= list()
mylist2= list()
#first
mylist.append([0,0,0,0,0,0,0,0,8])
mylist.append([0,1,8,0,6,7,2,0,0])
mylist.append([0,5,0,9,0,0,0,0,0])
mylist.append([0,0,5,0,4,0,0,6,0])
mylist.append([0,6,0,3,0,0,7,4,9])
mylist.append([0,7,0,0,0,9,0,0,3])
mylist.append([0,9,0,0,2,0,0,0,0])
mylist.append([0,0,0,4,7,0,0,0,1])
mylist.append([4,0,0,0,9,5,0,2,0])
#second
mylist2.append([0,0,0,0,0,0,0,0,0])
mylist2.append([0,0,0,5,0,6,0,0,0])
mylist2.append([0,9,0,0,0,0,0,7,0])
mylist2.append([0,0,5,0,0,0,1,0,0])
mylist2.append([0,3,0,8,0,2,0,6,0])
mylist2.append([0,2,1,0,0,0,8,4,0])
mylist2.append([2,1,0,0,7,0,0,3,8])
mylist2.append([7,0,3,0,2,0,6,0,9])
mylist2.append([0,5,0,1,0,3,0,2,0])
slib = Sudoku_Solver(mylist2)
# print(mylist[4][2])
# print(slib.populate_get_all_possibilities())
slib.brain()
# http://norvig.com/sudoku.html
|
py | b4130fe75ea04e2eceeab44c6f157e786eb58707 | import src.MainImages as main_img
class LettersPlayFair(object):
def __init__(self):
self.letters = main_img.letters
self.letters_start_pos = {'A': (908, 8), 'B': (980, 8), 'C': (1053, 8), 'D': (1127, 8), 'E': (1200, 8),
'F': (908, 80), 'G': (980, 80), 'H': (1053, 80), 'I': (1127, 80), 'K': (1200, 80),
'L': (908, 153), 'M': (980, 153), 'N': (1053, 153), 'O': (1127, 153), 'P': (1200, 153),
'Q': (908, 227), 'R': (980, 227), 'S': (1053, 227), 'T': (1127, 227), 'U': (1200, 227),
'V': (908, 300), 'W': (980, 300), 'X': (1053, 300), 'Y': (1127, 300), 'Z': (1200, 300)}
def draw(self, surface):
for letter in self.letters:
surface.blit(self.letters[letter], self.letters_start_pos[letter])
|
py | b413113557160f068161df8ff423281bfe183038 | import glob
from PIL import Image
images = glob.glob('*.jpg')
new_im = Image.new('RGB', (20*15,20*15), (250,250,250))
i=0
j=0
for image in images:
with open(image, 'rb') as file:
img = Image.open(file)
new_im.paste(img, ((i%20)*15,j*15))
        i = i + 1
        if i > 0 and i % 20 == 0:
            j = j + 1
new_im.save("merged_images.png", "PNG")
new_im.show()
|
py | b4131141aad4084ea581a1d08f39218ba986c3c3 | """Basic Test."""
import hashlib
import pytest
from nr_merkletree import MerkleTree
def test_basic():
"""Test if MerkleTree works for a basic example."""
data_chunks = [b'0', b'1', b'2']
merkle_tree = MerkleTree(data_chunks)
expected_hash0 = hashlib.sha256(data_chunks[0]).digest()
expected_hash1 = hashlib.sha256(data_chunks[1]).digest()
expected_hash2 = hashlib.sha256(data_chunks[2]).digest()
expected_hash01 = hashlib.sha256(expected_hash0 + expected_hash1).digest()
expected_hash2x = hashlib.sha256(expected_hash2).digest()
expected_root_hash = hashlib.sha256(expected_hash01 + expected_hash2x).digest()
assert merkle_tree.get_node(nid=expected_hash0).identifier == expected_hash0
assert merkle_tree.get_node(nid=expected_hash1).identifier == expected_hash1
assert merkle_tree.get_node(nid=expected_hash1).identifier == expected_hash1
assert merkle_tree.get_node(nid=expected_hash01).identifier == expected_hash01
assert merkle_tree.get_node(nid=expected_hash2x).identifier == expected_hash2x
assert merkle_tree.get_node(nid=expected_root_hash).identifier == expected_root_hash
# Sanity check.
with pytest.raises(AttributeError):
assert merkle_tree.get_node(nid=b'not_an_expected_hash').identifier
|
py | b41313438dbeb5af3d5081e892429fd51b3555c1 |
import sys
import os
# 09-Mar-2017 Diogo Ribeiro
# Routine to merge and process RPISeq raw results into a format readable to my scripts (catRAPID format)
inputFiles = sys.argv[1] # file with list of RPISeq output files to parse here, full paths must be given
outputFolder = sys.argv[2] # folder where to write output file
proteinID = sys.argv[3] # ID of the interacting protein, to appear in output file
### Input example (RPISeq output)
# RNA ID RF Classifier SVM Classifier
# >ENST00000437898 0.75 0.541
# >ENST00000452728 0.75 0.774
# >ENST00000422049 0.8 0.451
# (split into several files, each one with a header)
# the protein name is defined beforehand, e.g. P52298
# RPISeq provides two scores, one output file will be created for each
### Wanted Output
# sp|P52298|NCBP2_HUMAN ENST00000625358 40.15 0.88 0.08
# sp|P52298|NCBP2_HUMAN ENST00000625632 49.31 0.94 0.29
# sp|P52298|NCBP2_HUMAN ENST00000627767 48.38 0.94 0.29
# (no header)
outFileRF = open( outputFolder + "/RIPSeq_RF_formatted.out", "w")
outFileSVM = open( outputFolder + "/RIPSeq_SVM_formatted.out", "w")
with open( inputFiles, "r") as initialFile:
for li in initialFile:
# open each file
with open( li.strip(), "r") as inFile:
header = inFile.readline()
for line in inFile:
spl = line.strip().split()
                if len(spl) < 3:
                    print("Problem with input file: %s, %s" % (inFile, line))
txID = spl[0].replace(">","")
rfScore = spl[1]
svmScore = spl[2]
# write RF scores
outFileRF.write( "sp|%s|sp %s\t%s\n" % (proteinID, txID, rfScore))
# write SVM scores
outFileSVM.write( "sp|%s|sp %s\t%s\n" % (proteinID, txID, svmScore))
outFileRF.close()
outFileSVM.close()
|
py | b41314a686e69446c6ee0bcc550ac7751ebe4671 | """
Classes to represent the default SQL aggregate functions
"""
from django.db.models.fields import IntegerField, FloatField
# Fake fields used to identify aggregate types in data-conversion operations.
ordinal_aggregate_field = IntegerField()
computed_aggregate_field = FloatField()
class Aggregate(object):
"""
Default SQL Aggregate.
"""
is_ordinal = False
is_computed = False
sql_template = '%(function)s(%(field)s)'
def __init__(self, col, source=None, is_summary=False, **extra):
"""Instantiate an SQL aggregate
* col is a column reference describing the subject field
of the aggregate. It can be an alias, or a tuple describing
a table and column name.
* source is the underlying field or aggregate definition for
the column reference. If the aggregate is not an ordinal or
computed type, this reference is used to determine the coerced
output type of the aggregate.
* extra is a dictionary of additional data to provide for the
aggregate definition
Also utilizes the class variables:
* sql_function, the name of the SQL function that implements the
aggregate.
* sql_template, a template string that is used to render the
aggregate into SQL.
* is_ordinal, a boolean indicating if the output of this aggregate
is an integer (e.g., a count)
* is_computed, a boolean indicating if this output of this aggregate
is a computed float (e.g., an average), regardless of the input
type.
"""
self.col = col
self.source = source
self.is_summary = is_summary
self.extra = extra
# Follow the chain of aggregate sources back until you find an
# actual field, or an aggregate that forces a particular output
# type. This type of this field will be used to coerce values
# retrieved from the database.
tmp = self
while tmp and isinstance(tmp, Aggregate):
if getattr(tmp, 'is_ordinal', False):
tmp = ordinal_aggregate_field
elif getattr(tmp, 'is_computed', False):
tmp = computed_aggregate_field
else:
tmp = tmp.source
self.field = tmp
def relabel_aliases(self, change_map):
if isinstance(self.col, (list, tuple)):
self.col = (change_map.get(self.col[0], self.col[0]), self.col[1])
def as_sql(self, qn, connection):
"Return the aggregate, rendered as SQL."
if hasattr(self.col, 'as_sql'):
field_name = self.col.as_sql(qn, connection)
elif isinstance(self.col, (list, tuple)):
field_name = '.'.join([qn(c) for c in self.col])
else:
field_name = self.col
params = {
'function': self.sql_function,
'field': field_name
}
params.update(self.extra)
return self.sql_template % params
class Avg(Aggregate):
is_computed = True
sql_function = 'AVG'
class Count(Aggregate):
is_ordinal = True
sql_function = 'COUNT'
sql_template = '%(function)s(%(distinct)s%(field)s)'
def __init__(self, col, distinct=False, **extra):
super(Count, self).__init__(col, distinct=distinct and 'DISTINCT ' or '', **extra)
class Max(Aggregate):
sql_function = 'MAX'
class Min(Aggregate):
sql_function = 'MIN'
class StdDev(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(StdDev, self).__init__(col, **extra)
self.sql_function = sample and 'STDDEV_SAMP' or 'STDDEV_POP'
class Sum(Aggregate):
sql_function = 'SUM'
class Variance(Aggregate):
is_computed = True
def __init__(self, col, sample=False, **extra):
super(Variance, self).__init__(col, **extra)
self.sql_function = sample and 'VAR_SAMP' or 'VAR_POP'
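# Illustrative rendering sketch (not part of Django): shows how sql_template,
# sql_function and the extra parameters combine inside Aggregate.as_sql().
# The lambda stands in for Django's quoting function (it is only applied to
# (table, column) tuple references) and the connection argument is unused here.
if __name__ == "__main__":
    count = Count('author_id', distinct=True)
    print(count.as_sql(lambda name: name, None))          # -> COUNT(DISTINCT author_id)
    print(Avg('price').as_sql(lambda name: name, None))   # -> AVG(price)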
|
py | b41314cb5ead0a315423d208ceda7dbbfe65ad43 | # Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various utilities for dealing with Neutron and the renaming from Quantum.
import six
from subprocess import check_output
from charmhelpers.core.hookenv import (
config,
log,
ERROR,
)
from charmhelpers.contrib.openstack.utils import (
os_release,
CompareOpenStackReleases,
)
def headers_package():
"""Ensures correct linux-headers for running kernel are installed,
for building DKMS package"""
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
return 'linux-headers-%s' % kver
QUANTUM_CONF_DIR = '/etc/quantum'
def kernel_version():
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
kver = kver.split('.')
return (int(kver[0]), int(kver[1]))
def determine_dkms_package():
""" Determine which DKMS package should be used based on kernel version """
# NOTE: 3.13 kernels have support for GRE and VXLAN native
if kernel_version() >= (3, 13):
return []
else:
return [headers_package(), 'openvswitch-datapath-dkms']
# legacy
def quantum_plugins():
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
'contexts': [],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
'server_packages': ['quantum-server',
'quantum-plugin-openvswitch'],
'server_services': ['quantum-server']
},
'nvp': {
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
'contexts': [],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
'quantum-plugin-nicira'],
'server_services': ['quantum-server']
}
}
NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
release = os_release('nova-common')
plugins = {
'ovs': {
'config': '/etc/neutron/plugins/openvswitch/'
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
'contexts': [],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
'server_packages': ['neutron-server',
'neutron-plugin-openvswitch'],
'server_services': ['neutron-server']
},
'nvp': {
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-nicira'],
'server_services': ['neutron-server']
},
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
'neutron-plugin-vmware'],
'server_services': ['neutron-server']
},
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
'contexts': [],
'services': [],
'packages': [determine_dkms_package(),
['neutron-plugin-cisco']],
'server_packages': ['neutron-server',
'neutron-plugin-cisco'],
'server_services': ['neutron-server']
},
'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
'contexts': [],
'services': ['calico-felix',
'bird',
'neutron-dhcp-agent',
'nova-api-metadata',
'etcd'],
'packages': [determine_dkms_package(),
['calico-compute',
'bird',
'neutron-dhcp-agent',
'nova-api-metadata',
'etcd']],
'server_packages': ['neutron-server', 'calico-control', 'etcd'],
'server_services': ['neutron-server', 'etcd']
},
'vsp': {
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
'server_services': ['neutron-server']
},
'plumgrid': {
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
'.plumgrid_plugin.NeutronPluginPLUMgridV2'),
'contexts': [],
'services': [],
'packages': ['plumgrid-lxc',
'iovisor-dkms'],
'server_packages': ['neutron-server',
'neutron-plugin-plumgrid'],
'server_services': ['neutron-server']
},
'midonet': {
'config': '/etc/neutron/plugins/midonet/midonet.ini',
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
'contexts': [],
'services': [],
'packages': [determine_dkms_package()],
'server_packages': ['neutron-server',
'python-neutron-plugin-midonet'],
'server_services': ['neutron-server']
}
}
if CompareOpenStackReleases(release) >= 'icehouse':
# NOTE: patch in ml2 plugin for icehouse onwards
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['ovs']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
plugins['nvp'] = plugins['nsx']
if CompareOpenStackReleases(release) >= 'kilo':
plugins['midonet']['driver'] = (
'neutron.plugins.midonet.plugin.MidonetPluginV2')
if CompareOpenStackReleases(release) >= 'liberty':
plugins['midonet']['driver'] = (
'midonet.neutron.plugin_v1.MidonetPluginV2')
plugins['midonet']['server_packages'].remove(
'python-neutron-plugin-midonet')
plugins['midonet']['server_packages'].append(
'python-networking-midonet')
plugins['plumgrid']['driver'] = (
'networking_plumgrid.neutron.plugins'
'.plugin.NeutronPluginPLUMgridV2')
plugins['plumgrid']['server_packages'].remove(
'neutron-plugin-plumgrid')
if CompareOpenStackReleases(release) >= 'mitaka':
plugins['nsx']['server_packages'].remove('neutron-plugin-vmware')
plugins['nsx']['server_packages'].append('python-vmware-nsx')
plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
plugins['vsp']['driver'] = (
'nuage_neutron.plugins.nuage.plugin.NuagePlugin')
if CompareOpenStackReleases(release) >= 'newton':
plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
plugins['vsp']['server_packages'] = ['neutron-server',
'neutron-plugin-ml2']
return plugins
def neutron_plugin_attribute(plugin, attr, net_manager=None):
manager = net_manager or network_manager()
if manager == 'quantum':
plugins = quantum_plugins()
elif manager == 'neutron':
plugins = neutron_plugins()
else:
log("Network manager '%s' does not support plugins." % (manager),
level=ERROR)
raise Exception
try:
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise Exception
try:
return _plugin[attr]
except KeyError:
return None
def network_manager():
'''
Deals with the renaming of Quantum to Neutron in H and any situations
that require compatibility (eg, deploying H with network-manager=quantum,
upgrading from G).
'''
release = os_release('nova-common')
manager = config('network-manager').lower()
if manager not in ['quantum', 'neutron']:
return manager
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'
else:
# ensure accurate naming for all releases post-H
return 'neutron'
def parse_mappings(mappings, key_rvalue=False):
"""By default mappings are lvalue keyed.
If key_rvalue is True, the mapping will be reversed to allow multiple
configs for the same lvalue.
"""
parsed = {}
if mappings:
mappings = mappings.split()
for m in mappings:
p = m.partition(':')
if key_rvalue:
key_index = 2
val_index = 0
# if there is no rvalue skip to next
if not p[1]:
continue
else:
key_index = 0
val_index = 2
key = p[key_index].strip()
parsed[key] = p[val_index].strip()
return parsed
def parse_bridge_mappings(mappings):
"""Parse bridge mappings.
Mappings must be a space-delimited list of provider:bridge mappings.
Returns dict of the form {provider:bridge}.
"""
return parse_mappings(mappings)
def parse_data_port_mappings(mappings, default_bridge='br-data'):
"""Parse data port mappings.
Mappings must be a space-delimited list of bridge:port.
Returns dict of the form {port:bridge} where ports may be mac addresses or
interface names.
"""
# NOTE(dosaboy): we use rvalue for key to allow multiple values to be
# proposed for <port> since it may be a mac address which will differ
# across units this allowing first-known-good to be chosen.
_mappings = parse_mappings(mappings, key_rvalue=True)
if not _mappings or list(_mappings.values()) == ['']:
if not mappings:
return {}
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {mappings.split()[0]: default_bridge}
ports = _mappings.keys()
if len(set(ports)) != len(ports):
raise Exception("It is not allowed to have the same port configured "
"on more than one bridge")
return _mappings
def parse_vlan_range_mappings(mappings):
"""Parse vlan range mappings.
Mappings must be a space-delimited list of provider:start:end mappings.
The start:end range is optional and may be omitted.
Returns dict of the form {provider: (start, end)}.
"""
_mappings = parse_mappings(mappings)
if not _mappings:
return {}
mappings = {}
for p, r in six.iteritems(_mappings):
mappings[p] = tuple(r.split(':'))
return mappings
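# Illustrative sketch (not part of charmhelpers; the provider, bridge and port
# names below are made up). Shows how the parse_* helpers interpret their
# space-delimited inputs when the module is importable:
if __name__ == "__main__":
    # provider:bridge pairs -> {provider: bridge}
    print(parse_bridge_mappings("physnet1:br-ex physnet2:br-data"))
    # bridge:port pairs are keyed by port (the rvalue) -> {port: bridge}
    print(parse_data_port_mappings("br-data:eth1 br-ex:eth2"))
    # provider:start:end -> {provider: (start, end)}
    print(parse_vlan_range_mappings("physnet1:1000:2000"))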
|
py | b41314de90fb6691a6f247e30ae35c16bacce685 | from pathlib import Path
from shutil import move
import os
import sys
import time
from tqdm import tqdm
import requests
import numpy as np
imaging_url = "https://kits19.sfo2.digitaloceanspaces.com/"
imaging_name_tmplt = "master_{:05d}.nii.gz"
temp_f = Path(__file__).parent / "temp.tmp"
def get_destination(i):
destination = Path("__file__").parent.parent /\
"data" / "case_{:05d}".format(i) / "imaging.nii.gz"
if not destination.parent.exists():
destination.parent.mkdir()
return destination
def cleanup(bar, msg):
bar.close()
if temp_f.exists():
temp_f.unlink()
print(msg)
sys.exit()
if __name__ == "__main__":
left_to_download = []
for i in range(300):
if not get_destination(i).exists():
left_to_download = left_to_download + [i]
print("{} cases to download...".format(len(left_to_download)))
for i, cid in enumerate(left_to_download):
print("Download {}/{}: ".format(
i+1, len(left_to_download)
))
destination = get_destination(cid)
remote_name = imaging_name_tmplt.format(cid)
uri = imaging_url + remote_name
chnksz = 1000
tries = 0
while True:
try:
tries = tries + 1
response = requests.get(uri, stream=True)
break
except Exception as e:
print("Failed to establish connection with server:\n")
print(str(e) + "\n")
if tries < 1000:
print("Retrying in 30s")
time.sleep(30)
else:
print("Max retries exceeded")
sys.exit()
with temp_f.open("wb") as f:
bar = tqdm(
unit="KB",
desc="case_{:05d}".format(cid),
total=int(
np.ceil(int(response.headers["content-length"])/chnksz)
)
)
try:
for pkg in response.iter_content(chunk_size=chnksz):
f.write(pkg)
bar.update(int(len(pkg)/chnksz))
move(str(temp_f), str(destination))
bar.close()
except KeyboardInterrupt:
cleanup(bar, "KeyboardInterrupt")
except Exception as e:
cleanup(bar, str(e))
|
py | b413164859df84d1bb1ed2dc38071189821df6e0 | from lclpy.termination.abstract_termination_criterion \
import AbstractTerminationCriterion
class MultiCriterion(AbstractTerminationCriterion):
"""Class to combine multiple terminationcriteria.
Parameters
----------
criteria : list or tuple of AbstractTerminationCriterion
        An iterable object containing the initialised termination criteria one
wishes to use.
Attributes
----------
criteria : list or tuple of AbstractTerminationCriterion
        An iterable object containing the initialised termination criteria one
wishes to use.
Examples
--------
    Three termination criteria are used; three tests are done to ensure that
    each criterion is capable of stopping the iteration correctly.
MaxSecondsTerminationCriterion stops the iterating:
.. doctest::
>>> import time
>>> from lclpy.termination.max_seconds_termination_criterion \\
... import MaxSecondsTerminationCriterion
>>> from lclpy.termination.max_iterations_termination_criterion \\
... import MaxIterationsTerminationCriterion
>>> from lclpy.termination.no_improvement_termination_criterion \\
... import NoImprovementTerminationCriterion
>>> from lclpy.termination.multi_criterion import MultiCriterion
... # init list
>>> criteria = []
>>> criteria.append(MaxSecondsTerminationCriterion(3))
>>> criteria.append(MaxIterationsTerminationCriterion(10))
>>> criteria.append(NoImprovementTerminationCriterion(3))
... # init MultiCriterion
>>> multi_criterion = MultiCriterion(criteria)
... # test
>>> start = time.time()
>>> multi_criterion.start_timing()
>>> while multi_criterion.keep_running():
... multi_criterion.iteration_done()
>>> stop = time.time()
>>> time_passed = stop - start
>>> time_passed < 4
True
MaxIterationsTerminationCriterion stops the iterating:
.. doctest::
>>> from lclpy.termination.max_seconds_termination_criterion \\
... import MaxSecondsTerminationCriterion
>>> from lclpy.termination.max_iterations_termination_criterion \\
... import MaxIterationsTerminationCriterion
>>> from lclpy.termination.no_improvement_termination_criterion \\
... import NoImprovementTerminationCriterion
>>> from lclpy.termination.multi_criterion import MultiCriterion
... # init list
>>> criteria = []
>>> criteria.append(MaxSecondsTerminationCriterion(3))
>>> criteria.append(MaxIterationsTerminationCriterion(10))
>>> criteria.append(NoImprovementTerminationCriterion(3))
... # init MultiCriterion
>>> multi_criterion = MultiCriterion(criteria)
... # test
>>> iterations = 0
>>> values = [20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9]
>>> multi_criterion.start_timing()
>>> while multi_criterion.keep_running():
... multi_criterion.check_new_value(values[iterations])
... iterations += 1
... multi_criterion.iteration_done()
>>> iterations
10
NoImprovementTerminationCriterion stops the iterating:
.. doctest::
>>> from lclpy.termination.max_seconds_termination_criterion \\
... import MaxSecondsTerminationCriterion
>>> from lclpy.termination.max_iterations_termination_criterion \\
... import MaxIterationsTerminationCriterion
>>> from lclpy.termination.no_improvement_termination_criterion \\
... import NoImprovementTerminationCriterion
>>> from lclpy.termination.multi_criterion import MultiCriterion
... # init list
>>> criteria = []
>>> criteria.append(MaxSecondsTerminationCriterion(3))
>>> criteria.append(MaxIterationsTerminationCriterion(10))
>>> criteria.append(NoImprovementTerminationCriterion(3))
... # init MultiCriterion
>>> multi_criterion = MultiCriterion(criteria)
... # test 1
>>> iterations = 0
>>> values = [9, 8, 7, 9, 9, 9, 9, 9, 9, 9, 9, 9]
>>> multi_criterion.start_timing()
>>> while multi_criterion.keep_running():
... multi_criterion.check_new_value(values[iterations])
... iterations += 1
... multi_criterion.iteration_done()
>>> iterations
6
"""
def __init__(self, criteria):
super().__init__()
self.criteria = criteria
def keep_running(self):
"""function to determine if the algorithm needs to continue running.
Returns
-------
bool
The function returns true if the algorithm has to continue
running, if the function returns false the algorithm needs to
stop running. If one or more of the composing termination
criterions returns False if its keep_running method is called, this
method will return False. Else the method will return True.
"""
for criterion in self.criteria:
if criterion.keep_running() is False:
return False
return True
def iteration_done(self):
"""function to be called after every iteration."""
for criterion in self.criteria:
criterion.iteration_done()
def check_new_value(self, value):
"""Checks a value.
Parameters
----------
value : int or float
A value from the evaluation function.
"""
for criterion in self.criteria:
criterion.check_new_value(value)
def start_timing(self):
"""Starts an internal timer if needed."""
for criterion in self.criteria:
criterion.start_timing()
def check_variable(self, variable):
"""Checks a variable specific to an implementation.
Does not need to be used or implemented
Parameters
----------
variable
The value of a certain value of a specific algorithm.
"""
for criterion in self.criteria:
criterion.check_variable(variable)
def reset(self):
"""Resets the object back to it's state after init.
Examples
--------
MaxIterationsTerminationCriterion stops the iterating:
.. doctest::
>>> from lclpy.termination.max_seconds_termination_criterion \\
... import MaxSecondsTerminationCriterion
>>> from lclpy.termination.max_iterations_termination_criterion \\
... import MaxIterationsTerminationCriterion
>>> from lclpy.termination.no_improvement_termination_criterion \\
... import NoImprovementTerminationCriterion
>>> from lclpy.termination.multi_criterion \\
... import MultiCriterion
... # init list
>>> criteria = []
>>> criteria.append(MaxSecondsTerminationCriterion(3))
>>> criteria.append(MaxIterationsTerminationCriterion(10))
>>> criteria.append(NoImprovementTerminationCriterion(3))
... # init MultiCriterion
>>> multi_criterion = MultiCriterion(criteria)
... # run 1
>>> iterations = 0
>>> values = [20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9]
>>> multi_criterion.start_timing()
>>> while multi_criterion.keep_running():
... multi_criterion.check_new_value(values[iterations])
... iterations += 1
... multi_criterion.iteration_done()
>>> iterations
10
>>> # reset
>>> multi_criterion.reset()
... # run 2
>>> iterations = 0
>>> values = [20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9]
>>> multi_criterion.start_timing()
>>> while multi_criterion.keep_running():
... multi_criterion.check_new_value(values[iterations])
... iterations += 1
... multi_criterion.iteration_done()
>>> iterations
10
"""
for criterion in self.criteria:
criterion.reset()
|
py | b413173edb609dac5c45ca3ffb750e75d0a8f75d | import ctypes
import numpy
import glob
# find the shared library, the path depends on the platform and Python version
libfile = glob.glob('build/*/Addbook*.so')[0]
# 1. open the shared library
mylib = ctypes.CDLL(libfile)
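# Optional sketch (assumption, not in the original snippet): declaring the return
# and argument types of the wrapped C functions makes the pointer handling explicit.
# mylib.b_new.restype = ctypes.c_void_p
# mylib.B_func.argtypes = [ctypes.c_void_p]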
class Books(object):
    def __init__(self):
        # 2. create a Books object through the C wrapper
        self.obj = mylib.b_new()
    def add_book(self):
        # 3. call the wrapped member function on that object
        mylib.B_func(self.obj)
f = Books()
f.add_book()
|
py | b4131763971acab05291eb1c73856db25b64b85e | import torch
import torch.nn as nn
from collections import OrderedDict
from fcos_core.utils.registry import Registry
from fcos_core.layers import FrozenBatchNorm2d
from fcos_core.modeling.make_layers import group_norm
_GN = False
VoVNet27FPNStagesTo5 = {
'config_stage_ch': [64, 80, 96, 112],
'config_concat_ch': [128, 256, 384, 512],
'layer_per_block': 5,
'block_per_stage': [1, 1, 1, 1]
}
VoVNet39FPNStagesTo5 = {
'config_stage_ch': [128, 160, 192, 224],
'config_concat_ch': [256, 512, 768, 1024],
'layer_per_block': 5,
'block_per_stage': [1, 1, 2, 2]
}
VoVNet57FPNStagesTo5 = {
'config_stage_ch': [128, 160, 192, 224],
'config_concat_ch': [256, 512, 768, 1024],
'layer_per_block': 5,
'block_per_stage': [1, 1, 4, 3]
}
VoVNet93FPNStagesTo5 = {
'config_stage_ch': [128, 160, 192, 224],
'config_concat_ch': [256, 512, 768, 1024],
'layer_per_block': 5,
'block_per_stage': [1, 3, 8, 3]
}
_STAGE_SPECS = Registry({
"V-27-FPN": VoVNet27FPNStagesTo5,
"V-39-FPN": VoVNet39FPNStagesTo5,
"V-57-FPN": VoVNet57FPNStagesTo5,
"V-93-FPN": VoVNet93FPNStagesTo5,
"V-27-FPN-RETINANET": VoVNet27FPNStagesTo5,
"V-39-FPN-RETINANET": VoVNet39FPNStagesTo5,
"V-57-FPN-RETINANET": VoVNet57FPNStagesTo5,
"V-93-FPN-RETINANET": VoVNet93FPNStagesTo5
})
def freeze_bn_params(m):
"""Freeze all the weights by setting requires_grad to False
"""
m.eval()
for p in m.parameters():
p.requires_grad = False
def conv3x3(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=3, padding=1):
"""3x3 convolution with padding"""
return [
(f'{module_name}_{postfix}/conv',
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False)),
(f'{module_name}_{postfix}/norm',
group_norm(out_channels) if _GN else FrozenBatchNorm2d(out_channels)
),
(f'{module_name}_{postfix}/relu', nn.ReLU(inplace=True))
]
def conv1x1(in_channels, out_channels, module_name, postfix, stride=1, groups=1, kernel_size=1, padding=0):
"""3x3 convolution with padding"""
return [
(f'{module_name}_{postfix}/conv',
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups,
bias=False)),
(f'{module_name}_{postfix}/norm',
group_norm(out_channels) if _GN else FrozenBatchNorm2d(out_channels)
),
(f'{module_name}_{postfix}/relu', nn.ReLU(inplace=True))
]
class _OSA_module(nn.Module):
def __init__(self, in_ch, stage_ch, concat_ch, layer_per_block, module_name, identity=False):
super(_OSA_module, self).__init__()
self.identity = identity
self.layers = nn.ModuleList()
in_channel = in_ch
for i in range(layer_per_block):
self.layers.append(nn.Sequential(OrderedDict(conv3x3(in_channel, stage_ch, module_name, i))))
in_channel = stage_ch
# feature aggregation
in_channel = in_ch + layer_per_block * stage_ch
self.concat = nn.Sequential(OrderedDict(conv1x1(in_channel, concat_ch, module_name, 'concat')))
def forward(self, x):
identity_feat = x
output = []
output.append(x)
for layer in self.layers:
x = layer(x)
output.append(x)
x = torch.cat(output, dim=1)
xt = self.concat(x)
if self.identity:
xt = xt + identity_feat
return xt
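# Illustrative channel bookkeeping (assumption, for reading the module above): with
# in_ch=128, stage_ch=64 and layer_per_block=5, the concatenated tensor has
# 128 + 5 * 64 = 448 channels before the 1x1 'concat' projection maps it to concat_ch.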
class _OSA_stage(nn.Sequential):
def __init__(self, in_ch, stage_ch, concat_ch, block_per_stage, layer_per_block, stage_num):
super(_OSA_stage, self).__init__()
        if stage_num != 2:
self.add_module('Pooling', nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True))
module_name = f'OSA{stage_num}_1'
self.add_module(module_name, _OSA_module(in_ch, stage_ch, concat_ch, layer_per_block, module_name))
for i in range(block_per_stage - 1):
module_name = f'OSA{stage_num}_{i + 2}'
self.add_module(module_name,
_OSA_module(concat_ch, stage_ch, concat_ch, layer_per_block, module_name, identity=True))
class VoVNet(nn.Module):
def __init__(self, cfg):
super(VoVNet, self).__init__()
global _GN
_GN = cfg.MODEL.VOVNET.USE_GN
stage_specs = _STAGE_SPECS[cfg.MODEL.BACKBONE.CONV_BODY]
config_stage_ch = stage_specs['config_stage_ch']
config_concat_ch = stage_specs['config_concat_ch']
block_per_stage = stage_specs['block_per_stage']
layer_per_block = stage_specs['layer_per_block']
# self.stem = nn.Sequential()
# Stem module
stem = conv3x3(3, 64, 'stem', '1', 2)
stem += conv3x3(64, 64, 'stem', '2', 1)
stem += conv3x3(64, 128, 'stem', '3', 2)
self.add_module('stem', nn.Sequential((OrderedDict(stem))))
stem_out_ch = [128]
in_ch_list = stem_out_ch + config_concat_ch[:-1]
# OSA stages
self.stage_names = []
for i in range(4): # num_stages
name = 'stage%d' % (i + 2)
self.stage_names.append(name)
self.add_module(name, _OSA_stage(in_ch_list[i],
config_stage_ch[i],
config_concat_ch[i],
block_per_stage[i],
layer_per_block,
i + 2))
# initialize weights
self._initialize_weights()
# Optionally freeze (requires_grad=False) parts of the backbone
self._freeze_backbone(cfg.MODEL.BACKBONE.FREEZE_CONV_BODY_AT)
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
def _freeze_backbone(self, freeze_at):
if freeze_at < 0:
return
# freeze BN layers
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
freeze_bn_params(m)
for stage_index in range(freeze_at):
if stage_index == 0:
m = self.stem # stage 0 is the stem
else:
m = getattr(self, "stage" + str(stage_index+1))
for p in m.parameters():
p.requires_grad = False
def forward(self, x):
x = self.stem(x)
outputs = []
for name in self.stage_names:
x = getattr(self, name)(x)
outputs.append(x)
return outputs |
py | b4131928125ccecd79e9690381b4286752b9d5a9 | import discord
from discord.ext import commands
from discord.ext.commands import cooldown
from discord.ext.commands.cooldowns import BucketType
from discord import Color
import asyncpg
import asyncio
from random import choice, randint
from datetime import datetime, timedelta
from utils import extras, errorhandler
import lavalink
import time
import re
import logging
import math
import aiodns
import cchardet
import aioredis
time_rx = re.compile("[0-9]+")
url_rx = re.compile(r"https?://(?:www\.)?.+")
beta_servers = [
513_888_506_498_646_052,
336_642_139_381_301_249,
253_716_112_292_839_425,
]
uwu_emote = "<:uwu:521394346688249856>"
caution = "<:caution:521002590566219776>"
class music(commands.Cog):
def __init__(self, bot):
self.bot = bot
def cog_unload(self):
self.bot.lavalink._event_hooks.clear()
def cog_load(self):
        if self.bot.is_ready():
if not self.bot.lavalink:
self.bot.lavalink = lavalink.Client(self.bot.user.id)
self.bot.lavalink.add_node(
self.bot.config["lavalink_ip"],
8080,
self.bot.config["lavalink"],
"us",
"us-east",
)
self.bot.add_listener(
self.bot.lavalink.voice_update_handler, "on_socket_response"
)
self.bot.lavalink.add_event_hook(self.track_hook)
async def track_hook(self, event):
if isinstance(event, lavalink.TrackStartEvent):
chnnl = event.player.fetch("channel")
if chnnl:
chnnl = self.bot.get_channel(chnnl)
if chnnl:
durtion = lavalink.utils.format_time(event.track.duration)
await chnnl.send(
f"Started song `{event.track.title}` with duration `{durtion}`.",
delete_after=30,
)
elif isinstance(event, lavalink.TrackEndEvent):
chnnl = event.player.fetch("channel")
if chnnl:
chnnl = self.bot.get_channel(chnnl)
if len(event.player.queue) == 0:
await event.player.stop()
await self.connect_to(chnnl.guild.id, None)
return await chnnl.send(
f"Disconnecting because queue is over...", delete_after=30
)
await chnnl.send(f"Song ended...", delete_after=30)
async def connect_to(self, guild_id: int, channel_id: str):
ws = self.bot._connection._get_websocket(guild_id)
await ws.voice_state(str(guild_id), channel_id)
async def cog_check(self, ctx):
if not await self.bot.redis.execute("GET", f"{ctx.author.id}-vote"):
raise (errorhandler.hasVoted(ctx))
player = self.bot.lavalink.players.create(
ctx.guild.id, endpoint=ctx.guild.region.value
)
should_connect = ctx.command.name in (
"play",
"now",
"seek",
"skip",
"stop",
"pause",
"volume",
"disconnect",
"queue",
"remove",
"music_player",
)
if not ctx.author.voice or not ctx.author.voice.channel:
return await ctx.caution("You must be in a voice channel.")
if not player.is_connected:
if not should_connect:
return await ctx.caution("Not connected...")
permissions = ctx.author.voice.channel.permissions_for(ctx.me)
if not permissions.connect:
return await ctx.caution(
f"I need the permission connect to use voice channels. You can check my role or channel overrides to find permissions."
)
if not permissions.speak:
return await ctx.caution(
f"I need the permission speak to use voice channels. You can check my role or channel overrides to find permissions."
)
player.store("channel", ctx.channel.id)
await self.connect_to(ctx.guild.id, str(ctx.author.voice.channel.id))
if (
player.is_connected
and int(player.channel_id) != ctx.author.voice.channel.id
):
return await ctx.caution("You need to be in my current voice channel.")
return True
@commands.command(aliases=["p"])
async def play(self, ctx, *, query: str):
player = self.bot.lavalink.players.get(ctx.guild.id)
query = query.strip("<>")
if not url_rx.match(query):
query = f"ytsearch:{query}"
results = await player.node.get_tracks(query)
if not results or not results["tracks"]:
return await ctx.send("No song found...", delete_after=30)
e = discord.Embed(color=0x7289DA)
if results["loadType"] == "PLAYLIST_LOADED":
tracks = results["tracks"]
for track in tracks:
player.add(requester=ctx.author.id, track=track)
e.set_author(name=f"Playlist queued by {ctx.author}")
e.description = f'{results["playlistInfo"]["name"]} - {len(tracks)} songs'
await ctx.send(embed=e)
else:
track = results["tracks"][0]
e.set_author(name=f"Song queued by {ctx.author}")
e.description = f'[{track["info"]["title"]}]({track["info"]["uri"]})'
await ctx.send(embed=e)
player.add(requester=ctx.author.id, track=track)
if not player.is_playing:
await player.play()
@commands.command(aliases=["np", "n", "playing"])
async def now(self, ctx):
player = self.bot.lavalink.players.get(ctx.guild.id)
position = lavalink.utils.format_time(player.position)
if player.current.stream:
duration = "Live"
else:
duration = lavalink.utils.format_time(player.current.duration)
e = discord.Embed(
color=0x7289DA,
description=f"[{player.current.title}]({player.current.uri})",
)
e.add_field(name="Duration", value=f"[{position}/{duration}]")
await ctx.send(embed=e)
@commands.command()
async def seek(self, ctx, *, time: str):
player = self.bot.lavalink.players.get(ctx.guild.id)
seconds = time_rx.search(time)
if not seconds:
return await ctx.send(
"Please specify a time in seconds to skip.", delete_after=30
)
seconds = int(seconds.group()) * 1000
if time.startswith("-"):
seconds *= -1
track_time = player.position + seconds
await player.seek(track_time)
await ctx.send(f"Moved song to `{lavalink.utils.format_time(track_time)}`")
@commands.command()
async def skip(self, ctx):
player = self.bot.lavalink.players.get(ctx.guild.id)
await player.skip()
await ctx.send("Skipped.", delete_after=30)
@commands.command()
async def stop(self, ctx):
player = self.bot.lavalink.players.get(ctx.guild.id)
player.queue.clear()
await player.stop()
await ctx.send("Stopped.", delete_after=30)
@commands.command(aliases=["resume"])
async def pause(self, ctx):
player = self.bot.lavalink.players.get(ctx.guild.id)
if player.paused:
await player.set_pause(False)
await ctx.send("Resumed.", delete_after=30)
else:
await player.set_pause(True)
await ctx.send("Paused.", delete_after=30)
@commands.command(aliases=["vol"])
async def volume(self, ctx, volume: int = None):
player = self.bot.lavalink.players.get(ctx.guild.id)
if not volume:
return await ctx.send(
f"My current player volume is `{player.volume}`%", delete_after=30
)
await player.set_volume(volume)
await ctx.send(f"Set player volume to `{player.volume}`%", delete_after=30)
@commands.command(aliases=["dc"])
async def disconnect(self, ctx):
player = self.bot.lavalink.players.get(ctx.guild.id)
player.queue.clear()
await player.stop()
await self.connect_to(ctx.guild.id, None)
await ctx.send("Disconnected.", delete_after=30)
@commands.command(aliases=["q"])
async def queue(self, ctx, page: int = 1):
""" Shows the player's queue. """
player = self.bot.lavalink.players.get(ctx.guild.id)
if not player.queue:
return await ctx.send("Nothing queued.", delete_after=30)
items_per_page = 10
pages = math.ceil(len(player.queue) / items_per_page)
start = (page - 1) * items_per_page
end = start + items_per_page
queue_list = ""
for index, track in enumerate(player.queue[start:end], start=start):
queue_list += f"{index + 1} - [{track.title}]({track.uri})\n"
e = discord.Embed(colour=0x7289DA, description=queue_list)
e.set_author(name=f"{len(player.queue)} songs in the queue ({page}/{pages})")
e.set_footer(
text=f'To change pages do "uwu queue PAGE" replacing page with the desired page'
)
await ctx.send(embed=e)
@commands.command()
async def remove(self, ctx, index: int):
player = self.bot.lavalink.players.get(ctx.guild.id)
if index > len(player.queue) or index < 1:
return await ctx.send(
f"Invalid index please use an index of `1`-`{len(player.queue)}`"
)
index -= 1
removed = player.queue.pop(index)
await ctx.send(f"Removed `{removed.title}` from the queue.")
@commands.command()
async def music_player(self, ctx):
player = self.bot.lavalink.players.get(ctx.guild.id)
is_paused = "No"
if player.paused:
is_paused = "Yes"
e = discord.Embed(colour=0x7289DA)
e.set_author(name=f"Player info for {ctx.guild}")
e.add_field(name="Volume", value=f"{player.volume}/1000", inline=False)
e.add_field(
name=f"Current song",
value=f"[{player.current.title}]({player.current.uri})",
inline=False,
)
e.add_field(name="Is paused", value=is_paused, inline=False)
await ctx.send(embed=e)
def setup(bot):
bot.add_cog(music(bot))
|
py | b4131b1fd7dfeec9d1af5cead0a7f7538f8341e4 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.api import DEFAULT_API_VERSION
from st2common.util.url import get_url_without_trailing_slash
__all__ = [
'get_base_public_api_url',
'get_full_public_api_url',
'get_mistral_api_url'
]
LOG = logging.getLogger(__name__)
def get_base_public_api_url():
"""
Return full public URL to the API endpoint (excluding the API version).
:rtype: ``str``
"""
# Note: This is here for backward compatibility reasons - if api_url is not set we fall back
# to the old approach (using api listen host and port)
if cfg.CONF.auth.api_url:
api_url = get_url_without_trailing_slash(cfg.CONF.auth.api_url)
else:
LOG.warn('"auth.api_url" configuration option is not configured')
api_url = 'http://%s:%s' % (cfg.CONF.api.host, cfg.CONF.api.port)
return api_url
def get_full_public_api_url(api_version=DEFAULT_API_VERSION):
"""
Return full public URL to the API endpoint (including the API version).
:rtype: ``str``
"""
api_url = get_base_public_api_url()
api_url = '%s/%s' % (api_url, api_version)
return api_url
def get_mistral_api_url(api_version=DEFAULT_API_VERSION):
"""
Return a URL which Mistral uses to talk back to the StackStorm API.
Note: If not provided it defaults to the public API url.
"""
if cfg.CONF.mistral.api_url:
api_url = get_url_without_trailing_slash(cfg.CONF.mistral.api_url)
api_url = '%s/%s' % (api_url, api_version)
else:
LOG.warn('"mistral.api_url" not set, using auth.api_url')
api_url = get_full_public_api_url(api_version=api_version)
return api_url
|
py | b4131b62d0723a8d06eacf1be92a0d0adbdb4fd7 | import numpy as np
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import os
from utils import get_spectrograms
import hyperparams as hp
import librosa
class PrepareDataset(Dataset):
"""LJSpeech dataset."""
def __init__(self, csv_file, root_dir):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the wavs.
"""
self.landmarks_frame = pd.read_csv(csv_file, sep='|', header=None)
self.root_dir = root_dir
def load_wav(self, filename):
return librosa.load(filename, sr=hp.sr)
def __len__(self):
return len(self.landmarks_frame)
def __getitem__(self, idx):
wav_name = os.path.join(self.root_dir, self.landmarks_frame.iloc[idx, 0]) + '.wav'
mel, mag = get_spectrograms(wav_name)
np.save(wav_name[:-4] + '.pt', mel)
np.save(wav_name[:-4] + '.mag', mag)
sample = {'mel':mel, 'mag': mag}
return sample
if __name__ == '__main__':
dataset = PrepareDataset(os.path.join(hp.data_path,'metadata.csv'), os.path.join(hp.data_path,'wavs'))
dataloader = DataLoader(dataset, batch_size=1, drop_last=False, num_workers=8)
from tqdm import tqdm
pbar = tqdm(dataloader)
for d in pbar:
pass
|
py | b4131bf8155c5a6139e9ad2f02edcb4aaee44a0a | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Additionally it maintains a collection with update_ops that need to be
updated after the ops have been computed, for example to update moving means
and moving variances of batch_norm.
Ops that have different behavior during training or eval have an is_training
parameter. Additionally, Ops that contain variables.variable have a trainable
parameter, which controls whether the op's variables are trainable or not.
"""
import tensorflow as tf
from tensorflow.python.training import moving_averages
from inception.slim import losses
from inception.slim import scopes
from inception.slim import variables
# Used to keep the update ops done by batch_norm.
UPDATE_OPS_COLLECTION = '_update_ops_'
@scopes.add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
moving_vars='moving_vars',
activation=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a Batch Normalization layer.
Args:
inputs: a tensor of size [batch_size, height, width, channels]
or [batch_size, channels].
decay: decay for the moving average.
center: If True, subtract beta. If False, beta is not created and ignored.
scale: If True, multiply by gamma. If False, gamma is
not used. When the next layer is linear (also e.g. ReLU), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
moving_vars: collection to store the moving_mean and moving_variance.
activation: activation function.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
inputs_shape = inputs.get_shape()
with tf.variable_op_scope([inputs], scope, 'BatchNorm', reuse=reuse):
axis = list(range(len(inputs_shape) - 1))
params_shape = inputs_shape[-1:]
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta = variables.variable('beta',
params_shape,
initializer=tf.zeros_initializer,
trainable=trainable,
restore=restore)
if scale:
gamma = variables.variable('gamma',
params_shape,
initializer=tf.ones_initializer,
trainable=trainable,
restore=restore)
# Create moving_mean and moving_variance add them to
# GraphKeys.MOVING_AVERAGE_VARIABLES collections.
moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
moving_mean = variables.variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer,
trainable=False,
restore=restore,
collections=moving_collections)
moving_variance = variables.variable('moving_variance',
params_shape,
initializer=tf.ones_initializer,
trainable=False,
restore=restore,
collections=moving_collections)
if is_training:
# Calculate the moments based on the individual batch.
mean, variance = tf.nn.moments(inputs, axis)
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
else:
# Just use the moving_mean and moving_variance.
mean = moving_mean
variance = moving_variance
# Normalize the activations.
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs.get_shape())
if activation:
outputs = activation(outputs)
return outputs
def _two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
  This function normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.
Returns:
A tuple with 2 values.
Raises:
ValueError: If `int_or_tuple` it not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tf.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
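# Illustrative behaviour (assumption, not part of the original module):
#   _two_element_tuple(3)      -> (3, 3)
#   _two_element_tuple([2, 3]) -> (2, 3)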
@scopes.add_arg_scope
def conv2d(inputs,
num_filters_out,
kernel_size,
stride=1,
padding='SAME',
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
conv2d creates a variable called 'weights', representing the convolutional
kernel, that is convolved with the input. If `batch_norm_params` is None, a
second variable called 'biases' is added to the result of the convolution
operation.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width]
      of the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation: activation function.
    stddev: standard deviation of the truncated Gaussian weight distribution.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If is None don't use it.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
with tf.variable_op_scope([inputs], scope, 'Conv', reuse=reuse):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
num_filters_in = inputs.get_shape()[-1]
weights_shape = [kernel_h, kernel_w,
num_filters_in, num_filters_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if batch_norm_params is not None:
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(conv, **batch_norm_params)
else:
bias_shape = [num_filters_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.bias_add(conv, biases)
if activation:
outputs = activation(outputs)
return outputs
@scopes.add_arg_scope
def fc(inputs,
num_units_out,
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a fully connected layer followed by an optional batch_norm layer.
FC creates a variable called 'weights', representing the fully connected
weight matrix, that is multiplied by the input. If `batch_norm` is None, a
second variable called 'biases' is added to the result of the initial
vector-matrix multiplication.
Args:
inputs: a [B x N] tensor where B is the batch size and N is the number of
input units in the layer.
num_units_out: the number of output units in the layer.
activation: activation function.
stddev: the standard deviation for the weights.
bias: the initial value of the biases.
weight_decay: the weight decay.
batch_norm_params: parameters for the batch_norm. If is None don't use it.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
the tensor variable representing the result of the series of operations.
"""
with tf.variable_op_scope([inputs], scope, 'FC', reuse=reuse):
num_units_in = inputs.get_shape()[1]
weights_shape = [num_units_in, num_units_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
if batch_norm_params is not None:
outputs = tf.matmul(inputs, weights)
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(outputs, **batch_norm_params)
else:
bias_shape = [num_units_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.xw_plus_b(inputs, weights, biases)
if activation:
outputs = activation(outputs)
return outputs
def one_hot_encoding(labels, num_classes, scope=None):
"""Transform numeric labels into onehot_labels.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
scope: Optional scope for op_scope.
Returns:
one hot encoding of the labels.
"""
with tf.op_scope([labels], scope, 'OneHotEncoding'):
batch_size = labels.get_shape()[0]
indices = tf.expand_dims(tf.range(0, batch_size), 1)
labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
onehot_labels.set_shape([batch_size, num_classes])
return onehot_labels
@scopes.add_arg_scope
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Max Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
ValueError: if 'kernel_size' is not a 2-D list
"""
with tf.op_scope([inputs], scope, 'MaxPool'):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Avg Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with tf.op_scope([inputs], scope, 'AvgPool'):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
"""Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope: Optional scope for op_scope.
Returns:
a tensor representing the output of the operation.
"""
if is_training and keep_prob > 0:
with tf.op_scope([inputs], scope, 'Dropout'):
return tf.nn.dropout(inputs, keep_prob)
else:
return inputs
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for op_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
    raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.op_scope([inputs], scope, 'Flatten'):
return tf.reshape(inputs, [-1, k])
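# Illustrative shape example (assumption): flatten maps an input of shape
# [batch_size, 4, 4, 3] to a tensor of shape [batch_size, 48].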
def repeat_op(repetitions, inputs, op, *args, **kwargs):
"""Build a sequential Tower starting from inputs by using an op repeatedly.
It creates new scopes for each operation by increasing the counter.
Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
it will repeat the given op under the following variable_scopes:
conv1/Conv
conv1/Conv_1
conv1/Conv_2
Args:
    repetitions: number of repetitions.
inputs: a tensor of size [batch_size, height, width, channels].
op: an operation.
*args: args for the op.
**kwargs: kwargs for the op.
Returns:
    a tensor result of applying the operation op, repetitions times.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
tower = inputs
for _ in range(repetitions):
tower = op(tower, *args, **kwargs)
return tower
|
py | b4131ce02942d4554788c793fe6dcb0b3001bb65 | import random
def ordenamiento_por_insercion(lista):
for indice in range(1, len(lista)):
valor_actual = lista[indice]
posicion_actual = indice
while posicion_actual > 0 and lista[posicion_actual - 1] > valor_actual:
lista[posicion_actual] = lista[posicion_actual - 1]
posicion_actual -= 1
lista[posicion_actual] = valor_actual
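if __name__ == '__main__':
    # Minimal usage sketch (assumption, not in the original file): sort a small
    # random list in place and print the result.
    numeros = [random.randint(0, 100) for _ in range(10)]
    ordenamiento_por_insercion(numeros)
    print(numeros)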
|
py | b4131df5896184305487a2f4108994a899457e04 | import pandas as pd
from datetime import datetime
from django.core.management.base import BaseCommand
from gensim.corpora import Dictionary
from gensim.models import LdaModel
from archiv.models import Stelle
from topics.models import ModelingProcess, Topic
from topics.utils import top_to_topic_object
class Command(BaseCommand):
help = "Creates a document-topic-matrix"
def handle(self, *args, **kwargs):
ModelingProcess.objects.all().delete()
process_start = datetime.now()
params = {
"LdaModel_params": {
"num_topics": 10,
"chunksize": 2000,
"passes": 20,
"iterations": 100,
"eval_every": None,
"alpha": "auto",
"eta": "auto",
},
"dict_filter_params": {
"no_below": 20,
"no_above": 0.5
}
}
qs = Stelle.objects.filter(lemmata__isnull=False).filter(text__text_lang='lat')
print(f"Processing {qs.count()} out of {Stelle.objects.all().count()} passages")
df = pd.DataFrame(
[
{
'index': i,
'db_id': x.id,
'text': x.lemmata['tokens']
} for i, x in enumerate(qs)
]
)
docs = list(df['text'].values)
dictionary = Dictionary(docs)
dictionary.filter_extremes(**params["dict_filter_params"])
corpus = [dictionary.doc2bow(doc) for doc in docs]
print(f"Number of unique tokens: {len(dictionary)}")
print(f"Number of documents: {len(corpus)}")
dictionary[0]
id2word = dictionary.id2token
model = LdaModel(
corpus=corpus,
id2word=id2word,
**params["LdaModel_params"]
)
model_process = ModelingProcess.objects.create(
process_start=process_start,
process_end=datetime.now(),
param=params
)
for x in model.top_topics(corpus):
Topic.objects.create(
process=model_process,
**top_to_topic_object(x)
)
|
py | b4131eeb8660c90376b1c54be2a0766b06fe143c | from .generate_overland_flow_Bates import OverlandFlowBates
from .generate_overland_flow_deAlmeida import OverlandFlow
from .generate_overland_flow_implicit_kinwave import KinwaveImplicitOverlandFlow
from .generate_overland_flow_kinwave import KinwaveOverlandFlowModel
from .kinematic_wave_rengers import KinematicWaveRengers
from .linear_diffusion_overland_flow_router import LinearDiffusionOverlandFlowRouter
__all__ = [
"OverlandFlowBates",
"OverlandFlow",
"KinematicWaveRengers",
"KinwaveImplicitOverlandFlow",
"KinwaveOverlandFlowModel",
"LinearDiffusionOverlandFlowRouter",
]
|
py | b4131f4cedf8aa45d69528cb1e307cbf80c85d5d | from boa.interop.Neo.Runtime import CheckWitness
from boa.interop.Neo.Storage import Delete, Get, GetContext, Put
from contracts.util.serialize import *
IDX_KEY = 0
IDX_ADDRESS = 0
IDX_HASH = 1
OWNER = b'S\xefB\xc8\xdf!^\xbeZ|z\xe8\x01\xcb\xc3\xac/\xacI)'
ctx = GetContext()
def serialize_data(data):
identity = list(length=2)
identity[IDX_ADDRESS] = data[IDX_ADDRESS]
identity[IDX_HASH] = data[IDX_HASH]
return serialize_array(identity)
def create(data):
"""
Parameters
----------
data : boa.builtins.list
The input data containing an address and a hash, both in bytes
Returns
-------
bool
Returns False if the entry does exist already, otherwise True
"""
if Get(ctx, data[IDX_KEY]):
print('Error: Entry already exists!')
return False
data_serialized = serialize_data(data)
Put(ctx, data[IDX_KEY], data_serialized)
return True
def retrieve(data):
saved = Get(ctx, data[IDX_KEY])
return deserialize_bytearray(saved)
def update(data):
if Get(ctx, data[IDX_KEY]) is None:
print('Error: Entry does not exist!')
return False
data_serialized = serialize_data(data)
Put(ctx, data[IDX_KEY], data_serialized)
return True
def delete(data):
if not Get(ctx, data[IDX_KEY]):
print('Error: Entry does not exist!')
return False
Delete(ctx, data[IDX_KEY])
return True
def verify(data):
saved_serialized = Get(ctx, data[IDX_KEY])
saved_deserialized = deserialize_bytearray(saved_serialized)
saved = saved_deserialized[IDX_HASH]
check = data[IDX_HASH]
return saved == check
def Main(operation, data):
"""
Parameters
----------
operation : str
The operation to execute with the contract call
data : boa.builtins.list
A list containing an address and a hash.
Address: Of the student to whom the identity belongs
        Hash: The hash of the identity belonging to the student
Returns
-------
bool
Returns a result of an operation as a boolean
ByteArray
Returns a ByteArray of ByteArrays when data is retrieved
"""
if operation == "Verify":
return verify(data)
if operation == "Create":
return create(data)
# Only the Owner can execute methods hereafter
if not CheckWitness(OWNER):
print('You are not the owner of this Contract!')
return False
if operation == "Retrieve":
return retrieve(data)
if operation == "Update":
return update(data)
if operation == "Delete":
return delete(data)
print('Operation does not exist')
return False
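# Illustrative invocation sketch (assumption, not part of the contract): the entry
# point receives an operation name plus a [address, hash] pair, e.g.
#   Main('Create', [student_address_bytes, identity_hash_bytes])
#   Main('Verify', [student_address_bytes, identity_hash_bytes])  # -> True / False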
|
py | b4131f64f2c01006bb0b5db21bec0eaec2228104 | from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class ToolBar(QToolBar):
def __init__(self, title):
super(ToolBar, self).__init__(title)
self.layout().setSpacing(0)
self.layout().setContentsMargins(0, 0, 0, 0)
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
def addAction(self, action):
if isinstance(action, QWidgetAction):
return super(ToolBar, self).addAction(action)
button = QToolButton()
button.setDefaultAction(action)
button.setToolButtonStyle(self.toolButtonStyle())
self.addWidget(button)
# center align
for i in range(self.layout().count()):
if isinstance(self.layout().itemAt(i).widget(), QToolButton):
self.layout().itemAt(i).setAlignment(Qt.AlignCenter)
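if __name__ == '__main__':
    # Minimal usage sketch (assumption, not in the original file): show the toolbar
    # with a single centre-aligned action.
    import sys
    app = QApplication(sys.argv)
    bar = ToolBar('tools')
    bar.addAction(QAction('Open', bar))
    bar.show()
    sys.exit(app.exec_())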
|
py | b41321097adff1d736f5af774228929cd330428c | from django.contrib import admin
from activitystream.models import ActivityStream
class ActivityStreamAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['created', 'data']})
]
list_display = ('id', 'created', 'data')
readonly_fields = ['id', 'created']
admin.site.register(ActivityStream, ActivityStreamAdmin)
|
py | b41323018bd2cedda9c9c0ebbd95f39dfd9b14b3 |
class Room(object):
def __init__(self, name, description):
self.name = name
self.description = description
self.paths = {}
def go(self, direction):
return self.paths.get(direction, None)
def add_paths(self, paths):
self.paths.update(paths)
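if __name__ == '__main__':
    # Minimal usage sketch (assumption, not in the original file): connect two rooms
    # and navigate between them.
    hall = Room('Hall', 'A draughty entrance hall.')
    kitchen = Room('Kitchen', 'Smells of old coffee.')
    hall.add_paths({'north': kitchen})
    print(hall.go('north').name)  # Kitchen
    print(hall.go('south'))       # None, unknown direction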
|
py | b41324a18fdccddc67ee007e236b9b5883876f83 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('webagg')
import numpy as np
from scipy.special import binom
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
class BezierBuilder(object):
"""Bézier curve interactive builder.
"""
def __init__(self, control_polygon, ax_bernstein):
"""Constructor.
Receives the initial control polygon of the curve.
"""
self.control_polygon = control_polygon
self.xp = list(control_polygon.get_xdata())
self.yp = list(control_polygon.get_ydata())
self.canvas = control_polygon.figure.canvas
        self.ax_main = control_polygon.axes
self.ax_bernstein = ax_bernstein
# Event handler for mouse clicking
self.cid = self.canvas.mpl_connect('button_press_event', self)
# Create Bézier curve
line_bezier = Line2D([], [],
c=control_polygon.get_markeredgecolor())
self.bezier_curve = self.ax_main.add_line(line_bezier)
def __call__(self, event):
# Ignore clicks outside axes
if event.inaxes != self.control_polygon.axes:
return
# Add point
self.xp.append(event.xdata)
self.yp.append(event.ydata)
self.control_polygon.set_data(self.xp, self.yp)
# Rebuild Bézier curve and update canvas
self.bezier_curve.set_data(*self._build_bezier())
self._update_bernstein()
self._update_bezier()
def _build_bezier(self):
x, y = Bezier(list(zip(self.xp, self.yp))).T
return x, y
def _update_bezier(self):
self.canvas.draw()
def _update_bernstein(self):
N = len(self.xp) - 1
t = np.linspace(0, 1, num=200)
ax = self.ax_bernstein
ax.clear()
for kk in range(N + 1):
ax.plot(t, Bernstein(N, kk)(t))
ax.set_title("Bernstein basis, N = {}".format(N))
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
def Bernstein(n, k):
"""Bernstein polynomial.
"""
coeff = binom(n, k)
def _bpoly(x):
return coeff * x ** k * (1 - x) ** (n - k)
return _bpoly
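# Illustrative check (assumption): Bernstein(2, 1)(0.5) == binom(2, 1) * 0.5 * 0.5 == 0.5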
def Bezier(points, num=200):
"""Build Bézier curve from points.
"""
N = len(points)
t = np.linspace(0, 1, num=num)
curve = np.zeros((num, 2))
for ii in range(N):
curve += np.outer(Bernstein(N - 1, ii)(t), points[ii])
return curve
if __name__ == '__main__':
# Initial setup
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
# Empty line
line = Line2D([], [], ls='--', c='#666666',
marker='x', mew=2, mec='#204a87')
ax1.add_line(line)
# Canvas limits
ax1.set_xlim(0, 1)
ax1.set_ylim(0, 1)
ax1.set_title("Bezier curve")
# Bernstein plot
ax2.set_title("Bernstein basis")
# Create BezierBuilder
bezier_builder = BezierBuilder(line, ax2)
plt.show()
|
py | b41325b880510f3dbb15d21d16b8eb5de1ecc0a3 | import math
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import build_norm_layer
from mmcv.cnn.bricks.drop import build_dropout
from mmcv.cnn.bricks.transformer import FFN
from mmcv.cnn.utils.weight_init import (constant_init, normal_init,
trunc_normal_init)
from mmcv.runner import BaseModule, ModuleList
from torch.nn.modules.batchnorm import _BatchNorm
from mmseg.models.backbones.mit import EfficientMultiheadAttention
from mmseg.models.builder import BACKBONES
from ..utils.embed import PatchEmbed
class GlobalSubsampledAttention(EfficientMultiheadAttention):
"""Global Sub-sampled Attention (Spatial Reduction Attention)
This module is modified from EfficientMultiheadAttention,
which is a module from mmseg.models.backbones.mit.py.
Specifically, there is no difference between
`GlobalSubsampledAttention` and `EfficientMultiheadAttention`,
`GlobalSubsampledAttention` is built as a brand new class
because it is renamed as `Global sub-sampled attention (GSA)`
in paper.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads.
attn_drop (float): A Dropout layer on attn_output_weights.
Default: 0.0.
proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.
Default: 0.0.
dropout_layer (obj:`ConfigDict`): The dropout_layer used
when adding the shortcut. Default: None.
batch_first (bool): Key, Query and Value are shape of
(batch, n, embed_dims)
or (n, batch, embed_dims). Default: False.
qkv_bias (bool): enable bias for qkv if True. Default: True.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
sr_ratio (int): The ratio of spatial reduction of GSA of PCPVT.
Default: 1.
init_cfg (dict, optional): The Config for initialization.
Defaults to None.
"""
def __init__(self,
embed_dims,
num_heads,
attn_drop=0.,
proj_drop=0.,
dropout_layer=None,
batch_first=True,
qkv_bias=True,
norm_cfg=dict(type='LN'),
sr_ratio=1,
init_cfg=None):
super(GlobalSubsampledAttention, self).__init__(
embed_dims,
num_heads,
attn_drop=attn_drop,
proj_drop=proj_drop,
dropout_layer=dropout_layer,
batch_first=batch_first,
qkv_bias=qkv_bias,
norm_cfg=norm_cfg,
sr_ratio=sr_ratio,
init_cfg=init_cfg)
class GSAEncoderLayer(BaseModule):
"""Implements one encoder layer with GSA.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
drop_rate (float): Probability of an element to be zeroed
after the feed forward layer. Default: 0.0.
attn_drop_rate (float): The drop out rate for attention layer.
Default: 0.0.
drop_path_rate (float): Stochastic depth rate. Default 0.0.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
qkv_bias (bool): Enable bias for qkv if True. Default: True
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
sr_ratio (float): Kernel_size of conv in Attention modules. Default: 1.
init_cfg (dict, optional): The Config for initialization.
Defaults to None.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
num_fcs=2,
qkv_bias=True,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
sr_ratio=1.,
init_cfg=None):
super(GSAEncoderLayer, self).__init__(init_cfg=init_cfg)
self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1]
self.attn = GlobalSubsampledAttention(
embed_dims=embed_dims,
num_heads=num_heads,
attn_drop=attn_drop_rate,
proj_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
qkv_bias=qkv_bias,
norm_cfg=norm_cfg,
sr_ratio=sr_ratio)
self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1]
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=num_fcs,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg,
add_identity=False)
self.drop_path = build_dropout(
dict(type='DropPath', drop_prob=drop_path_rate)
) if drop_path_rate > 0. else nn.Identity()
def forward(self, x, hw_shape):
x = x + self.drop_path(self.attn(self.norm1(x), hw_shape, identity=0.))
x = x + self.drop_path(self.ffn(self.norm2(x)))
return x
class LocallyGroupedSelfAttention(BaseModule):
"""Locally-grouped Self Attention (LSA) module.
Args:
embed_dims (int): Number of input channels.
num_heads (int): Number of attention heads. Default: 8
qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.
Default: False.
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
attn_drop_rate (float, optional): Dropout ratio of attention weight.
Default: 0.0
proj_drop_rate (float, optional): Dropout ratio of output. Default: 0.
window_size(int): Window size of LSA. Default: 1.
init_cfg (dict, optional): The Config for initialization.
Defaults to None.
"""
def __init__(self,
embed_dims,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop_rate=0.,
proj_drop_rate=0.,
window_size=1,
init_cfg=None):
super(LocallyGroupedSelfAttention, self).__init__(init_cfg=init_cfg)
assert embed_dims % num_heads == 0, f'dim {embed_dims} should be ' \
f'divided by num_heads ' \
f'{num_heads}.'
self.embed_dims = embed_dims
self.num_heads = num_heads
head_dim = embed_dims // num_heads
self.scale = qk_scale or head_dim**-0.5
self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop_rate)
self.proj = nn.Linear(embed_dims, embed_dims)
self.proj_drop = nn.Dropout(proj_drop_rate)
self.window_size = window_size
def forward(self, x, hw_shape):
b, n, c = x.shape
h, w = hw_shape
x = x.view(b, h, w, c)
# pad feature maps to multiples of Local-groups
pad_l = pad_t = 0
pad_r = (self.window_size - w % self.window_size) % self.window_size
pad_b = (self.window_size - h % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
# calculate attention mask for LSA
Hp, Wp = x.shape[1:-1]
_h, _w = Hp // self.window_size, Wp // self.window_size
mask = torch.zeros((1, Hp, Wp), device=x.device)
mask[:, -pad_b:, :].fill_(1)
mask[:, :, -pad_r:].fill_(1)
# [B, _h, _w, window_size, window_size, C]
x = x.reshape(b, _h, self.window_size, _w, self.window_size,
c).transpose(2, 3)
mask = mask.reshape(1, _h, self.window_size, _w,
self.window_size).transpose(2, 3).reshape(
1, _h * _w,
self.window_size * self.window_size)
# [1, _h*_w, window_size*window_size, window_size*window_size]
attn_mask = mask.unsqueeze(2) - mask.unsqueeze(3)
attn_mask = attn_mask.masked_fill(attn_mask != 0,
float(-1000.0)).masked_fill(
attn_mask == 0, float(0.0))
# [3, B, _w*_h, nhead, window_size*window_size, dim]
qkv = self.qkv(x).reshape(b, _h * _w,
self.window_size * self.window_size, 3,
self.num_heads, c // self.num_heads).permute(
3, 0, 1, 4, 2, 5)
q, k, v = qkv[0], qkv[1], qkv[2]
# [B, _h*_w, n_head, window_size*window_size, window_size*window_size]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn + attn_mask.unsqueeze(2)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
attn = (attn @ v).transpose(2, 3).reshape(b, _h, _w, self.window_size,
self.window_size, c)
x = attn.transpose(2, 3).reshape(b, _h * self.window_size,
_w * self.window_size, c)
if pad_r > 0 or pad_b > 0:
x = x[:, :h, :w, :].contiguous()
x = x.reshape(b, n, c)
x = self.proj(x)
x = self.proj_drop(x)
return x
class LSAEncoderLayer(BaseModule):
"""Implements one encoder layer in Twins-SVT.
Args:
embed_dims (int): The feature dimension.
num_heads (int): Parallel attention heads.
feedforward_channels (int): The hidden dimension for FFNs.
drop_rate (float): Probability of an element to be zeroed
after the feed forward layer. Default: 0.0.
attn_drop_rate (float, optional): Dropout ratio of attention weight.
Default: 0.0
drop_path_rate (float): Stochastic depth rate. Default 0.0.
num_fcs (int): The number of fully-connected layers for FFNs.
Default: 2.
qkv_bias (bool): Enable bias for qkv if True. Default: True
qk_scale (float | None, optional): Override default qk scale of
head_dim ** -0.5 if set. Default: None.
act_cfg (dict): The activation config for FFNs.
Default: dict(type='GELU').
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN').
window_size (int): Window size of LSA. Default: 1.
init_cfg (dict, optional): The Config for initialization.
Defaults to None.
"""
def __init__(self,
embed_dims,
num_heads,
feedforward_channels,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
num_fcs=2,
qkv_bias=True,
qk_scale=None,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
window_size=1,
init_cfg=None):
super(LSAEncoderLayer, self).__init__(init_cfg=init_cfg)
self.norm1 = build_norm_layer(norm_cfg, embed_dims, postfix=1)[1]
self.attn = LocallyGroupedSelfAttention(embed_dims, num_heads,
qkv_bias, qk_scale,
attn_drop_rate, drop_rate,
window_size)
self.norm2 = build_norm_layer(norm_cfg, embed_dims, postfix=2)[1]
self.ffn = FFN(
embed_dims=embed_dims,
feedforward_channels=feedforward_channels,
num_fcs=num_fcs,
ffn_drop=drop_rate,
dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),
act_cfg=act_cfg,
add_identity=False)
self.drop_path = build_dropout(
dict(type='DropPath', drop_prob=drop_path_rate)
) if drop_path_rate > 0. else nn.Identity()
def forward(self, x, hw_shape):
x = x + self.drop_path(self.attn(self.norm1(x), hw_shape))
x = x + self.drop_path(self.ffn(self.norm2(x)))
return x
class ConditionalPositionEncoding(BaseModule):
"""The Conditional Position Encoding (CPE) module.
The CPE is the implementation of 'Conditional Positional Encodings
for Vision Transformers <https://arxiv.org/abs/2102.10882>'_.
Args:
in_channels (int): Number of input channels.
embed_dims (int): The feature dimension. Default: 768.
stride (int): Stride of conv layer. Default: 1.
"""
def __init__(self, in_channels, embed_dims=768, stride=1, init_cfg=None):
super(ConditionalPositionEncoding, self).__init__(init_cfg=init_cfg)
self.proj = nn.Conv2d(
in_channels,
embed_dims,
kernel_size=3,
stride=stride,
padding=1,
bias=True,
groups=embed_dims)
self.stride = stride
def forward(self, x, hw_shape):
b, n, c = x.shape
h, w = hw_shape
feat_token = x
cnn_feat = feat_token.transpose(1, 2).view(b, c, h, w)
if self.stride == 1:
x = self.proj(cnn_feat) + cnn_feat
else:
x = self.proj(cnn_feat)
x = x.flatten(2).transpose(1, 2)
return x
@BACKBONES.register_module()
class PCPVT(BaseModule):
"""The backbone of Twins-PCPVT.
This backbone is the implementation of `Twins: Revisiting the Design
of Spatial Attention in Vision Transformers
    <https://arxiv.org/abs/2104.13840>`_.
Args:
in_channels (int): Number of input channels. Default: 3.
embed_dims (list): Embedding dimension. Default: [64, 128, 256, 512].
patch_sizes (list): The patch sizes. Default: [4, 2, 2, 2].
strides (list): The strides. Default: [4, 2, 2, 2].
num_heads (int): Number of attention heads. Default: [1, 2, 4, 8].
mlp_ratios (int): Ratio of mlp hidden dim to embedding dim.
Default: [4, 4, 4, 4].
out_indices (tuple[int]): Output from which stages.
Default: (0, 1, 2, 3).
qkv_bias (bool): Enable bias for qkv if True. Default: False.
drop_rate (float): Probability of an element to be zeroed.
Default 0.
attn_drop_rate (float): The drop out rate for attention layer.
Default 0.0
drop_path_rate (float): Stochastic depth rate. Default 0.0
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN')
depths (list): Depths of each stage. Default [3, 4, 6, 3]
sr_ratios (list): Kernel_size of conv in each Attn module in
Transformer encoder layer. Default: [8, 4, 2, 1].
norm_after_stage(bool): Add extra norm. Default False.
init_cfg (dict, optional): The Config for initialization.
Defaults to None.
"""
def __init__(self,
in_channels=3,
embed_dims=[64, 128, 256, 512],
patch_sizes=[4, 2, 2, 2],
strides=[4, 2, 2, 2],
num_heads=[1, 2, 4, 8],
mlp_ratios=[4, 4, 4, 4],
out_indices=(0, 1, 2, 3),
qkv_bias=False,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
norm_cfg=dict(type='LN'),
depths=[3, 4, 6, 3],
sr_ratios=[8, 4, 2, 1],
norm_after_stage=False,
pretrained=None,
init_cfg=None):
super(PCPVT, self).__init__(init_cfg=init_cfg)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be set at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is not None:
raise TypeError('pretrained must be a str or None')
self.depths = depths
# patch_embed
self.patch_embeds = ModuleList()
self.position_encoding_drops = ModuleList()
self.layers = ModuleList()
for i in range(len(depths)):
self.patch_embeds.append(
PatchEmbed(
in_channels=in_channels if i == 0 else embed_dims[i - 1],
embed_dims=embed_dims[i],
conv_type='Conv2d',
kernel_size=patch_sizes[i],
stride=strides[i],
padding='corner',
norm_cfg=norm_cfg))
self.position_encoding_drops.append(nn.Dropout(p=drop_rate))
self.position_encodings = ModuleList([
ConditionalPositionEncoding(embed_dim, embed_dim)
for embed_dim in embed_dims
])
# transformer encoder
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
] # stochastic depth decay rule
cur = 0
for k in range(len(depths)):
_block = ModuleList([
GSAEncoderLayer(
embed_dims=embed_dims[k],
num_heads=num_heads[k],
feedforward_channels=mlp_ratios[k] * embed_dims[k],
attn_drop_rate=attn_drop_rate,
drop_rate=drop_rate,
drop_path_rate=dpr[cur + i],
num_fcs=2,
qkv_bias=qkv_bias,
act_cfg=dict(type='GELU'),
norm_cfg=dict(type='LN'),
sr_ratio=sr_ratios[k]) for i in range(depths[k])
])
self.layers.append(_block)
cur += depths[k]
self.norm_name, norm = build_norm_layer(
norm_cfg, embed_dims[-1], postfix=1)
self.out_indices = out_indices
self.norm_after_stage = norm_after_stage
if self.norm_after_stage:
self.norm_list = ModuleList()
for dim in embed_dims:
self.norm_list.append(build_norm_layer(norm_cfg, dim)[1])
def init_weights(self):
if self.init_cfg is not None:
super(PCPVT, self).init_weights()
else:
for m in self.modules():
if isinstance(m, nn.Linear):
trunc_normal_init(m, std=.02, bias=0.)
elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)):
constant_init(m, val=1.0, bias=0.)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[
1] * m.out_channels
fan_out //= m.groups
normal_init(
m, mean=0, std=math.sqrt(2.0 / fan_out), bias=0)
def forward(self, x):
outputs = list()
b = x.shape[0]
for i in range(len(self.depths)):
x, hw_shape = self.patch_embeds[i](x)
h, w = hw_shape
x = self.position_encoding_drops[i](x)
for j, blk in enumerate(self.layers[i]):
x = blk(x, hw_shape)
if j == 0:
x = self.position_encodings[i](x, hw_shape)
if self.norm_after_stage:
x = self.norm_list[i](x)
x = x.reshape(b, h, w, -1).permute(0, 3, 1, 2).contiguous()
if i in self.out_indices:
outputs.append(x)
return tuple(outputs)
@BACKBONES.register_module()
class SVT(PCPVT):
"""The backbone of Twins-SVT.
This backbone is the implementation of `Twins: Revisiting the Design
of Spatial Attention in Vision Transformers
    <https://arxiv.org/abs/2104.13840>`_.
Args:
in_channels (int): Number of input channels. Default: 3.
embed_dims (list): Embedding dimension. Default: [64, 128, 256, 512].
patch_sizes (list): The patch sizes. Default: [4, 2, 2, 2].
strides (list): The strides. Default: [4, 2, 2, 2].
        num_heads (list): Number of attention heads. Default: [1, 2, 4].
        mlp_ratios (list): Ratio of mlp hidden dim to embedding dim.
            Default: [4, 4, 4].
out_indices (tuple[int]): Output from which stages.
Default: (0, 1, 2, 3).
qkv_bias (bool): Enable bias for qkv if True. Default: False.
drop_rate (float): Dropout rate. Default 0.
attn_drop_rate (float): Dropout ratio of attention weight.
Default 0.0
drop_path_rate (float): Stochastic depth rate. Default 0.2.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='LN')
depths (list): Depths of each stage. Default [4, 4, 4].
sr_ratios (list): Kernel_size of conv in each Attn module in
Transformer encoder layer. Default: [4, 2, 1].
        windiow_sizes (list): Window size of LSA. Default: [7, 7, 7].
        norm_after_stage(bool): Add extra norm. Default False.
init_cfg (dict, optional): The Config for initialization.
Defaults to None.
"""
def __init__(self,
in_channels=3,
embed_dims=[64, 128, 256],
patch_sizes=[4, 2, 2, 2],
strides=[4, 2, 2, 2],
num_heads=[1, 2, 4],
mlp_ratios=[4, 4, 4],
out_indices=(0, 1, 2, 3),
qkv_bias=False,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.2,
norm_cfg=dict(type='LN'),
depths=[4, 4, 4],
sr_ratios=[4, 2, 1],
windiow_sizes=[7, 7, 7],
norm_after_stage=True,
pretrained=None,
init_cfg=None):
super(SVT, self).__init__(in_channels, embed_dims, patch_sizes,
strides, num_heads, mlp_ratios, out_indices,
qkv_bias, drop_rate, attn_drop_rate,
drop_path_rate, norm_cfg, depths, sr_ratios,
norm_after_stage, pretrained, init_cfg)
# transformer encoder
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
] # stochastic depth decay rule
for k in range(len(depths)):
for i in range(depths[k]):
if i % 2 == 0:
self.layers[k][i] = \
LSAEncoderLayer(
embed_dims=embed_dims[k],
num_heads=num_heads[k],
feedforward_channels=mlp_ratios[k] * embed_dims[k],
drop_rate=drop_rate,
attn_drop_rate=attn_drop_rate,
drop_path_rate=dpr[sum(depths[:k])+i],
qkv_bias=qkv_bias,
window_size=windiow_sizes[k])
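# ---------------------------------------------------------------------------
# Rough usage sketch (not part of the original module; assumes torch, mmcv and
# the layers referenced above are importable, and keeps the default arguments):
#
#   model = SVT()                                # Twins-SVT backbone
#   model.init_weights()
#   feats = model(torch.randn(1, 3, 224, 224))
#   # `feats` is a tuple of per-stage feature maps, each of shape
#   # (N, embed_dims[i], H_i, W_i).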
|
py | b4132750ac0e23fa53808f4a9388fa49df1e6ad8 | #!/usr/bin/python
class GrabzItWaterMark:
def __init__(self, identifier, xPosition, yPosition, format):
self.Identifier = identifier
self.XPosition = xPosition
self.YPosition = yPosition
self.Format = format |
py | b41327b5b177e500d0d49ddbdc7f02215c254744 | #!/usr/bin/python3
# Created by Adam Smith on 20211003
# Imports
import random
# classes
class Decks:
"""This class is for shuffling between 1 to 6 decks of cards
the primary set will be a deck of cards. Up to 5 more
decks can be added with the add_decks function."""
def __init__(self):
self.deck = []
self.last_round = False
self.diamond_suit = ['AD', '2D', '3D', '4D', '5D', '6D', '7D', '8D', '9D', '10D', 'JD', 'QD', 'KD']
self.spade_suit = ['AS', '2S', '3S', '4S', '5S', '6S', '7S', '8S', '9S', '10S', 'JS', 'QS', 'KS']
self.heart_suit = ['AH', '2H', '3H', '4H', '5H', '6H', '7H', '8H', '9H', '10H', 'JH', 'QH', 'KH']
self.club_suit = ['AC', '2C', '3C', '4C', '5C', '6C', '7C', '8C', '9C', '10C', 'JC', 'QC', 'KC']
def shoe_build(self, num_deck=1):
self.deck = (self.diamond_suit + self.spade_suit + self.heart_suit + self.club_suit) * num_deck
def deck_shuffle(self):
temp_deck = []
while len(self.deck) > 0:
rand_num = random.randint(0, len(self.deck) - 1)
temp_deck.append(self.deck[rand_num])
self.deck.pop(rand_num)
self.deck = temp_deck
self.deck.insert(int(len(self.deck) / 52) * -15, 'r')
def show_deck(self):
print(self.deck)
def next_card(self):
if self.deck[0] != 'r':
next_cd = self.deck[0]
self.deck.pop(0)
else:
next_cd = self.deck[1]
self.deck.pop(0)
self.deck.pop(0)
self.last_round = True
return next_cd
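# Example usage (a minimal sketch; the shoe size and printouts below are
# illustrative only):
if __name__ == '__main__':
    shoe = Decks()
    shoe.shoe_build(num_deck=2)   # build a two-deck shoe
    shoe.deck_shuffle()           # shuffle and insert the reshuffle marker 'r'
    hand = [shoe.next_card() for _ in range(5)]
    print('Dealt:', hand)
    print('Reshuffle flagged:', shoe.last_round)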
|
py | b41327b6d79c8138998b0068d5ddfa0bc399815b | # Generated by Django 3.0 on 2019-12-15 22:07
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import koocook_core.models.base
import koocook_core.models.review
import koocook_core.support.markdown
import koocook_core.support.quantity
class Migration(migrations.Migration):
initial = True
dependencies = [
('koocook_auth', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='AggregateRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating_value', models.DecimalField(decimal_places=10, max_digits=13)),
('rating_count', models.IntegerField()),
('best_rating', models.IntegerField(default=5)),
('worst_rating', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='koocook_auth.KoocookUser')),
],
bases=(koocook_core.models.base.SerialisableModel, models.Model),
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_published', models.DateTimeField(auto_now_add=True)),
('body', koocook_core.support.markdown.FormattedField()),
('aggregate_rating', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='koocook_core.AggregateRating')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='koocook_core.Author')),
('reviewed_comment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='koocook_core.Comment')),
],
bases=(koocook_core.models.review.ReviewerModel, koocook_core.models.base.SerialisableModel, koocook_core.models.review.ReviewableModel, models.Model),
),
migrations.CreateModel(
name='MetaIngredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('nutrient', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
('description', models.CharField(blank=True, max_length=255, null=True)),
],
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_published', models.DateTimeField(auto_now_add=True)),
('body', koocook_core.support.markdown.FormattedField()),
('aggregate_rating', models.OneToOneField(blank=True, on_delete=django.db.models.deletion.PROTECT, to='koocook_core.AggregateRating')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='koocook_core.Author')),
],
bases=(koocook_core.models.base.SerialisableModel, koocook_core.models.review.ReviewableModel, models.Model),
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('image', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=200), blank=True, null=True, size=None)),
('video', models.URLField(blank=True, null=True)),
('date_published', models.DateTimeField(auto_now_add=True, null=True)),
('description', models.TextField()),
('prep_time', models.DurationField(null=True)),
('cook_time', models.DurationField(null=True)),
('recipe_instructions', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
('recipe_yield', koocook_core.support.quantity.QuantityField(null=True)),
('aggregate_rating', models.OneToOneField(blank=True, on_delete=django.db.models.deletion.PROTECT, to='koocook_core.AggregateRating')),
('author', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='koocook_core.Author')),
],
bases=(koocook_core.models.review.ReviewableModel, models.Model),
),
migrations.CreateModel(
name='RecipeEquipment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
],
bases=(koocook_core.models.base.SerialisableModel, models.Model),
),
migrations.CreateModel(
name='TagLabel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('level', models.IntegerField(default=1)),
],
bases=(koocook_core.models.base.SerialisableModel, models.Model),
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('label', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='koocook_core.TagLabel')),
],
options={
'unique_together': {('name', 'label')},
},
bases=(koocook_core.models.base.SerialisableModel, models.Model),
),
migrations.CreateModel(
name='RecipeIngredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', koocook_core.support.quantity.QuantityField()),
('description', models.CharField(blank=True, max_length=255, null=True)),
('meta', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='koocook_core.MetaIngredient')),
('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='koocook_core.Recipe')),
('substitute_set', models.ManyToManyField(blank=True, related_name='_recipeingredient_substitute_set_+', to='koocook_core.RecipeIngredient')),
],
),
migrations.AddField(
model_name='recipe',
name='equipment_set',
field=models.ManyToManyField(blank=True, to='koocook_core.RecipeEquipment'),
),
migrations.AddField(
model_name='recipe',
name='tag_set',
field=models.ManyToManyField(blank=True, to='koocook_core.Tag'),
),
migrations.CreateModel(
name='Rating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rating_value', models.IntegerField()),
('best_rating', models.IntegerField(default=5)),
('worst_rating', models.IntegerField(default=1)),
('used', models.BooleanField(blank=True, default=False)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='koocook_core.Author')),
('reviewed_comment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='koocook_core.Comment')),
('reviewed_post', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='koocook_core.Post')),
('reviewed_recipe', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='koocook_core.Recipe')),
],
bases=(koocook_core.models.review.ReviewerModel, models.Model),
),
migrations.AddField(
model_name='comment',
name='reviewed_post',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='koocook_core.Post'),
),
migrations.AddField(
model_name='comment',
name='reviewed_recipe',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='koocook_core.Recipe'),
),
migrations.CreateModel(
name='RecipeVisit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_address', models.CharField(max_length=45)),
('date_first_visited', models.DateTimeField(auto_now_add=True)),
('date_last_visited', models.DateTimeField(auto_now=True)),
('recipe', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='koocook_core.Recipe')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='koocook_auth.KoocookUser')),
],
options={
'verbose_name': 'Recipe visit count',
'db_table': 'koocook_core_recipe_visit',
'unique_together': {('ip_address', 'user', 'recipe'), ('user', 'recipe')},
},
),
]
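    # To apply this initial migration with the standard Django workflow
    # (assuming the app label is 'koocook_core'):
    #   python manage.py migrate koocook_core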
|
py | b41327f58cf4ddbee338ed2bea2c29649f34c00d | """
This is for running the PRNTR's complete operation:
What to do
Place .stl file in the code folder
Check path
Check the correct camera operation is being used: on a computer use camera.py, on a Raspberry Pi use os.system(...)
Check camera.py if using a computer --> if cv2.VideoCapture is 0 then it's the built-in webcam, if it's 1 then it's the connected camera
Check range values for number of layers --> change in bottomrange & toprange variables below
Check that 'PRNTR/files/Layer/ideal/image_name.jpg' exists
Check that 'PRNTR/files/Layer/diffs/diff.jpg' exists
Run PRNTR.py
Enter the STL filename
Answer file deletion question
"""
import time
import os
import shutil
import os.path
import LayerNumber
start = time.time()
path = str('/Users/BobTheBlockchainBuilder/Desktop/PRNTR') #change path according to device
location = path
#Range values for the layers in the print used in: LayerSplitter.py & Image_maker.py
bottomrange = int(0)
bottom_range = bottomrange
toprange = int(101)
top_range = toprange
#currentlayernumber = LayerNumber.numberiton #not working properly
currentlayernumber = 4
def main():
location = path
    # Copies the G-code file from the code folder into the files folder and renames it to operation.gcode so it's easier to streamline the rest of the code
stlname = str(input('Name of STL file: ')) #STL file has to be stored in code file
    os.system( 'mandoline -o {}.gcode -n {}.stl'.format(stlname, stlname) ) # this runs a terminal command to slice the STL file with the entered name and save its G-code
src = '{}/code/{}.gcode'.format(path, stlname)
dst = '{}/files/operation.gcode'.format(path)
    shutil.copyfile(src, dst) # Copies the G-code file into the files folder and renames it to operation.gcode
if os.path.exists('{}/files/operation.gcode'.format(location)):
#Telling Layer Splitter operation to activate
os.system('python LayerSplitter.py')
#operation to trigger the skin maker
if os.path.exists("{}/files/Layer/Layer0.gcode".format(location)):
os.system('python skin.py')
# Operation to produce image files from the g code, to be used as an 'ideal image'
if os.path.exists('{}/files/Layer/Layer0.gcode'.format(location)):
os.system('python Image_maker.py')
else:
                print('Error in split layer files or Image_maker')
else:
print('There is an error in the LayerSplitter or skin code')
else:
print('There is an error with the operation.gcode file')
def CCO(): # CCO - Camera & Comparison Operation
"""
#CAMERA
# using python find a way to see what layer is being printed at that specific moment in time
#Layer_number = (layer being printed, which will be read from the printers 'website')
if(Layer_number%5==0):
os.system('python camera.py')
else:
print('Not capturing yet')
#add code to check head is clear and camera can capture correctly
"""
# -------- Camera Operation --------
# Camera operation; use camera.py for computer & fswebcam for Raspberry Pi
os.system('python camera.py') #Use with Mac/Windows
#os.system('fswebcam -r 1920x1080 --no-banner {}/files/capture.jpg'.format(location)) #Use with raspberry pi
# -------- Camera Operation --------
# Operation to verify that the file has been captured
if os.path.exists('{}/files/capture.jpg'.format(location)):
# Operation to run Image_manipulation.py
os.system('python Image_manipulation.py')
        # Operation to verify Image_manipulation.py has occurred
if os.path.exists('{}/files/new_test_resize.jpg'.format(location)):
# Operation to run edge.py
os.system('python edge.py')
# Operation to verify that edge.py has been run
if os.path.exists('{}/files/Edgey.jpg'.format(location)):
print('Edgey exists!')
# Operation to run image comparison (for the layer file set it so the image with 'Layer{}' file name is the one used for comparison)
os.system('python ImageComparison2.py')
# *show comparison*
else:
print('There is an error in the ImageComparison file')
else:
print('There is an error in the edge file, or the file names')
else:
print("There is an error in the Image_manipulation file")
def delete():
# Operation to delete files
    finish = input('The printing is complete, do you want to delete the files? Yes or No --> ')
if finish == "yes" or finish == "Yes" or finish == "y" or finish == "Y" or finish == "YES":
print("The files will be deleted")
os.system('python delete.py')
elif finish == 'no' or finish == "NO" or finish == "No" or finish == "N" or finish == "n":
print('Not Deleting')
else:
print("Could no understand answer")
end = time.time()
print('Time to run PRNTR code:', end - start, "seconds")
print("")
# FLO needs to be fixed
# Send 5 Layers to printer then CCO
def FLO(): # FLO - five layer operation
num = currentlayernumber
div = int(5)
if num%div == 0:
CCO()
    elif num%toprange == 1: # doesn't work very well; needs to be redone, because when using multiples of 5 it doesn't recognise that it could also be the last layer
print('Operation complete')
delete()
else:
print("We have not reached that layer number yet!")
#FLO() #if this is turned on, remove the command "CCO()" below | FLO causes the camera operation to occur when it is on a layer which is a multiple of 5
CCO()
delete()
if __name__ == '__main__':
main()
print("😃")
print('')
#else: # this is used to main commands that will run when importing the file into another
# print('---')
|
py | b413285cad9fe824824041a54fc68c638c79f553 | from typing import Optional, List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def maxPathSum(self, root: Optional[TreeNode]) -> int:
max_value = [float('-inf')]
self.maxPathDown(root, max_value)
return int(max_value[0])
def maxPathDown(self, node: Optional[TreeNode], max_value: List[float]):
if not node:
return 0
left = max(0, self.maxPathDown(node.left, max_value))
right = max(0, self.maxPathDown(node.right, max_value))
max_value[0] = max(max_value[0], left + right + node.val)
return max(left, right) + node.val
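# Small sanity check (an illustrative example with the expected value worked
# out by hand): the tree [-10, 9, 20, null, null, 15, 7] has maximum path
# 15 -> 20 -> 7 with sum 42.
if __name__ == '__main__':
    root = TreeNode(-10, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().maxPathSum(root))  # expected output: 42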
|
py | b41329ac1d4f4628c50fc8d9c7922779c2dcfaed | """
The old version of this script is here:
https://github.com/mrdrozdov/spinn/blob/2a06fc489faf44796dcb88a57afd699ba9937cfa/scripts/analyze_trees.py
Usage:
$ mkdir example_trees
$ python scripts/analyze_report_trees.py \
--data_path ./snli_1.0/snli_1.0_dev.jsonl \
--report_path ./checkpoints/example-snli.report \
--out_path ./example_trees
"""
from __future__ import print_function
import gflags
import numpy as np
import pandas as pd
import sys
import pydot
import argparse
from tqdm import tqdm
from spinn.data.listops import load_listops_data
FLAGS = gflags.FLAGS
def create_tree_ascii(words, transitions):
buf = list(reversed(words))
stack = []
for i, t in enumerate(transitions):
if t == 0:
stack.append([buf.pop()])
elif t == 1:
r = stack.pop()
l = stack.pop()
stack.append(['('] + l + r + [')'])
return stack[-1]
def newline(depth):
print('\n' + ' '*depth, end='')
def ascii_tree_prettyprint(tree):
print()
ind = 0
for i, sym in enumerate(tree):
if sym == '(':
if tree[i - 1] != '(':
print(' ', end='')
print(sym, end='')
ind += 1
elif sym == ')':
print(sym, end='')
ind -= 1
newline(ind)
else:
print(sym, end='')
if tree[i + 1] != ')':
newline(ind)
def print_tree(words, transitions):
ascii_tree_prettyprint(create_tree_ascii(words, transitions))
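# For instance (a quick illustration, not from the original script),
#   print_tree(['a', 'b', 'c'], [0, 0, 1, 0, 1])
# pretty-prints the binary tree ((a b) c) as indented ASCII.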
def read_report(fn):
return pd.read_csv(fn, delimiter=' ', names=['example_id', 'correct', 'truth', 'pred', '', 'p', 'p', 'p', 'p', 'p', 'p', 'p', 'p', 'p', 'sent1_parse'])
def build_data_dict(data):
table = dict()
for ex in data:
table[str(ex['example_id'])] = ex
return table
def run():
print("Loading data...")
data, _ = load_listops_data.load_data(FLAGS.data_path)
print("Building data table...")
data_table = build_data_dict(data)
print("Loading report...")
report = read_report(FLAGS.report_path)
report = report.to_records()
for rec in tqdm(report):
if str(rec.example_id) not in data_table:
continue
ex = data_table[str(rec.example_id)]
transitions1 = [int(t) for t in rec.sent1_parse]
tokens1 = ex['tokens']
        # Prints the tree as ASCII (to stdout).
graph1 = print_tree(tokens1, transitions1)
if __name__ == '__main__':
gflags.DEFINE_string("report_path", "./checkpoints/example.report", "")
gflags.DEFINE_string("data_path", "./spinn/data/listops/test_d20a.tsv", "")
gflags.DEFINE_string("out_path", "./example_trees", "")
FLAGS(sys.argv)
run()
|
py | b4132a99a5390c8d94bebb2302b2bd409105030e | """
datadict.py :
Data classes we use throughout the plottr package, and tools to work on them.
"""
import warnings
import copy as cp
import numpy as np
from functools import reduce
from typing import List, Tuple, Dict, Sequence, Union, Any, Iterator, Optional, TypeVar
from plottr.utils import num, misc
__author__ = 'Wolfgang Pfaff'
__license__ = 'MIT'
# TODO: functionality that returns axes values given a set of slices.
# TODO: an easier way to access data and meta values.
# maybe with getattr/setattr?
# TODO: direct slicing of full datasets. implement getitem/setitem?
# TODO: feature to compare if datadicts are equal not fully tested yet.
def is_meta_key(key: str) -> bool:
if key[:2] == '__' and key[-2:] == '__':
return True
else:
return False
def meta_key_to_name(key: str) -> str:
if is_meta_key(key):
return key[2:-2]
else:
raise ValueError(f'{key} is not a meta key.')
def meta_name_to_key(name: str) -> str:
return '__' + name + '__'
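# Quick illustration of the key/name convention above (not part of the
# original module):
#   is_meta_key('__info__')       -> True
#   meta_key_to_name('__info__')  -> 'info'
#   meta_name_to_key('info')      -> '__info__'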
T = TypeVar('T', bound='DataDictBase')
class DataDictBase(dict):
"""
Simple data storage class that is based on a regular dictionary.
This base class does not make assumptions about the structure of the
values. This is implemented in inheriting classes.
"""
def __init__(self, **kw: Any):
super().__init__(self, **kw)
def __eq__(self, other: object) -> bool:
"""Check for content equality of two datadicts."""
if not isinstance(other, DataDictBase):
return NotImplemented
if not self.same_structure(self, other):
# print('structure')
return False
for k, v in self.meta_items():
if k not in [kk for kk, vv in other.meta_items()]:
# print(f'{k} not in {other}')
return False
elif other.meta_val(k) != v:
# print(f'{other.meta_val(k)} != {v}')
return False
for k, v in other.meta_items():
if k not in [kk for kk, vv in self.meta_items()]:
# print(f'{k} not in {self}')
return False
for dn, dv in self.data_items():
# print(dn)
if dn not in [dnn for dnn, dvv in other.data_items()]:
# print(f"{dn} not in {other}")
return False
if self[dn].get('unit', '') != other[dn].get('unit', ''):
# print(f"different units for {dn}")
return False
if self[dn].get('axes', []) != other[dn].get('axes', []):
# print(f"different axes for {dn}")
return False
if not num.arrays_equal(
np.array(self.data_vals(dn)),
np.array(other.data_vals(dn)),
):
# print(f"different data for {dn}")
return False
for k, v in self.meta_items(dn):
if k not in [kk for kk, vv in other.meta_items(dn)]:
# print(f"{dn}: {k} not in {other}")
return False
elif v != other.meta_val(k, dn):
# print(f"{v} != {other.meta_val(k, dn)}")
return False
for dn, dv in other.data_items():
# print(dn)
if dn not in [dnn for dnn, dvv in self.data_items()]:
# print(f"{dn} not in {other}")
return False
for k, v in other.meta_items(dn):
if k not in [kk for kk, vv in self.meta_items(dn)]:
# print(f"{dn}: {k} not in {other}")
return False
return True
# Assignment and retrieval of data and meta data
@staticmethod
def _is_meta_key(key: str) -> bool:
return is_meta_key(key)
@staticmethod
def _meta_key_to_name(key: str) -> str:
return meta_key_to_name(key)
@staticmethod
def _meta_name_to_key(name: str) -> str:
return meta_name_to_key(name)
def data_items(self) -> Iterator[Tuple[str, Dict[str, Any]]]:
"""
Generator for data field items.
Like dict.items(), but ignores meta data.
"""
for k, v in self.items():
if not self._is_meta_key(k):
yield k, v
def meta_items(self, data: Union[str, None] = None,
clean_keys: bool = True) -> Iterator[Tuple[str, Dict[str, Any]]]:
"""
Generator for meta items.
Like dict.items(), but yields `only` meta entries.
The keys returned do not contain the underscores used internally.
:param data: if ``None`` iterate over global meta data.
if it's the name of a data field, iterate over the meta
information of that field.
:param clean_keys: if `True`, remove the underscore pre/suffix
"""
if data is None:
for k, v in self.items():
if self._is_meta_key(k):
if clean_keys:
n = self._meta_key_to_name(k)
else:
n = k
yield n, v
else:
for k, v in self[data].items():
if self._is_meta_key(k):
if clean_keys:
n = self._meta_key_to_name(k)
else:
n = k
yield n, v
def data_vals(self, key: str) -> Union[Sequence, np.ndarray]:
"""
Return the data values of field ``key``.
        Equivalent to ``DataDict['key']['values']``.
:param key: name of the data field
:return: values of the data field
"""
if self._is_meta_key(key):
raise ValueError(f"{key} is a meta key.")
return self[key].get('values', np.array([]))
def has_meta(self, key: str) -> bool:
"""Check whether meta field exists in the dataset."""
k = self._meta_name_to_key(key)
if k in self:
return True
else:
return False
def meta_val(self, key: str, data: Union[str, None] = None) -> Any:
"""
Return the value of meta field ``key`` (given without underscore).
:param key: name of the meta field
:param data: ``None`` for global meta; name of data field for data meta.
:return: the value of the meta information.
"""
k = self._meta_name_to_key(key)
if data is None:
return self[k]
else:
return self[data][k]
def add_meta(self, key: str, value: Any, data: Union[str, None] = None) -> None:
"""
Add meta info to the dataset.
If the key already exists, meta info will be overwritten.
:param key: Name of the meta field (without underscores)
:param value: Value of the meta information
:param data: if ``None``, meta will be global; otherwise assigned to
data field ``data``.
"""
key = self._meta_name_to_key(key)
if data is None:
self[key] = value
else:
self[data][key] = value
set_meta = add_meta
def delete_meta(self, key: str, data: Union[str, None] = None) -> None:
"""
Remove meta data.
:param key: name of the meta field to remove.
:param data: if ``None``, this affects global meta; otherwise remove
from data field ``data``.
"""
key = self._meta_name_to_key(key)
if data is None:
del self[key]
else:
del self[data][key]
def clear_meta(self, data: Union[str, None] = None) -> None:
"""
Delete meta information.
        :param data: if this is not None, delete only meta information from data
field `data`. Else, delete all top-level meta, as well as
meta for all data fields.
"""
if data is None:
meta_list = [k for k, _ in self.meta_items()]
for m in meta_list:
self.delete_meta(m)
for d, _ in self.data_items():
data_meta_list = [k for k, _ in self.meta_items(d)]
for m in data_meta_list:
self.delete_meta(m, d)
else:
for m, _ in self.meta_items(data):
self.delete_meta(m, data)
def extract(self: T, data: List[str], include_meta: bool = True,
copy: bool = True, sanitize: bool = True) -> T:
"""
Extract data from a dataset.
Return a new datadict with all fields specified in ``data`` included.
Will also take any axes fields along that have not been explicitly
specified.
:param data: data field or list of data fields to be extracted
:param include_meta: if ``True``, include the global meta data.
data meta will always be included.
:param copy: if ``True``, data fields will be deep copies of the
original.
:param sanitize: if ``True``, will run DataDictBase.sanitize before
returning.
:return: new DataDictBase containing only requested fields.
"""
if isinstance(data, str):
data = [data]
else:
data = data.copy()
for d in data:
for a in self.axes(d):
if a not in data:
data.append(a)
ret = self.__class__()
for d in data:
if copy:
ret[d] = cp.deepcopy(self[d])
else:
ret[d] = self[d]
if include_meta:
for k, v in self.meta_items():
if copy:
ret.add_meta(k, cp.deepcopy(v))
else:
ret.add_meta(k, v)
if sanitize:
ret = ret.sanitize()
ret.validate()
return ret
# info about structure
@staticmethod
def same_structure(*data: T,
check_shape: bool = False) -> bool:
"""
Check if all supplied DataDicts share the same data structure
(i.e., dependents and axes).
Ignores meta info and values. Checks also for matching shapes if
`check_shape` is `True`.
:param data: the data sets to compare
:param check_shape: whether to include a shape check in the comparison
:return: ``True`` if the structure matches for all, else ``False``.
"""
if len(data) < 2:
return True
def empty_structure(d: T) -> T:
s = misc.unwrap_optional(d.structure(include_meta=False, add_shape=check_shape))
for k, v in s.data_items():
if 'values' in v:
del s[k]['values']
return s
s0 = empty_structure(data[0])
for d in data[1:]:
if d is None:
return False
if s0 != empty_structure(d):
return False
return True
def structure(self: T, add_shape: bool = False,
include_meta: bool = True,
same_type: bool = False) -> Optional[T]:
"""
Get the structure of the DataDict.
Return the datadict without values (`value` omitted in the dict).
:param add_shape: Deprecated -- ignored.
:param include_meta: if `True`, include the meta information in
the returned dict, else clear it.
:param same_type: if `True`, return type will be the one of the
object this is called on. Else, DataDictBase.
:return: The DataDict containing the structure only. The exact type
is the same as the type of ``self``
"""
if add_shape:
warnings.warn("'add_shape' is deprecated and will be ignored",
DeprecationWarning)
add_shape = False
if self.validate():
s = self.__class__()
for n, v in self.data_items():
v2 = v.copy()
v2.pop('values')
s[n] = v2
if include_meta:
for n, v in self.meta_items():
s.add_meta(n, v)
else:
s.clear_meta()
if same_type:
s = self.__class__(**s)
return s
return None
def label(self, name: str) -> Optional[str]:
"""
Get a label for a data field.
If a unit is present, this is the name with the unit appended in
brackets: ``name (unit)``; if no unit is present, just the name.
:param name: name of the data field
:return: labelled name
"""
if self.validate():
if name not in self:
raise ValueError("No field '{}' present.".format(name))
n = name
if self[name]['unit'] != '':
n += ' ({})'.format(self[name]['unit'])
return n
return None
def axes_are_compatible(self) -> bool:
"""
Check if all dependent data fields have the same axes.
This includes axes order.
:return: ``True`` or ``False``
"""
axes = []
for i, d in enumerate(self.dependents()):
if i == 0:
axes = self.axes(d)
else:
if self.axes(d) != axes:
return False
return True
def axes(self, data: Union[Sequence[str], str, None] = None) -> List[str]:
"""
Return a list of axes.
:param data: if ``None``, return all axes present in the dataset,
otherwise only the axes of the dependent ``data``.
:return: the list of axes
"""
lst = []
if data is None:
for k, v in self.data_items():
if 'axes' in v:
for n in v['axes']:
if n not in lst and self[n].get('axes', []) == []:
lst.append(n)
else:
if isinstance(data, str):
dataseq: Sequence[str] = (data,)
else:
dataseq = data
for n in dataseq:
if 'axes' not in self[n]:
continue
for m in self[n]['axes']:
if m not in lst and self[m].get('axes', []) == []:
lst.append(m)
return lst
def dependents(self) -> List[str]:
"""
Get all dependents in the dataset.
:return: a list of the names of dependents (data fields that have axes)
"""
ret = []
for n, v in self.data_items():
if len(v.get('axes', [])) != 0:
ret.append(n)
return ret
def shapes(self) -> Dict[str, Tuple[int, ...]]:
"""
Get the shapes of all data fields.
:return: a dictionary of the form ``{key : shape}``, where shape is the
np.shape-tuple of the data with name ``key``.
"""
shapes = {}
for k, v in self.data_items():
shapes[k] = np.array(self.data_vals(k)).shape
return shapes
# validation and sanitizing
def validate(self) -> bool:
"""
Check the validity of the dataset.
Checks performed:
* all axes specified with dependents must exist as data fields.
Other tasks performed:
* ``unit`` keys are created if omitted
* ``shape`` meta information is updated with the correct values
(only if present already).
:return: ``True`` if valid.
:raises: ``ValueError`` if invalid.
"""
msg = '\n'
for n, v in self.data_items():
if 'axes' in v:
for na in v['axes']:
if na not in self:
msg += " * '{}' has axis '{}', but no field " \
"with name '{}' registered.\n".format(
n, na, na)
elif na not in self.axes():
msg += " * '{}' has axis '{}', but no independent " \
"with name '{}' registered.\n".format(
n, na, na)
else:
v['axes'] = []
if 'unit' not in v:
v['unit'] = ''
vals = v.get('values', [])
if type(vals) not in [np.ndarray, np.ma.core.MaskedArray]:
vals = np.array(vals)
v['values'] = vals
if msg != '\n':
raise ValueError(msg)
return True
def remove_unused_axes(self: T) -> T:
"""
Removes axes not associated with dependents.
:return: cleaned dataset.
"""
dependents = self.dependents()
unused = []
ret = self.copy()
for n, v in self.data_items():
used = False
if n not in dependents:
for m in dependents:
if n in self[m]['axes']:
used = True
else:
used = True
if not used:
unused.append(n)
for u in unused:
del ret[u]
return ret
def sanitize(self: T) -> T:
"""
Clean-up tasks:
* removes unused axes.
:return: sanitized dataset.
"""
return self.remove_unused_axes()
# axes order tools
def reorder_axes_indices(self, name: str,
**pos: int) -> Tuple[Tuple[int, ...], List[str]]:
"""
Get the indices that can reorder axes in a given way.
:param name: name of the data field of which we want to reorder axes
:param pos: new axes position in the form ``axis_name = new_position``.
non-specified axes positions are adjusted automatically.
:return: the tuple of new indices, and the list of axes names in the
new order.
"""
axlist = self.axes(name)
order = misc.reorder_indices_from_new_positions(axlist, **pos)
return order, [axlist[i] for i in order]
def reorder_axes(self: T, data_names: Union[str, Sequence[str], None] = None,
**pos: int) -> T:
"""
Reorder data axes.
:param data_names: data name(s) for which to reorder the axes
if None, apply to all dependents.
:param pos: new axes position in the form ``axis_name = new_position``.
non-specified axes positions are adjusted automatically.
:return: dataset with re-ordered axes.
"""
if data_names is None:
data_names = self.dependents()
if isinstance(data_names, str):
data_names = [data_names]
ret = self.copy()
for n in data_names:
neworder, newaxes = self.reorder_axes_indices(n, **pos)
ret[n]['axes'] = newaxes
ret.validate()
return ret
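    # For example (an illustrative sketch): if 'z' has axes ['x', 'y'], then
    # `dd.reorder_axes('z', y=0)` returns a copy in which 'z' has axes
    # ['y', 'x']; only the axes list is reordered here -- the transposition of
    # gridded values is handled by MeshgridDataDict.reorder_axes below.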
def copy(self: T) -> T:
"""
Make a copy of the dataset.
:return: A copy of the dataset.
"""
return cp.deepcopy(self)
def astype(self: T, dtype: np.dtype) -> T:
"""
Convert all data values to given dtype.
:param dtype: np dtype.
:return: copy of the dataset, with values as given type.
"""
ret = self.copy()
for k, v in ret.data_items():
vals = v['values']
if type(v['values']) not in [np.ndarray, np.ma.core.MaskedArray]:
vals = np.array(v['values'])
ret[k]['values'] = vals.astype(dtype)
return ret
def mask_invalid(self: T) -> T:
"""
Mask all invalid data in all values.
:return: copy of the dataset with invalid entries (nan/None) masked.
"""
ret = self.copy()
for d, _ in self.data_items():
arr = self.data_vals(d)
assert isinstance(arr, np.ndarray)
vals = np.ma.masked_where(num.is_invalid(arr), arr, copy=True)
try:
vals.fill_value = np.nan
except TypeError:
vals.fill_value = -9999
ret[d]['values'] = vals
return ret
class DataDict(DataDictBase):
"""
The most basic implementation of the DataDict class.
It only enforces that the number of `records` per data field must be
equal for all fields. This refers to the most outer dimension in case
of nested arrays.
The class further implements simple appending of datadicts through the
``DataDict.append`` method, as well as allowing addition of DataDict
instances.
"""
def __add__(self, newdata: 'DataDict') -> 'DataDict':
"""
Adding two datadicts by appending each data array.
Requires that the datadicts have the same structure.
Retains the meta information of the first array.
:param newdata: DataDict to be added.
:returns: combined DataDict.
:raises: ``ValueError`` if the structures are incompatible.
"""
# FIXME: remove shape
s = misc.unwrap_optional(self.structure(add_shape=False))
if DataDictBase.same_structure(self, newdata):
for k, v in self.data_items():
val0 = self[k]['values']
val1 = newdata[k]['values']
s[k]['values'] = np.append(
self[k]['values'],
newdata[k]['values'],
axis=0
)
return s
else:
raise ValueError('Incompatible data structures.')
def append(self, newdata: "DataDict") -> None:
"""
Append a datadict to this one by appending data values.
:param newdata: DataDict to append.
:raises: ``ValueError``, if the structures are incompatible.
"""
if not DataDictBase.same_structure(self, newdata):
raise ValueError('Incompatible data structures.')
newvals = {}
for k, v in newdata.data_items():
if isinstance(self[k]['values'], list) and isinstance(
v['values'], list):
newvals[k] = self[k]['values'] + v['values']
else:
newvals[k] = np.append(
self[k]['values'],
v['values'],
axis=0
)
        # only actually update the stored values once all new arrays have been
        # built, so a failure above leaves the data unchanged
for k, v in newvals.items():
self[k]['values'] = v
def add_data(self, **kw: Sequence) -> None:
# TODO: fill non-given data with nan or none
"""
Add data to all values. new data must be valid in itself.
This method is useful to easily add data without needing to specify
meta data or dependencies, etc.
:param kw: one array per data field (none can be omitted).
:return: None
"""
dd = misc.unwrap_optional(self.structure(same_type=True))
for k, v in kw.items():
if isinstance(v, list):
dd[k]['values'] = np.array(v)
elif isinstance(v, np.ndarray):
dd[k]['values'] = v
else:
dd[k]['values'] = np.array([v])
if dd.validate():
records = self.nrecords()
if records is not None and records > 0:
self.append(dd)
else:
for key, val in dd.data_items():
self[key]['values'] = val['values']
self.validate()
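    # For example (an illustrative sketch): for a DataDict with an independent
    # 'x' and a dependent 'y' (axes=['x']), three records can be added with
    #   dd.add_data(x=[0, 1, 2], y=[0, 1, 4])
    # every data field must be supplied and the lengths must match.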
# shape information and expansion
def nrecords(self) -> Optional[int]:
"""
:return: The number of records in the dataset.
"""
self.validate()
for _, v in self.data_items():
return len(v['values'])
return None
def _inner_shapes(self) -> Dict[str, Tuple[int, ...]]:
shapes = self.shapes()
return {k: v[1:] for k, v in shapes.items()}
def is_expanded(self) -> bool:
"""
Determine if the DataDict is expanded.
:return: ``True`` if expanded. ``False`` if not.
"""
ishp = self._inner_shapes()
if set(ishp.values()) == {tuple()}:
return True
else:
return False
def is_expandable(self) -> bool:
"""
Determine if the DataDict can be expanded.
Expansion flattens all nested data values to a 1D array. For doing so,
we require that all data fields that have nested/inner dimensions (i.e,
        inside the `records` level) share the inner shape.
In other words, all data fields must be of shape (N,) or (N, (shape)),
where shape is common to all that have a shape not equal to (N,).
:return: ``True`` if expandable. ``False`` otherwise.
"""
shp = self._inner_shapes()
if len(set(shp.values())) == 1:
return True
elif len(set(shp.values())) == 2 and tuple() in set(shp.values()):
return True
else:
return False
def expand(self) -> 'DataDict':
"""
Expand nested values in the data fields.
Flattens all value arrays. If nested dimensions
are present, all data with non-nested dims will be repeated
accordingly -- each record is repeated to match the size of
the nested dims.
:return: The flattened dataset.
:raises: ``ValueError`` if data is not expandable.
"""
self.validate()
if not self.is_expandable():
raise ValueError('Data cannot be expanded.')
struct = misc.unwrap_optional(self.structure(add_shape=False))
ret = DataDict(**struct)
if self.is_expanded():
return self.copy()
ishp = self._inner_shapes()
size = max([np.prod(s) for s in ishp.values()])
for k, v in self.data_items():
reps = size // np.prod(ishp[k])
if reps > 1:
ret[k]['values'] = \
self[k]['values'].repeat(reps, axis=0).reshape(-1)
else:
ret[k]['values'] = self[k]['values'].reshape(-1)
return ret
# validation and sanitizing
def validate(self) -> bool:
"""
Check dataset validity.
Beyond the checks performed in the base class ``DataDictBase``,
check whether the number of records is the same for all data fields.
:return: ``True`` if valid.
:raises: ``ValueError`` if invalid.
"""
if super().validate():
nvals = None
nvalsrc = None
msg = '\n'
for n, v in self.data_items():
if type(v['values']) not in [np.ndarray,
np.ma.core.MaskedArray]:
self[n]['values'] = np.array(v['values'])
if nvals is None:
nvals = len(v['values'])
nvalsrc = n
else:
if len(v['values']) != nvals:
msg += " * '{}' has length {}, but have found {} in " \
"'{}'\n".format(
n, len(v['values']), nvals, nvalsrc)
if msg != '\n':
raise ValueError(msg)
return True
def sanitize(self) -> "DataDict":
"""
Clean-up.
Beyond the tasks of the base class ``DataDictBase``:
* remove invalid entries as far as reasonable.
:return: sanitized DataDict
"""
ret = super().sanitize()
return ret.remove_invalid_entries()
def remove_invalid_entries(self) -> 'DataDict':
"""
Remove all rows that are ``None`` or ``np.nan`` in *all* dependents.
:return: the cleaned DataDict.
"""
ishp = self._inner_shapes()
idxs = []
ret = self.copy()
# collect rows that are completely invalid
for d in self.dependents():
# need to discriminate whether there are nested dims or not
if len(ishp[d]) == 0:
rows = self.data_vals(d)
else:
datavals = self.data_vals(d)
assert isinstance(datavals, np.ndarray)
rows = datavals.reshape(-1, np.prod(ishp[d]))
_idxs = np.array([])
# get indices of all rows that are fully None
if len(ishp[d]) == 0:
                _newidxs = np.where(np.equal(rows, None))[0]
            else:
                _newidxs = np.where(np.all(np.equal(rows, None), axis=-1))[0]
_idxs = np.append(_idxs, _newidxs)
# get indices for all rows that are fully NaN. works only
# for some dtypes, so except TypeErrors.
try:
if len(ishp[d]) == 0:
_newidxs = np.where(np.isnan(rows))[0]
else:
_newidxs = np.where(np.all(np.isnan(rows), axis=-1))[0]
_idxs = np.append(_idxs, _newidxs)
except TypeError:
pass
idxs.append(_idxs)
if len(idxs) > 0:
remove_idxs = reduce(np.intersect1d,
tuple(np.array(idxs).astype(int)))
for k, v in ret.data_items():
v['values'] = np.delete(v['values'], remove_idxs, axis=0)
return ret
class MeshgridDataDict(DataDictBase):
"""
A dataset where the axes form a grid on which the dependent values reside.
This is a more special case than ``DataDict``, but a very common scenario.
To support flexible grids, this class requires that all axes specify values
for each datapoint, rather than a single row/column/dimension.
For example, if we want to specify a 3-dimensional grid with axes x, y, z,
the values of x, y, z all need to be 3-dimensional arrays; the same goes
for all dependents that live on that grid.
Then, say, x[i,j,k] is the x-coordinate of point i,j,k of the grid.
This implies that a ``MeshgridDataDict`` can only have a single shape,
i.e., all data values share the exact same nesting structure.
For grids where the axes do not depend on each other, the correct values for
the axes can be obtained from np.meshgrid (hence the name of the class).
Example: a simple uniform 3x2 grid might look like this; x and y are the
coordinates of the grid, and z is a function of the two::
x = [[0, 0],
[1, 1],
[2, 2]]
y = [[0, 1],
[0, 1],
[0, 1]]
z = x * y =
[[0, 0],
[0, 1],
[0, 2]]
Note: Internally we will typically assume that the nested axes are
ordered from slow to fast, i.e., dimension 1 is the most outer axis, and
dimension N of an N-dimensional array the most inner (i.e., the fastest
changing one). This guarantees, for example, that the default implementation
    of np.reshape has the expected outcome. If, for some reason, the specified
    axes are not in that order (e.g., we might have ``z`` with
    ``axes = ['x', 'y']``, but ``x`` is the fast axis in the data), then
    the guideline is that at creation of the meshgrid, the data
should be transposed such that it conforms correctly to the order as given
in the ``axis = [...]`` specification of the data.
The function ``datadict_to_meshgrid`` provides options for that.
"""
def shape(self) -> Union[None, Tuple[int, ...]]:
"""
Return the shape of the meshgrid.
:returns: the shape as tuple. None if no data in the set.
"""
for d, _ in self.data_items():
return np.array(self.data_vals(d)).shape
return None
def validate(self) -> bool:
"""
Validation of the dataset.
Performs the following checks:
* all dependents must have the same axes
* all shapes need to be identical
:return: ``True`` if valid.
:raises: ``ValueError`` if invalid.
"""
if not super().validate():
return False
msg = '\n'
axes = None
axessrc = ''
for d in self.dependents():
if axes is None:
axes = self.axes(d)
else:
if axes != self.axes(d):
msg += f" * All dependents must have the same axes, but "
msg += f"{d} has {self.axes(d)} and {axessrc} has {axes}\n"
shp = None
shpsrc = ''
for n, v in self.data_items():
if type(v['values']) not in [np.ndarray, np.ma.core.MaskedArray]:
self[n]['values'] = np.array(v['values'])
if shp is None:
shp = v['values'].shape
shpsrc = n
else:
if v['values'].shape != shp:
msg += f" * shapes need to match, but '{n}' has"
msg += f" {v['values'].shape}, "
msg += f"and '{shpsrc}' has {shp}.\n"
if msg != '\n':
raise ValueError(msg)
return True
def reorder_axes(self, data_names: Union[str, Sequence[str], None] = None,
**pos: int) -> 'MeshgridDataDict':
"""
Reorder the axes for all data.
This includes transposing the data, since we're on a grid.
:param pos: new axes position in the form ``axis_name = new_position``.
non-specified axes positions are adjusted automatically.
:return: Dataset with re-ordered axes.
"""
if data_names is None:
data_names = self.dependents()
if isinstance(data_names, str):
data_names = [data_names]
transposed = []
ret: "MeshgridDataDict" = self.copy()
for n in data_names:
neworder, newaxes = self.reorder_axes_indices(n, **pos)
ret[n]['axes'] = newaxes
ret[n]['values'] = self[n]['values'].transpose(neworder)
for ax in self.axes(n):
if ax not in transposed:
ret[ax]['values'] = self[ax]['values'].transpose(neworder)
transposed.append(ax)
ret.validate()
return ret
# Tools for converting between different data types
def guess_shape_from_datadict(data: DataDict) -> \
Dict[str, Union[None, Tuple[List[str], Tuple[int, ...]]]]:
"""
Try to guess the shape of the datadict dependents from the axes values.
:param data: dataset to examine.
:return: a dictionary with the dependents as keys, and inferred shapes as
values. value is None, if the shape could not be inferred.
"""
shapes = {}
for d in data.dependents():
axnames = data.axes(d)
axes: Dict[str, np.ndarray] = {}
for a in axnames:
axdata = data.data_vals(a)
assert isinstance(axdata, np.ndarray)
axes[a] = axdata
shapes[d] = num.guess_grid_from_sweep_direction(**axes)
return shapes
def datadict_to_meshgrid(data: DataDict,
target_shape: Union[Tuple[int, ...], None] = None,
inner_axis_order: Union[None, List[str]] = None,
use_existing_shape: bool = False) \
-> MeshgridDataDict:
"""
Try to make a meshgrid from a dataset.
:param data: input DataDict.
:param target_shape: target shape. if ``None`` we use
``guess_shape_from_datadict`` to infer.
:param inner_axis_order: if axes of the datadict are not specified in the
'C' order (1st the slowest, last the fastest axis) then the
'true' inner order can be specified as a list of axes names, which has
to match the specified axes in all but order. The data is then
transposed to conform to the specified order.
        **Note**: if this is given, then `target_shape` needs to be given
in the order of this inner_axis_order. The output data will keep the
axis ordering specified in the `axes` property.
:param use_existing_shape: if ``True``, simply use the shape that the data
already has. For numpy-array data, this might already be present.
if ``False``, flatten and reshape.
:returns: the generated ``MeshgridDataDict``.
"""
# if the data is empty, return empty MeshgridData
if len([k for k, _ in data.data_items()]) == 0:
return MeshgridDataDict()
if not data.axes_are_compatible():
raise ValueError('Non-compatible axes, cannot grid that.')
if not use_existing_shape and data.is_expandable():
data = data.expand()
elif use_existing_shape:
target_shape = list(data.shapes().values())[0]
# guess what the shape likely is.
if target_shape is None:
shp_specs = guess_shape_from_datadict(data)
shps = []
for order_shape in shp_specs.values():
assert order_shape is not None
shps.append(order_shape[1])
if len(set(shps)) > 1:
raise ValueError('Cannot determine unique shape for all data.')
ret = list(shp_specs.values())[0]
if ret is None:
raise ValueError('Shape could not be inferred.')
# the guess-function returns both axis order as well as shape.
inner_axis_order, target_shape = ret
# construct new data
newdata = MeshgridDataDict(**misc.unwrap_optional(data.structure(add_shape=False)))
axlist = data.axes(data.dependents()[0])
for k, v in data.data_items():
vals = num.array1d_to_meshgrid(v['values'], target_shape, copy=True)
# if an inner axis order is given, we transpose to transform from that
# to the specified order.
if inner_axis_order is not None:
transpose_idxs = misc.reorder_indices(
inner_axis_order, axlist)
vals = vals.transpose(transpose_idxs)
newdata[k]['values'] = vals
newdata = newdata.sanitize()
newdata.validate()
return newdata
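# Rough usage sketch (field names and values are illustrative; the exact grid
# inference is delegated to num.guess_grid_from_sweep_direction):
#
#   dd = DataDict(
#       x=dict(values=[0, 0, 1, 1, 2, 2]),
#       y=dict(values=[0, 1, 0, 1, 0, 1]),
#       z=dict(values=[0, 0, 0, 1, 0, 2], axes=['x', 'y']),
#   )
#   dd.validate()
#   mdd = datadict_to_meshgrid(dd)   # should recover a (3, 2) grid here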
def meshgrid_to_datadict(data: MeshgridDataDict) -> DataDict:
"""
Make a DataDict from a MeshgridDataDict by reshaping the data.
:param data: input ``MeshgridDataDict``
:return: flattened ``DataDict``
"""
newdata = DataDict(**misc.unwrap_optional(data.structure(add_shape=False)))
for k, v in data.data_items():
val = v['values'].copy().reshape(-1)
newdata[k]['values'] = val
newdata = newdata.sanitize()
newdata.validate()
return newdata
# Tools for manipulating and transforming data
def _find_replacement_name(ddict: DataDictBase, name: str) -> str:
"""
Find a replacement name for a data field that already exists in a
datadict.
    Appends '_<index>' to the name.
:param ddict: datadict that contains the already existing field
:param name: the name that needs to be replaced
:return: a suitable replacement
"""
if name not in ddict:
return name
else:
idx = 0
newname = name + f"_{idx}"
while newname in ddict:
idx += 1
newname = name + f"_{idx}"
return newname
def combine_datadicts(*dicts: DataDict) -> Union[DataDictBase, DataDict]:
"""
Try to make one datadict out of multiple.
Basic rules:
- we try to maintain the input type
- return type is 'downgraded' to DataDictBase if the contents are not
compatible (i.e., different numbers of records in the inputs)
:returns: combined data
"""
# TODO: deal correctly with MeshGridData when combined with other types
# TODO: should we strictly copy all values?
# TODO: we should try to consolidate axes as much as possible. Currently
# axes in the return can be separated even if they match (caused
# by earlier mismatches)
ret = None
rettype = None
for d in dicts:
if ret is None:
ret = d.copy()
rettype = type(d)
else:
# if we don't have a well defined number of records anymore,
# need to revert the type to DataDictBase
if hasattr(d, 'nrecords') and hasattr(ret, 'nrecords'):
if d.nrecords() != ret.nrecords():
rettype = DataDictBase
else:
rettype = DataDictBase
ret = rettype(**ret)
# First, parse the axes in the to-be-added ddict.
# if dimensions with same names are present already in the current
# return ddict and are not compatible with what's to be added,
# rename the incoming dimension.
ax_map = {}
for d_ax in d.axes():
if d_ax in ret.axes():
if num.arrays_equal(d.data_vals(d_ax), ret.data_vals(d_ax)):
ax_map[d_ax] = d_ax
else:
newax = _find_replacement_name(ret, d_ax)
ax_map[d_ax] = newax
ret[newax] = d[d_ax]
elif d_ax in ret.dependents():
newax = _find_replacement_name(ret, d_ax)
ax_map[d_ax] = newax
ret[newax] = d[d_ax]
else:
ax_map[d_ax] = d_ax
ret[d_ax] = d[d_ax]
for d_dep in d.dependents():
if d_dep in ret:
newdep = _find_replacement_name(ret, d_dep)
else:
newdep = d_dep
dep_axes = [ax_map[ax] for ax in d[d_dep]['axes']]
ret[newdep] = d[d_dep]
ret[newdep]['axes'] = dep_axes
assert ret is not None
ret.validate()
return ret
|
py | b4132b2f1b7aad366bbfb3c6c9ba1a7281da3243 | """Elementwise operators"""
from __future__ import absolute_import as _abs
import tvm
from .. import tag
from ..util import get_const_int
@tvm.tag_scope(tag=tag.ELEMWISE)
def relu(x):
"""Take relu of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
Returns
-------
y : tvm.Tensor
The result.
"""
return tvm.compute(x.shape, lambda *i: tvm.max(x(*i), tvm.const(0, x.dtype)))
@tvm.tag_scope(tag=tag.ELEMWISE)
def leaky_relu(x, alpha):
"""Take leaky relu of input x.
Parameters
----------
x : tvm.Tensor
Input argument.
alpha : float
The slope for the small gradient when x < 0
Returns
-------
y : tvm.Tensor
The result.
"""
def _compute(*indices):
value = x(*indices)
calpha = tvm.const(alpha, value.dtype)
return tvm.select(value > 0, value, value * calpha)
return tvm.compute(x.shape, _compute)
@tvm.tag_scope(tag=tag.BROADCAST)
def prelu(x, slope, axis=1):
""" PReLU.
It accepts two arguments: an input ``x`` and a weight array ``W``
    and computes the output as :math:`PReLU(x) = x > 0 ? x : W * x`,
where :math:`*` is an elementwise multiplication for each sample in the
batch.
Arguments:
x : tvm.Tensor
Input argument.
slope : tvm.Tensor
Channelised slope tensor for prelu
axis : int
The axis where the channel data needs to be applied
Returns:
y : tvm.Tensor
The result.
Links:
[http://arxiv.org/pdf/1502.01852v1.pdf]
"""
assert len(x.shape) == 4 and len(slope.shape) == 1
assert axis < len(x.shape)
assert get_const_int(slope.shape[0]) == get_const_int(x.shape[axis])
def _compute_channelwise(*indices):
return tvm.select(x(*indices) > 0, x(*indices), x(*indices) * slope(indices[axis]))
return tvm.compute(x.shape, _compute_channelwise)
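# Rough usage sketch (old-style TVM API, matching the calls used above; not
# part of the original module):
#
#   x = tvm.placeholder((1, 3, 32, 32), name='x')
#   y = relu(x)
#   s = tvm.create_schedule(y.op)
#   f = tvm.build(s, [x, y], 'llvm')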
|
py | b4132c353ae2d4d9ef4c34101b2eb73e9f73ebf0 | # Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple
from synapse.api.constants import ReadReceiptEventFields, ReceiptTypes
from synapse.appservice import ApplicationService
from synapse.streams import EventSource
from synapse.types import JsonDict, ReadReceipt, UserID, get_domain_from_id
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class ReceiptsHandler:
def __init__(self, hs: "HomeServer"):
self.notifier = hs.get_notifier()
self.server_name = hs.config.server.server_name
self.store = hs.get_datastores().main
self.event_auth_handler = hs.get_event_auth_handler()
self.hs = hs
# We only need to poke the federation sender explicitly if its on the
# same instance. Other federation sender instances will get notified by
# `synapse.app.generic_worker.FederationSenderHandler` when it sees it
# in the receipts stream.
self.federation_sender = None
if hs.should_send_federation():
self.federation_sender = hs.get_federation_sender()
# If we can handle the receipt EDUs we do so, otherwise we route them
# to the appropriate worker.
if hs.get_instance_name() in hs.config.worker.writers.receipts:
hs.get_federation_registry().register_edu_handler(
"m.receipt", self._received_remote_receipt
)
else:
hs.get_federation_registry().register_instances_for_edu(
"m.receipt",
hs.config.worker.writers.receipts,
)
self.clock = self.hs.get_clock()
self.state = hs.get_state_handler()
async def _received_remote_receipt(self, origin: str, content: JsonDict) -> None:
"""Called when we receive an EDU of type m.receipt from a remote HS."""
receipts = []
for room_id, room_values in content.items():
# If we're not in the room just ditch the event entirely. This is
# probably an old server that has come back and thinks we're still in
# the room (or we've been rejoined to the room by a state reset).
is_in_room = await self.event_auth_handler.check_host_in_room(
room_id, self.server_name
)
if not is_in_room:
logger.info(
"Ignoring receipt for room %r from server %s as we're not in the room",
room_id,
origin,
)
continue
for receipt_type, users in room_values.items():
for user_id, user_values in users.items():
if get_domain_from_id(user_id) != origin:
logger.info(
"Received receipt for user %r from server %s, ignoring",
user_id,
origin,
)
continue
receipts.append(
ReadReceipt(
room_id=room_id,
receipt_type=receipt_type,
user_id=user_id,
event_ids=user_values["event_ids"],
data=user_values.get("data", {}),
)
)
await self._handle_new_receipts(receipts)
async def _handle_new_receipts(self, receipts: List[ReadReceipt]) -> bool:
"""Takes a list of receipts, stores them and informs the notifier."""
min_batch_id: Optional[int] = None
max_batch_id: Optional[int] = None
for receipt in receipts:
res = await self.store.insert_receipt(
receipt.room_id,
receipt.receipt_type,
receipt.user_id,
receipt.event_ids,
receipt.data,
)
if not res:
# res will be None if this read receipt is 'old'
continue
stream_id, max_persisted_id = res
if min_batch_id is None or stream_id < min_batch_id:
min_batch_id = stream_id
if max_batch_id is None or max_persisted_id > max_batch_id:
max_batch_id = max_persisted_id
# Either both of these should be None or neither.
if min_batch_id is None or max_batch_id is None:
# no new receipts
return False
affected_room_ids = list({r.room_id for r in receipts})
self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids)
# Note that the min here shouldn't be relied upon to be accurate.
await self.hs.get_pusherpool().on_new_receipts(
min_batch_id, max_batch_id, affected_room_ids
)
return True
async def received_client_receipt(
self, room_id: str, receipt_type: str, user_id: str, event_id: str, hidden: bool
) -> None:
"""Called when a client tells us a local user has read up to the given
event_id in the room.
"""
receipt = ReadReceipt(
room_id=room_id,
receipt_type=receipt_type,
user_id=user_id,
event_ids=[event_id],
data={"ts": int(self.clock.time_msec()), "hidden": hidden},
)
is_new = await self._handle_new_receipts([receipt])
if not is_new:
return
if self.federation_sender and not (
self.hs.config.experimental.msc2285_enabled and hidden
):
await self.federation_sender.send_read_receipt(receipt)
class ReceiptEventSource(EventSource[int, JsonDict]):
def __init__(self, hs: "HomeServer"):
self.store = hs.get_datastores().main
self.config = hs.config
@staticmethod
def filter_out_hidden(events: List[JsonDict], user_id: str) -> List[JsonDict]:
visible_events = []
# filter out hidden receipts the user shouldn't see
for event in events:
content = event.get("content", {})
new_event = event.copy()
new_event["content"] = {}
for event_id in content.keys():
event_content = content.get(event_id, {})
m_read = event_content.get(ReceiptTypes.READ, {})
                # If m_read is missing, copy over the original event_content as there is nothing to process here
if not m_read:
new_event["content"][event_id] = event_content.copy()
continue
new_users = {}
for rr_user_id, user_rr in m_read.items():
try:
hidden = user_rr.get("hidden")
except AttributeError:
# Due to https://github.com/matrix-org/synapse/issues/10376
# there are cases where user_rr is a string, in those cases
# we just ignore the read receipt
continue
if hidden is not True or rr_user_id == user_id:
new_users[rr_user_id] = user_rr.copy()
                        # If hidden has a value, replace hidden with the correct prefixed key
if hidden is not None:
new_users[rr_user_id].pop("hidden")
new_users[rr_user_id][
ReadReceiptEventFields.MSC2285_HIDDEN
] = hidden
# Set new users unless empty
if len(new_users.keys()) > 0:
new_event["content"][event_id] = {ReceiptTypes.READ: new_users}
# Append new_event to visible_events unless empty
if len(new_event["content"].keys()) > 0:
visible_events.append(new_event)
return visible_events
async def get_new_events(
self,
user: UserID,
from_key: int,
limit: Optional[int],
room_ids: Iterable[str],
is_guest: bool,
explicit_room_id: Optional[str] = None,
) -> Tuple[List[JsonDict], int]:
from_key = int(from_key)
to_key = self.get_current_key()
if from_key == to_key:
return [], to_key
events = await self.store.get_linearized_receipts_for_rooms(
room_ids, from_key=from_key, to_key=to_key
)
if self.config.experimental.msc2285_enabled:
events = ReceiptEventSource.filter_out_hidden(events, user.to_string())
return events, to_key
async def get_new_events_as(
self, from_key: int, service: ApplicationService
) -> Tuple[List[JsonDict], int]:
"""Returns a set of new read receipt events that an appservice
may be interested in.
Args:
from_key: the stream position at which events should be fetched from
service: The appservice which may be interested
Returns:
A two-tuple containing the following:
* A list of json dictionaries derived from read receipts that the
appservice may be interested in.
* The current read receipt stream token.
"""
from_key = int(from_key)
to_key = self.get_current_key()
if from_key == to_key:
return [], to_key
# Fetch all read receipts for all rooms, up to a limit of 100. This is ordered
# by most recent.
rooms_to_events = await self.store.get_linearized_receipts_for_all_rooms(
from_key=from_key, to_key=to_key
)
# Then filter down to rooms that the AS can read
events = []
for room_id, event in rooms_to_events.items():
if not await service.matches_user_in_member_list(room_id, self.store):
continue
events.append(event)
return events, to_key
def get_current_key(self, direction: str = "f") -> int:
return self.store.get_max_receipt_stream_id()
|
py | b4132df8ae5cf0d3023c1d4e6d9e2f18d78024aa | import os, sys, time
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from reader import batch_patcher as patcher
from network.WideResNet.WideResNet import *
from network.DenseNet.DenseNet import *
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'
tf.logging.set_verbosity(tf.logging.ERROR)
def AdaBatch(gpu_id, input_reader, model_type, training_epochs, batch_size, lr_boundaries, lr_values, optimizer_type, update_method, warm_up_period, s_e=100.0, pretrain=0, log_dir="log"):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
text_log = []
text_log.append("epoch, time(s), learning rate, minibatch loss, minibatch error, test loss, test error")
num_train_images = input_reader.num_train_images
num_val_images = input_reader.num_val_images
num_label = input_reader.num_classes
image_shape = [input_reader.width, input_reader.height, input_reader.depth]
train_batch_patcher = patcher.BatchPatcher(num_train_images, batch_size, num_label, s_e=s_e, update_method=update_method)
validation_batch_patcher = patcher.BatchPatcher(num_val_images, batch_size, num_label, update_method=update_method)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.visible_device_list = str(gpu_id)
config.gpu_options.allow_growth = True
graph = tf.Graph()
with graph.as_default():
with tf.device('/gpu:'+str(gpu_id)):
with tf.Session(config=config) as sess:
# Input Graph Generation #############################################################################
t_ids, t_images, t_labels = input_reader.data_read(batch_size, train=True)
v_ids, v_images, v_labels = input_reader.data_read(batch_size, train=False)
# Model Graph Construction ###########################################################################
if model_type == "DenseNet-25-12":
model = DenseNet(25, 12, image_shape, num_label, batch_size, batch_size)
elif model_type == "WideResNet16-8":
model = WideResNet(16, 8, image_shape, num_label, batch_size, batch_size)
train_loss_op, train_accuracy_op, train_op, _, train_distance_op = model.build_train_op(lr_boundaries, lr_values, optimizer_type)
test_loss_op, test_accuracy_op, _ = model.build_test_op()
                # Data load in memory ################################################################################
print("start to load data set.")
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
train_batch_patcher.bulk_load_in_memory(sess, t_ids, t_images, t_labels)
validation_batch_patcher.bulk_load_in_memory(sess, v_ids, v_images, v_labels)
start_time = time.time()
# Model Initialization ###########################################################################
                # init params: the initial (warm-up) epochs are shared across methods via a pretrained checkpoint. See paper.
if pretrain != 0:
start_time = time.time()
saver = tf.train.Saver()
file_dir = "init_weight/" + input_reader.dataset_name + "/" + model_type + "_" + optimizer_type + "_lr=" + str(lr_values[0]) + "_e=" + str(pretrain) + "/"
minus_start_time = 0
with open(file_dir + "text_log.csv") as f:
for line in f:
print(line, end="")
text_log.append(line.rstrip())
minus_start_time = line.split(",")[1]
start_time = start_time - float(minus_start_time)
saver.restore(sess, file_dir + "model.ckpt")
for i in range(train_batch_patcher.num_iters_per_epoch):
ids, images, labels = train_batch_patcher.get_init_mini_batch(i)
distance = sess.run(train_distance_op, feed_dict={model.train_image_placeholder: images, model.train_label_placeholder: labels})
train_batch_patcher.update_prob_table(ids, distance)
print(train_batch_patcher.prob_table.table)
print("shared weight is successfully loaded")
else:
sess.run(tf.global_variables_initializer())
                # Training Process ####################################################################################
for epoch in range(pretrain, training_epochs):
if epoch < warm_up_period:
is_warm_up = True
else:
is_warm_up = False
                    # (1) Mini-batch loss and error along with network updates
avg_mini_loss = 0.0
avg_mini_acc = 0.0
for i in range(train_batch_patcher.num_iters_per_epoch):
# Next batch depends on the method: {Ada_Boundary, Ada-Hard, Ada-Uniform}
ids, images, labels = train_batch_patcher.get_next_mini_batch(num_of_sample=batch_size, is_warm_up=is_warm_up)
mini_loss, mini_acc, _, distance = sess.run([train_loss_op, train_accuracy_op, train_op, train_distance_op], feed_dict={model.train_image_placeholder: images, model.train_label_placeholder: labels})
train_batch_patcher.update_prob_table(ids, distance)
avg_mini_loss += mini_loss
avg_mini_acc += mini_acc
avg_mini_loss /= train_batch_patcher.num_iters_per_epoch
avg_mini_acc /= train_batch_patcher.num_iters_per_epoch
# (2) Compute training loss and error
avg_train_loss = 0.0
avg_train_acc = 0.0
for i in range(train_batch_patcher.num_iters_per_epoch):
ids, images, labels = train_batch_patcher.get_init_mini_batch(i)
train_loss, train_acc = sess.run([test_loss_op, test_accuracy_op], feed_dict={model.test_image_placeholder: images, model.test_label_placeholder: labels})
avg_train_loss += train_loss
avg_train_acc += train_acc
avg_train_loss /= train_batch_patcher.num_iters_per_epoch
avg_train_acc /= train_batch_patcher.num_iters_per_epoch
# (3) Validation (or test) loss and error
avg_val_loss = 0.0
avg_val_acc = 0.0
for i in range(validation_batch_patcher.num_iters_per_epoch):
ids, images, labels = validation_batch_patcher.get_init_mini_batch(i)
val_loss, val_acc = sess.run([test_loss_op, test_accuracy_op], feed_dict={model.test_image_placeholder: images, model.test_label_placeholder: labels})
avg_val_loss += val_loss
avg_val_acc += val_acc
avg_val_loss /= validation_batch_patcher.num_iters_per_epoch
avg_val_acc /= validation_batch_patcher.num_iters_per_epoch
# Log Writing ####################################################################################
cur_lr = sess.run(model.learning_rate)
print((epoch + 1), ", ", int(time.time() - start_time) ,", ", cur_lr, ", ", avg_mini_loss, ", ", (1.0-avg_mini_acc), ", ", avg_train_loss, ", ", (1.0-avg_train_acc), ", ", avg_val_loss, ", ", (1.0-avg_val_acc))
text_log.append(str(epoch + 1) + ", " + str(int(time.time() - start_time)) + ", " + str(cur_lr) + ", " + str(avg_mini_loss) + ", " + str(1.0-avg_mini_acc) + ", " + str(avg_train_loss) + ", " + str(1.0-avg_train_acc) + ", " + str(avg_val_loss) + ", " + str(1.0-avg_val_acc))
coord.request_stop()
coord.join(threads)
sess.close()
# Log Flushing
f = open(log_dir + "/text_log.csv", "w")
for text in text_log:
f.write(text + "\n")
f.close()
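# Hypothetical invocation sketch (not part of the original script; every value
# below is an illustrative assumption). `reader` must expose num_train_images,
# num_val_images, num_classes, width/height/depth and data_read(), as used above.
#
#     AdaBatch(gpu_id=0, input_reader=reader, model_type="WideResNet16-8",
#              training_epochs=100, batch_size=128,
#              lr_boundaries=[15000, 22500], lr_values=[0.1, 0.01, 0.001],
#              optimizer_type="momentum", update_method="Ada_Boundary",
#              warm_up_period=10, log_dir="log/ada_boundary")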
|
py | b4132e0ed2eb527a4f9388952aae8e5c01cd13a0 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# https://github.com/kennethreitz/setup.py ❤️ ✨ 🍰 ✨
import os
from setuptools import setup, find_packages
NAME = 'stones'
DESCRIPTION = 'Library for Persistent key-value containers, compatible with Python dict.'
KEYWORDS = 'persistent dict'
URL = 'https://github.com/croqaz/Stones'
AUTHOR = 'Cristi Constantin'
EMAIL = '[email protected]'
here = os.path.abspath(os.path.dirname(__file__))
about = {}
try:
with open(os.path.join(here, 'README.md')) as f:
long_description = '\n' + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
setup(
version=about['__version__'],
name=NAME,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
keywords=KEYWORDS,
url=URL,
author=AUTHOR,
author_email=EMAIL,
license='MIT',
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=True,
python_requires='>= 3.6',
extras_require={
'dev': ['flake8', 'codecov'],
'test': ['pytest', 'pytest-cov'],
},
classifiers=[
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development',
'Topic :: Database',
])
|
py | b4132fab3ed7e79a1b5cca2b3987fda554a6cbd6 |
"""
Python module for getting stock data
@module: pinance
@author: neberej (https://github.com/neberej)
@version: 1.00
"""
# Dependencies
import sys, os, math
sys.path.append(os.path.join(os.path.dirname(__file__), "engine"))
import gfinance
import gfinancenews
import yfinance
import yfinance2
from datetime import datetime, timedelta
millnames = ['', 'K', 'M', 'B', 'T']  # thousands, millions, billions, trillions
# Human readable numbers
def millify(n):
n = float(n)
millidx = max(0,min(len(millnames)-1,
int(math.floor(0 if n == 0 else math.log10(abs(n))/3))))
return '{:.0f}{}'.format(n / 10**(3 * millidx), millnames[millidx])
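# Worked example (comment only, not executed): with the suffix table above,
# millify(1234567) gives floor(log10(1234567)/3) == 2, so the value is scaled
# by 10**6 and rendered as '1M'; millify(950) stays in the '' bucket as '950'.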
# Check if dictionary
def isValid(item):
if(type(item) is dict):
return True
return False
# Combine data from all three sources and remove all keys which have 'None' as values
def combine_objects(a, b, c):
data = {}
if isValid(a):
data.update(a)
if isValid(b):
data.update(b)
if isValid(c):
data.update(c)
res = {k:v for k,v in data.items() if v is not None}
return res
# Convert date/time to unix time for options
def totimestamp(inputdate, epoch=datetime(1970,1,1)):
dt = datetime.strptime(inputdate, '%Y-%m-%d')
td = dt - epoch
timestamp = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 1e6 # td.total_seconds()
return int(timestamp)
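# Worked example (comment only): totimestamp('2018-01-19') computes
# datetime(2018, 1, 19) - datetime(1970, 1, 1) = 17550 days,
# i.e. 17550 * 86400 = 1516320000 seconds since the epoch.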
# Extract expiry, call/put and strike price (allows entering them in any order)
def options_params(a, b, c):
expiry = totimestamp(a)
return expiry, b, c
class Base(object):
def __init__(self, symbol):
self.symbol = symbol
def __request_quotes__(self):
gfinance_data = gfinance.get_quotes(self.symbol)
yfinance_data = yfinance.get_quotes(self.symbol)
yfinance2_data = yfinance2.get_quotes(self.symbol)
return combine_objects(gfinance_data, yfinance_data, yfinance2_data)
def __request_news__(self):
gfinance_news = gfinancenews.get_news(self.symbol)
return gfinance_news
def __request_options__(self, a, b, c):
yfinance2_options = yfinance2.get_options(self.symbol, a, b, c)
return yfinance2_options
def __getQuotes__(self):
self.quotes_data = self.__request_quotes__()
def __getNews__(self):
self.news_data = self.__request_news__()
def __getOptions__(self, a, b, c):
expiry, type, strike = options_params(a, b, c)
self.options_data = self.__request_options__(expiry, type, strike)
class Pinance(Base):
def __init__(self, symbol):
super(Pinance, self).__init__(symbol)
self.symbol = symbol
def get_quotes(self):
self.__getQuotes__()
def get_news(self):
self.__getNews__()
def get_options(self, a, b, c):
self.__getOptions__(a, b, c)
def quotes_data(self):
return self.quotes_data
def news_data(self):
return self.news_data
def options_data(self):
return self.options_data
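# Hypothetical usage sketch (the symbol and option parameters are examples; the
# engine modules above perform live network calls, which may fail offline):
#
#     stock = Pinance("AAPL")
#     stock.get_quotes()
#     print(stock.quotes_data)                  # combined dict from the three sources
#     stock.get_options("2018-01-19", "P", 180)
#     print(stock.options_data)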
|
py | b41331a27d9174b401a838ed7a4383ce20b13af6 | from easygraphics.turtle import *
def main():
create_world()
set_speed(50)
while is_run():
fd(100)
rt(90)
fd(100)
rt(90)
fd(50)
rt(90)
fd(50)
rt(90)
fd(100)
rt(90)
fd(25)
rt(90)
fd(25)
rt(90)
fd(50)
rt(10)
forward(50)
close_world()
easy_run(main) |
py | b41331b2ee01211723cfba1ff86dbbda96942fc5 | ACCEPTED_SEMTYPES = {
'T020', # Acquired Abnormality, ex.: Hemorrhoids; Hernia, Femoral; Cauliflower ear
# 'T052', # Activity, ex.: Expeditions; Information Distribution; Social Planning
# 'T100', # Age Group, ex.: Adult; Infant, Premature; Adolescent (age group)
'T087', # Amino Acid Sequence, ex.: Signal Peptides; Homologous Sequences, Amino Acid; Abnormal amino acid
'T116', # Amino Acid, Peptide, or Protein, ex.: Amino Acids, Cyclic; Glycopeptides; Keratin
'T011', # Amphibian, ex.: Salamandra; Urodela; Brazilian horned frog
'T190', # Anatomical Abnormality, ex.: Bronchial Fistula; Foot Deformities; Hyperostosis of skull
'T017', # Anatomical Structure, ex.: Cadaver; Pharyngostome; Anatomic structures
'T008', # Animal, ex.: Animals; Animals, Laboratory; Carnivore
'T195', # Antibiotic, ex.: Antibiotics; bactericide; Thienamycins
'T194', # Archaeon, ex.: Thermoproteales; Haloferax volcanii; Methanospirillum
'T007', # Bacterium, ex.: Acetobacter; Bacillus cereus; Cytophaga
# 'T053', # Behavior, ex.: Homing Behavior; Sexuality; Habitat Selection
'T038', # Biologic Function, ex.: Antibody Formation; Drug resistance; Homeostasis
'T123', # Biologically Active Substance, ex.: Cytokinins; Pheromone
'T091', # Biomedical Occupation or Discipline, ex.: Adolescent Medicine; Cellular Neurobiology; Dentistry
'T122', # Biomedical or Dental Material, ex.: Acrylic Resins; Bone Cements; Dentifrices
# 'T012', # Bird, ex.: Serinus; Ducks; Quail
'T029', # Body Location or Region, ex.: Forehead; Sublingual Region; Base of skull structure
'T023', # Body Part, Organ, or Organ Component, ex.: Aorta; Brain Stem; Structure of neck of femur
# 'T030', # Body Space or Junction, ex.: Knee joint; Greater sac of peritoneum; Synapses
'T031', # Body Substance, ex.: Amniotic Fluid; saliva; Smegma
'T022', # Body System, ex.: Endocrine system; Renin-angiotensin system; Reticuloendothelial System
'T088', # Carbohydrate Sequence, ex.: Carbohydrate Sequence; Abnormal carbohydrate sequence
'T025', # Cell, ex.: B-Lymphocytes; Dendritic Cells; Fibroblasts
'T026', # Cell Component, ex.: Axon; Golgi Apparatus; Organelles
'T043', # Cell Function, ex.: Cell Cycle; Cell division; Phagocytosis
'T049', # Cell or Molecular Dysfunction, ex.: DNA Damage; Wallerian Degeneration; Atypical squamous metaplasia
'T103', # Chemical, ex.: Acids; Chemicals; Ionic Liquids
'T120', # Chemical Viewed Functionally, ex.: Aerosol Propellants; Detergents; Stabilizing Agents
'T104', # Chemical Viewed Structurally, ex.: Ammonium Compounds; Cations; Sulfur Compounds
# 'T185', # Classification, ex.: Anatomy (MeSH Category); Tumor Stage Classification; axis i
'T201', # Clinical Attribute, ex.: Bone Density; heart rate; Range of Motion, Articular
'T200', # Clinical Drug, ex.: Ranitidine 300 MG Oral Tablet [Zantac]; Aspirin 300 MG Delayed Release Oral
# 'T077', # Conceptual Entity, ex.: Geographic Factors; Fractals; Secularism
# 'T019', # Congenital Abnormality, ex.: Albinism; Cleft palate with cleft lip; Polydactyly of toes
# 'T056', # Daily or Recreational Activity, ex.: Badminton; Dancing; Swimming
'T060', # Diagnostic Procedure, ex.: Biopsy; Heart Auscultation; Magnetic Resonance Imaging
'T047', # Disease or Syndrome, ex.: Diabetes Mellitus; Drug Allergy; Malabsorption Syndrome
'T203', # Drug Delivery Device, ex.: Nordette 21 Day Pack; {7 (Terazosin 1 MG Oral Tablet) / 7 (Terazosin 2 MG
# 'T065', # Educational Activity, ex.: Academic Training; Family Planning Training; Preceptorship
'T196', # Element, Ion, or Isotope, ex.: Carbon; Chromium Isotopes; Radioisotopes
'T018', # Embryonic Structure, ex.: Blastoderm; Fetus; Neural Crest
# 'T071', # Entity, ex.: Gifts, Financial; Image; Product Part
'T069', # Environmental Effect of Humans, ex.: Air Pollution; Desertification; Bioremediation
'T126', # Enzyme, ex.: GTP Cyclohydrolase II; enzyme substrate complex; arginine amidase
'T204', # Eukaryote, ex.: Order Acarina; Bees; Plasmodium malariae
'T051', # Event, ex.: Anniversaries; Exposure to Mumps virus (event); Device Unattended
'T050', # Experimental Model of Disease, ex.: Alloxan Diabetes; Liver Cirrhosis, Experimental; Transient Gene Knock-Out
# 'T099', # Family Group, ex.: Daughter; Is an only child; Unmarried Fathers
'T033', # Finding, ex.: Birth History; Downward displacement of diaphragm; Decreased glucose level
# 'T013', # Fish, ex.: Bass; Salmonidae; Whitefish
# 'T168', # Food, ex.: Beverages; Egg Yolk (Dietary); Ice Cream
# 'T021', # Fully Formed Anatomical Structure, ex.: Entire body as a whole; Female human body; Set of parts of human body
'T169', # Functional Concept, ex.: Interviewer Effect; Problem Formulation; Endogenous
'T004', # Fungus, ex.: Aspergillus clavatus; Blastomyces; Neurospora
'T028', # Gene or Genome, ex.: Alleles; Genome, Human; rRNA Operon
'T045', # Genetic Function, ex.: Early Gene Transcription; Gene Amplification; RNA Splicing
'T083', # Geographic Area, ex.: Baltimore; Canada; Far East
'T064', # Governmental or Regulatory Activity, ex.: Certification; Credentialing; Public Policy
'T096', # Group, ex.: Focus Groups; jury; teams
# 'T102', # Group Attribute, ex.: Family Size; Group Structure; Life Expectancy
'T131', # Hazardous or Poisonous Substance, ex.: Carcinogens; Fumigant; Mutagens
'T058', # Health Care Activity, ex.: ambulatory care services; Clinic Activities; Preventive Health Services
# 'T093', # Health Care Related Organization, ex.: Centers for Disease Control and Prevention (U.S.); Halfway Houses;
# 'T125', # Hormone, ex.: Enteric Hormones; thymic humoral factor; Prohormone
# 'T016', # Human, ex.: Homo sapiens; jean piaget; Member of public
'T068', # Human-caused Phenomenon or Process, ex.: Baby Boom; Cultural Evolution; Mass Media
# 'T078', # Idea or Concept, ex.: Capitalism; Civil Rights; Ethics
# 'T129', # Immunologic Factor, ex.: Antigens; Immunologic Factors; Blood group antigen P
'T130', # Indicator, Reagent, or Diagnostic Aid, ex.: Fluorescent Dyes; Indicators and Reagents; India ink stain
'T055', # Individual Behavior, ex.: Assertiveness; Grooming; Risk-Taking
'T037', # Injury or Poisoning, ex.: Accidental Falls; Carbon Monoxide Poisoning; Snake Bites
'T197', # Inorganic Chemical, ex.: Carbonic Acid; aluminum nitride; ferric citrate
'T170', # Intellectual Product, ex.: Decision Support Techniques; Information Systems; Literature
'T034', # Laboratory or Test Result, ex.: Blood Flow Velocity; Serum Calcium Level; Spinal Fluid Pressure
'T059', # Laboratory Procedure, ex.: Blood Protein Electrophoresis; Crystallography; Radioimmunoassay
'T171', # Language, ex.: Armenian language; braille; Bilingualism
'T066', # Machine Activity, ex.: Computer Simulation; Equipment Failure; Natural Language Processing
'T015', # Mammal, ex.: Ursidae Family; Hamsters; Macaca
'T073', # Manufactured Object, ex.: car seat; Cooking and Eating Utensils; Goggles
'T074', # Medical Device, ex.: Bone Screws; Headgear, Orthodontic; Compression Stockings
'T048', # Mental or Behavioral Dysfunction, ex.: Agoraphobia; Cyclothymic Disorder; Frigidity
# 'T041', # Mental Process, ex.: Anger; Auditory Fatigue; Avoidance Learning
'T063', # Molecular Biology Research Technique, ex.: Northern Blotting; Genetic Engineering; In Situ Hybridization
'T044', # Molecular Function, ex.: Binding, Competitive; Electron Transport; Glycolysis
'T085', # Molecular Sequence, ex.: Genetic Code; Homologous Sequences; Molecular Sequence
'T070', # Natural Phenomenon or Process, ex.: Air Movements; Corrosion; Lightning (phenomenon)
'T191', # Neoplastic Process, ex.: Abdominal Neoplasms; Bowen's Disease; Polyp in nasopharynx
'T114', # Nucleic Acid, Nucleoside, or Nucleotide, ex.: Cytosine Nucleotides; Guanine; Oligonucleotides
'T086', # Nucleotide Sequence, ex.: Base Sequence; Direct Repeat; RNA Sequence
'T090', # Occupation or Discipline, ex.: Aviation; Craniology; Ecology
'T057', # Occupational Activity, ex.: Collective Bargaining; Commerce; Containment of Biohazards
'T042', # Organ or Tissue Function, ex.: Osteogenesis; Renal Circulation; Tooth Calcification
'T109', # Organic Chemical, ex.: Benzene Derivatives
'T001', # Organism, ex.: Organism; Infectious agent; Heterotroph
# 'T032', # Organism Attribute, ex.: Age; Birth Weight; Eye Color
'T040', # Organism Function, ex.: Breeding; Hibernation; Motor Skills
# 'T092', # Organization, ex.: Labor Unions; United Nations; Boarding school
'T046', # Pathologic Function, ex.: Inflammation; Shock; Thrombosis
# 'T101', # Patient or Disabled Group, ex.: Amputees; Institutionalized Child; Mentally Ill Persons
'T121', # Pharmacologic Substance, ex.: Antiemetics; Cardiovascular Agents; Alka-Seltzer
'T067', # Phenomenon or Process, ex.: Disasters; Motor Traffic Accidents; Depolymerization
# 'T072', # Physical Object, ex.: Printed Media; Meteors; Physical object
'T039', # Physiologic Function, ex.: Biorhythms; Hearing; Vasodilation
# 'T002', # Plant, ex.: Aloe; Pollen; Helianthus species
# 'T098', # Population Group, ex.: Asian Americans; Ethnic group; Adult Offenders
# 'T097', # Professional or Occupational Group, ex.: Clergy; Demographers; Hospital Volunteers
# 'T094', # Professional Society, ex.: American Medical Association; International Council of Nurses; Library
# 'T080', # Qualitative Concept, ex.: Clinical Competence; Consumer Satisfaction; Health Status
# 'T081', # Quantitative Concept, ex.: Age Distribution; Metric System; Selection Bias
# 'T192', # Receptor, ex.: Binding Sites; Lymphocyte antigen CD4 receptor; integrin alpha11beta1
'T089', # Regulation or Law, ex.: Building Codes; Criminal Law; Health Planning Guidelines
# 'T014', # Reptile, ex.: Alligators; Water Mocassin; Genus Python (organism)
# 'T062', # Research Activity, ex.: Animal Experimentation; Biomedical Research; Experimental Replication
# 'T075', # Research Device, ex.: Electrodes, Enzyme; DNA Microarray Chip; Particle Count and Size Analyzer
# 'T095', # Self-help or Relief Organization, ex.: Alcoholics Anonymous; Charities - organization; Red Cross
'T184', # Sign or Symptom, ex.: Dyspnea; Nausea; Pain
# 'T054', # Social Behavior, ex.: Acculturation; Communication; Interpersonal Relations
# 'T082', # Spatial Concept, ex.: Mandibular Rest Position; Lateral; Extrinsic
'T167', # Substance, ex.: Air (substance); Fossils; Plastics
# 'T079', # Temporal Concept, ex.: Birth Intervals; Half-Life; Postoperative Period
'T061', # Therapeutic or Preventive Procedure, ex.: Cesarean section; Dermabrasion; Family psychotherapy
'T024', # Tissue, ex.: Cartilage; Endothelium; Epidermis
'T010', # Vertebrate, ex.: Vertebrates; Gnathostomata vertebrate; Craniata <chordata>
'T005', # Virus, ex.: Coliphages; Echovirus; Parvoviridae
'T127' # Vitamin, ex.: 5,25-Dihydroxy cholecalciferol; alpha-tocopheryl oxalate; Vitamin A [EPC]
} |
py | b4133256f8ee472601cd212ce75405d08e94afaf | from modules.utilities import check_dir
from shutil import copytree, copy2, make_archive
from glob import glob
project_dir = '/home/dakorda/Python/NN/'
backup_dir_pref = '/home/dakorda/Python/NN/backup/'
def make_backup(version: str) -> None:
# definition of backup dirs and creating them
backup_dir = "".join((backup_dir_pref, version))
backup_dir_modules = "".join((backup_dir, '/modules/'))
backup_dir_models = "".join((backup_dir, '/Models/'))
backup_dir_datasets = "".join((backup_dir, '/Datasets/'))
backup_dir_range_test = "".join((backup_dir, '/range_test_data/'))
check_dir(backup_dir_modules)
# check_dir(backup_dir_models) # copytree creates the folder
# check_dir(backup_dir_datasets) # copytree creates the folder
# check_dir(backup_dir_range_test) # copytree creates the folder
# copy main* and make_backup
source = "".join((project_dir, '/ma*.py'))
for file in glob(source):
copy2(file, backup_dir)
# copy modules
source = "".join((project_dir, '/modules/*.py'))
for file in glob(source):
copy2(file, backup_dir_modules)
# copy models
source = "".join((project_dir, '/Models/'))
copytree(source, backup_dir_models)
# copy datasets
source = "".join((project_dir, '/Datasets/'))
copytree(source, backup_dir_datasets)
source = "".join((project_dir, '/range_test_data/'))
copytree(source, backup_dir_range_test)
# zip the folder
make_archive(backup_dir, 'zip', backup_dir)
if __name__ == '__main__':
make_backup('v1.1')
|
py | b41332e661db0d8b175c89d3a0dc62400e673e45 | from pathlib import Path
from PIL import Image
import numpy as np
# Constants
TRAINING_PATH = "/Users/gantlaborde/Downloads/rps"
SPRITE_SIZE = 64
# Initialization
x_data = []
y_data = []
final_image = np.array([])
y_offset = 0
new_im = Image.new('RGB', (SPRITE_SIZE*SPRITE_SIZE, 2520))
# Load the training sprite by looping over every image file
for image_file in Path(TRAINING_PATH).glob("**/*.png"):
# Load the current image file
src_image = Image.open(image_file)
# make it smaller
downsized = src_image.resize((SPRITE_SIZE,SPRITE_SIZE))
# get 1px high version
pixels = list(downsized.getdata())
smoosh = Image.new('RGB', (SPRITE_SIZE * SPRITE_SIZE, 1))
smoosh.putdata(pixels)
# store image
x_data.append(smoosh)
# Use image path to build our answer key
if "rock" in image_file.stem:
y_data.append(1)
elif "paper" in image_file.stem:
y_data.append(2)
else:
y_data.append(3)
# Now randomize X and Y the same way before making data
# (the JS code splits then randomizes) DERP!!!
assert len(y_data) == len(x_data)
p = np.random.permutation(len(y_data))
npy = np.array(y_data)
shuffled_y = npy[p].tolist()
one_hot_y = []
# Build the data image and 1-hot encoded answer array
for idx in p:
# build master sprite 1 pixel down at a time
new_im.paste(x_data[idx], (0, y_offset))
# build 1-hot encoded answer key
if shuffled_y[y_offset] == 1:
one_hot_y.append(1)
one_hot_y.append(0)
one_hot_y.append(0)
elif shuffled_y[y_offset] == 2:
one_hot_y.append(0)
one_hot_y.append(1)
one_hot_y.append(0)
else:
one_hot_y.append(0)
one_hot_y.append(0)
one_hot_y.append(1)
# NEEEEXXXXXXT
y_offset += 1
# Save answers file (Y)
newFile = open("labels_uint8", "wb")
newFileByteArray = bytearray(one_hot_y)
bytesWritten = newFile.write(newFileByteArray)
# should be num classes * original answer key size
assert bytesWritten == (3 * len(y_data))
# Save Data Sprite (X)
new_im.save('data.png')
# Good ol Debugging Stuff
# new_im.show()
# print(str(shuffled_y))
# print(str(one_hot_y)) |
py | b413351405738f763ba947e6383f9a6174f43ba5 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bytcoyn Core Developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't.
Also test that nodes that send unsupported service bits to arcanad are disconnected
and don't receive a VERACK. Unsupported service bits are currently 1 << 5 and
1 << 7 (until August 1st 2018)."""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
banscore = 10
class CLazyNode(P2PInterface):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
def bad_message(self, message):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
def on_open(self):
self.ever_connected = True
def on_version(self, message): self.bad_message(message)
def on_verack(self, message): self.bad_message(message)
def on_reject(self, message): self.bad_message(message)
def on_inv(self, message): self.bad_message(message)
def on_addr(self, message): self.bad_message(message)
def on_getdata(self, message): self.bad_message(message)
def on_getblocks(self, message): self.bad_message(message)
def on_tx(self, message): self.bad_message(message)
def on_block(self, message): self.bad_message(message)
def on_getaddr(self, message): self.bad_message(message)
def on_headers(self, message): self.bad_message(message)
def on_getheaders(self, message): self.bad_message(message)
def on_ping(self, message): self.bad_message(message)
def on_mempool(self, message): self.bad_message(message)
def on_pong(self, message): self.bad_message(message)
def on_feefilter(self, message): self.bad_message(message)
def on_sendheaders(self, message): self.bad_message(message)
def on_sendcmpct(self, message): self.bad_message(message)
def on_cmpctblock(self, message): self.bad_message(message)
def on_getblocktxn(self, message): self.bad_message(message)
def on_blocktxn(self, message): self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
# send a bunch of veracks without sending a message. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if arcanad ban behavior changes
def on_open(self):
super().on_open()
for i in range(banscore):
self.send_message(msg_verack())
def on_reject(self, message): pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
self.version_received = False
super().__init__()
def on_reject(self, message): pass
def on_verack(self, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, message):
self.version_received = True
self.send_message(msg_ping())
self.send_message(msg_getaddr())
class P2PLeakTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [['-banscore='+str(banscore)]]
def run_test(self):
self.nodes[0].setmocktime(1501545600) # August 1st 2017
no_version_bannode = self.nodes[0].add_p2p_connection(CNodeNoVersionBan(), send_version=False)
no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False)
no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle())
unsupported_service_bit5_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)
unsupported_service_bit7_node = self.nodes[0].add_p2p_connection(CLazyNode(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)
network_thread_start()
wait_until(lambda: no_version_bannode.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: no_version_idlenode.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: no_verack_idlenode.version_received, timeout=10, lock=mininode_lock)
wait_until(lambda: unsupported_service_bit5_node.ever_connected, timeout=10, lock=mininode_lock)
wait_until(lambda: unsupported_service_bit7_node.ever_connected, timeout=10, lock=mininode_lock)
# Mine a block and make sure that it's not sent to the connected nodes
self.nodes[0].generate(1)
#Give the node enough time to possibly leak out a message
time.sleep(5)
#This node should have been banned
assert no_version_bannode.state != "connected"
# These nodes should have been disconnected
assert unsupported_service_bit5_node.state != "connected"
assert unsupported_service_bit7_node.state != "connected"
self.nodes[0].disconnect_p2ps()
# Wait until all connections are closed and the network thread has terminated
wait_until(lambda: len(self.nodes[0].getpeerinfo()) == 0)
network_thread_join()
# Make sure no unexpected messages came in
assert(no_version_bannode.unexpected_msg == False)
assert(no_version_idlenode.unexpected_msg == False)
assert(no_verack_idlenode.unexpected_msg == False)
assert not unsupported_service_bit5_node.unexpected_msg
assert not unsupported_service_bit7_node.unexpected_msg
self.log.info("Service bits 5 and 7 are allowed after August 1st 2018")
self.nodes[0].setmocktime(1533168000) # August 2nd 2018
allowed_service_bit5_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)
allowed_service_bit7_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)
# Network thread stopped when all previous P2PInterfaces disconnected. Restart it
network_thread_start()
wait_until(lambda: allowed_service_bit5_node.message_count["verack"], lock=mininode_lock)
wait_until(lambda: allowed_service_bit7_node.message_count["verack"], lock=mininode_lock)
if __name__ == '__main__':
P2PLeakTest().main()
|
py | b41336b3f4a190b1e4e207fc024b3a40f6cde310 | # -*- coding: utf-8 -*-
from django.contrib import admin
from models import *
class LinksListAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'nb_links')
class LinkAdmin(admin.ModelAdmin):
list_display = ('text', 'target', 'plugin', 'order')
list_filter = ('plugin',)
ordering = ('text',)
admin.site.register(Plugin_LinksList, LinksListAdmin)
admin.site.register(Link, LinkAdmin)
|
py | b41337b9f4957c73fd5e4b6d3bbcd90372c90d80 | from .hello_world import hello_world
from .load_fannie_dataframes import load_dataframes_fannie
|
py | b41337f69d8bef08b571a40cadf79582f59cc9c7 | #!/usr/bin/python
# Classification (U)
"""Program: list_nodes.py
Description: Integration testing of list_nodes in elastic_db_admin.py.
Usage:
test/integration/elastic_db_admin/list_nodes.py
Arguments:
"""
# Libraries and Global Variables
# Standard
import sys
import os
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
# Third-party
# Local
sys.path.append(os.getcwd())
import elastic_db_admin
import lib.gen_libs as gen_libs
import elastic_lib.elastic_class as elastic_class
import version
__version__ = version.__version__
class UnitTest(unittest.TestCase):
"""Class: UnitTest
Description: Class which is a representation of a unit testing.
Methods:
setUp
test_list_nodes
"""
def setUp(self):
"""Function: setUp
Description: Initialization for unit testing.
Arguments:
"""
self.base_dir = "test/integration/elastic_db_admin"
self.test_path = os.path.join(os.getcwd(), self.base_dir)
self.config_path = os.path.join(self.test_path, "config")
self.cfg = gen_libs.load_module("elastic", self.config_path)
self.user = self.cfg.user if hasattr(self.cfg, "user") else None
self.japd = self.cfg.japd if hasattr(self.cfg, "japd") else None
self.ca_cert = self.cfg.ssl_client_ca if hasattr(
self.cfg, "ssl_client_ca") else None
self.scheme = self.cfg.scheme if hasattr(
self.cfg, "scheme") else "https"
self.els = elastic_class.ElasticSearchStatus(
self.cfg.host, port=self.cfg.port, user=self.user, japd=self.japd,
ca_cert=self.ca_cert, scheme=self.scheme)
self.els.connect()
def test_list_nodes(self):
"""Function: test_list_nodes
Description: Test list_nodes function.
Arguments:
"""
with gen_libs.no_std_out():
self.assertFalse(elastic_db_admin.list_nodes(self.els))
if __name__ == "__main__":
unittest.main()
|
py | b413380d63fb95664ccbb0facb05276f4bd047f5 | #===============================================================================
# Copyright 2021-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
import pytest
import numpy as np
from numpy.testing import assert_allclose
from onedal.primitives import (linear_kernel, rbf_kernel,
poly_kernel, sigmoid_kernel)
from sklearn.metrics.pairwise import rbf_kernel as sklearn_rbf_kernel
from onedal.tests.utils._device_selection import (get_queues,
pass_if_not_implemented_for_gpu)
# TODO: investigate sporadic failures on GPU
@pytest.mark.parametrize('queue', get_queues('host,cpu'))
def test_dense_self_linear_kernel(queue):
rng = np.random.RandomState(0)
X = np.array(5 * rng.random_sample((10, 4)))
result = linear_kernel(X, queue=queue)
expected = np.dot(X, np.array(X).T)
assert_allclose(result, expected, rtol=1e-15)
def _test_dense_small_linear_kernel(queue, scale, shift, dtype):
rng = np.random.RandomState(0)
X = np.array(5 * rng.random_sample((10, 4)), dtype=dtype)
Y = np.array(5 * rng.random_sample((15, 4)), dtype=dtype)
result = linear_kernel(X, Y, scale=scale, shift=shift, queue=queue)
expected = np.dot(X, np.array(Y).T) * scale + shift
tol = 1e-14 if dtype == np.float64 else 1e-6
assert_allclose(result, expected, rtol=tol)
# TODO: investigate sporadic failures on GPU
@pytest.mark.parametrize('queue', get_queues('host,cpu'))
@pytest.mark.parametrize('scale', [1.0, 2.0])
@pytest.mark.parametrize('shift', [0.0, 1.0])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_dense_small_linear_kernel(queue, scale, shift, dtype):
_test_dense_small_linear_kernel(queue, scale, shift, dtype)
@pytest.mark.parametrize('queue', get_queues())
def test_dense_self_rbf_kernel(queue):
rng = np.random.RandomState(0)
X = np.array(5 * rng.random_sample((10, 4)))
result = rbf_kernel(X, queue=queue)
expected = sklearn_rbf_kernel(X)
assert_allclose(result, expected, rtol=1e-14)
def _test_dense_small_rbf_kernel(queue, gamma, dtype):
rng = np.random.RandomState(0)
X = np.array(5 * rng.random_sample((10, 4)), dtype=dtype)
Y = np.array(5 * rng.random_sample((15, 4)), dtype=dtype)
result = rbf_kernel(X, Y, gamma=gamma, queue=queue)
expected = sklearn_rbf_kernel(X, Y, gamma)
tol = 1e-14 if dtype == np.float64 else 1e-5
assert_allclose(result, expected, rtol=tol)
@pytest.mark.parametrize('gamma', [0.1, None])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
@pytest.mark.parametrize('queue', get_queues())
def test_dense_small_rbf_kernel(queue, gamma, dtype):
_test_dense_small_rbf_kernel(queue, gamma, dtype)
@pass_if_not_implemented_for_gpu(reason="poly kernel is not implemented")
@pytest.mark.parametrize('queue', get_queues())
def test_dense_self_poly_kernel(queue):
rng = np.random.RandomState(0)
X = np.array(2 * rng.random_sample((10, 4)))
degree = 2
result = poly_kernel(X, degree=degree, queue=queue)
expected = np.dot(X, np.array(X).T) ** degree
assert_allclose(result, expected, rtol=1e-14)
def _test_dense_small_poly_kernel(queue, gamma, coef0, degree, dtype):
rng = np.random.RandomState(0)
X = np.array(2 * rng.random_sample((10, 4)), dtype=dtype)
Y = np.array(2 * rng.random_sample((15, 4)), dtype=dtype)
result = poly_kernel(X, Y, gamma=gamma, coef0=coef0, degree=degree, queue=queue)
expected = (gamma * np.dot(X, np.array(Y).T) + coef0) ** degree
tol = 1e-14 if dtype == np.float64 else 1e-5
assert_allclose(result, expected, rtol=tol)
@pass_if_not_implemented_for_gpu(reason="poly kernel is not implemented")
@pytest.mark.parametrize('queue', get_queues())
@pytest.mark.parametrize('gamma', [0.1, 1.0])
@pytest.mark.parametrize('coef0', [0.0, 1.0])
@pytest.mark.parametrize('degree', [2, 3])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_dense_small_poly_kernel(queue, gamma, coef0, degree, dtype):
_test_dense_small_poly_kernel(queue, gamma, coef0, degree, dtype)
@pass_if_not_implemented_for_gpu(reason="sigmoid kernel is not implemented")
@pytest.mark.parametrize('queue', get_queues())
def test_dense_self_sigmoid_kernel(queue):
rng = np.random.RandomState(0)
X = np.array(2 * rng.random_sample((15, 4)))
result = sigmoid_kernel(X, queue=queue)
expected = np.tanh(np.dot(X, np.array(X).T))
assert_allclose(result, expected)
def _test_dense_small_sigmoid_kernel(queue, gamma, coef0, dtype):
rng = np.random.RandomState(0)
X = np.array(2 * rng.random_sample((10, 4)), dtype=dtype)
Y = np.array(2 * rng.random_sample((15, 4)), dtype=dtype)
result = sigmoid_kernel(X, Y, gamma=gamma, coef0=coef0, queue=queue)
expected = np.tanh(gamma * np.dot(X, np.array(Y).T) + coef0)
tol = 1e-14 if dtype == np.float64 else 1e-6
assert_allclose(result, expected, rtol=tol)
@pass_if_not_implemented_for_gpu(reason="sigmoid kernel is not implemented")
@pytest.mark.parametrize('queue', get_queues())
@pytest.mark.parametrize('gamma', [0.1, 1.0, 2.4])
@pytest.mark.parametrize('coef0', [0.0, 1.0, 5.5])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
def test_dense_small_sigmoid_kernel(queue, gamma, coef0, dtype):
_test_dense_small_sigmoid_kernel(queue, gamma, coef0, dtype)
|
py | b41339a362ad5297b4fd88c695dcc2e97568faf4 | ## some utils functions for scanpy
import os
import anndata
import numpy as np
import pandas as pd
import scanpy as sc
from scipy import io
from scipy.sparse import hstack
def adata_hstack(blocks, sample_ids=None, layer_keys=None):
if layer_keys is None:
layer_keys = blocks[0].layers.keys()
layers = {}
for _key in layer_keys:
layers[_key] = hstack([adata.layers[_key].T for adata in blocks]).T
if len(layer_keys) == 0:
layers = None
X_blocks = [adata.X.transpose() for adata in blocks]
obs_blocks = [adata.obs for adata in blocks]
new_X = hstack(X_blocks).transpose()
new_obs = pd.concat(obs_blocks, axis=0)
new_var = blocks[0].var
new_adata = anndata.AnnData(X=new_X, obs=new_obs, var=new_var,
layers=layers)
sample_ids_default = []
for i in range(len(blocks)):
sample_ids_default += ["S%d" %i] * blocks[i].shape[0]
if sample_ids is not None:
if len(sample_ids) != len(new_obs):
print("sample ids has different size to observations, change to default.")
sample_ids = sample_ids_default
else:
sample_ids = sample_ids_default
cell_ids = [
new_adata.obs.index.values[i] + ":" +
sample_ids[i] for i in range(len(sample_ids))]
new_adata.obs['cell_id'] = cell_ids
new_adata.obs['sample_id'] = sample_ids
return new_adata
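# Hypothetical usage sketch (the AnnData objects are examples): stack two blocks
# that share the same var index, tagging each cell with its sample of origin.
#
#     combined = adata_hstack([adata_s0, adata_s1],
#                             sample_ids=["S0"] * adata_s0.shape[0] +
#                                        ["S1"] * adata_s1.shape[0])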
def adata_preprocess(adata, min_cells=3, min_genes=500, max_genes=5000,
max_percent_mito=0.1):
## first filtering
sc.pp.filter_cells(adata, min_genes=min_genes)
print(adata.shape)
sc.pp.filter_genes(adata, min_cells=min_cells)
print(adata.shape)
## basic info
mito_genes = [name for name in adata.var_names if name.startswith('MT-')]
adata.obs['n_counts'] = np.sum(adata.X, axis=1).A1
adata.obs['n_genes'] = np.sum(adata.X>=1, axis=1).A1
adata.obs['n_mito'] = np.sum(adata[:, mito_genes].X, axis=1).A1
adata.obs['percent_mito'] = adata.obs['n_mito'] / adata.obs['n_counts']
## filter cells
adata = adata[adata.obs['n_genes'] < max_genes, :]
adata = adata[adata.obs['percent_mito'] < max_percent_mito, :]
## log transform
adata.raw = sc.pp.log1p(adata, copy=True)
sc.pp.normalize_per_cell(adata, counts_per_cell_after=1e4)
## filter genes
filter_result = sc.pp.filter_genes_dispersion(adata.X, min_mean=0.0125,
max_mean=3, min_disp=0.2)
adata = adata[:, filter_result.gene_subset]
## regress and scale
sc.pp.log1p(adata)
sc.pp.regress_out(adata, ['n_counts', 'percent_mito'])
sc.pp.scale(adata, max_value=10)
### PCA, t-SNE, and UMAP
sc.tl.pca(adata)
adata.obsm['X_pca'] *= -1 # multiply by -1 to match Seurat
sc.tl.tsne(adata, random_state=2, n_pcs=10)
sc.pp.neighbors(adata, n_neighbors=10)
sc.tl.umap(adata)
return adata
def load_10X(path, min_counts=None, min_cells=None, version3=False):
"""
Load 10X data from cellranger output matrix, into
scipy csr matrix, arrays for genes and cell barcodes
Filter cells by min_counts and filter genes by min_cells
"""
## load 10X matrix folder
if version3:
mat = io.mmread(path + "/matrix.mtx.gz").tocsr()
genes = np.genfromtxt(path + "/features.tsv.gz", dtype="str", delimiter="\t")
cells = np.genfromtxt(path + "/barcodes.tsv.gz", dtype="str", delimiter="\t")
else:
mat = io.mmread(path + "/matrix.mtx").tocsr()
genes = np.genfromtxt(path + "/genes.tsv", dtype="str", delimiter="\t")
cells = np.genfromtxt(path + "/barcodes.tsv", dtype="str", delimiter="\t")
## filter cells
if min_counts is not None and min_counts > 0:
n_counts = np.array(np.sum(mat, axis=0)).reshape(-1)
idx = n_counts >= min_counts
mat = mat[:, idx]
cells = cells[idx]
## filter genes
if min_cells is not None and min_cells > 0:
        n_cells = np.array(np.sum(mat > 0, axis=1)).reshape(-1)
        idx = n_cells >= min_cells
mat = mat[idx, :]
genes = genes[idx, ]
return mat, genes, cells
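# Hypothetical usage sketch (the path is an example):
#
#     mat, genes, cells = load_10X("/path/to/filtered_feature_bc_matrix",
#                                  min_counts=500, min_cells=3, version3=True)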
def save_10X(path, mat, genes, barcodes, version3=False):
"""
Save 10X matrix, genes and cell barcodes into under the path.
"""
if not os.path.exists(path):
os.makedirs(path)
io.mmwrite(path + '/matrix.mtx', mat)
if version3:
fid = open(path + '/features.tsv', 'w')
else:
fid = open(path + '/genes.tsv', 'w')
for ii in range(genes.shape[0]):
fid.writelines("\t".join(genes[ii, :]) + "\n")
fid.close()
fid = open(path + '/barcodes.tsv', 'w')
for _cell in barcodes:
fid.writelines("%s\n" %(_cell))
fid.close()
if version3:
import subprocess
bashCommand = "gzip -f %s %s %s" %(path + '/matrix.mtx',
path + '/features.tsv',
path + '/barcodes.tsv')
pro = subprocess.Popen(bashCommand.split(), stdout=subprocess.PIPE)
pro.communicate()[0]
def read_dropEst(path, cell_file = 'barcodes.tsv',
gene_file = 'genes.tsv',
layer_keys = ['exon', 'intron', 'spanning'],
layer_files = ['cell.counts.exon.mtx',
'cell.counts.intron.mtx',
'cell.counts.spanning.mtx'],
combine_unspliced = True):
"""
Load dropEst matrices produced by this script:
"""
## load 10X matrix folder
# genes = np.genfromtxt(path + "/" + gene_file, dtype="str", delimiter="\t")
# cells = np.genfromtxt(path + "/" + cell_file, dtype="str", delimiter="\t")
genes = pd.read_csv(path + "/" + gene_file, sep="\t", index_col=0, header=None)
cells = pd.read_csv(path + "/" + cell_file, sep="\t", index_col=0, header=None)
mat_list = []
for _mxt_file in layer_files:
mat_list.append(io.mmread(path + "/" + _mxt_file).tocsr().T)
if len(mat_list) == 0:
print('Error: requiring at least one matrix.')
return None
# change layer names
if combine_unspliced and len(mat_list) == 3:
mat_list[1] += mat_list[2]
mat_list = mat_list[:2]
layer_keys = ['spliced', 'unspliced']
if len(layer_keys) != len(mat_list):
print('Warning: len(layer_keys) != len(mat_list). Use index instead.')
layer_keys = ['matrix%d' %(x + 1) for x in range(len(mat_list))]
layers = {}
for i in range(len(mat_list)):
layers[layer_keys[i]] = mat_list[i]
X = mat_list[0].copy()
adata = sc.AnnData(X, obs=cells, var=genes, layers=layers)
return adata
|
py | b4133a8189cce00852f99ca81e57db76d4d5cf14 | from rest_framework import serializers
from .models import Post
from django.contrib.auth import get_user_model
class PostSerializer(serializers.ModelSerializer):
user = serializers.StringRelatedField(read_only=True)
url = serializers.HyperlinkedIdentityField(
view_name='posts:detail-post', read_only=True
)
class Meta:
model = Post
fields = ('url', 'id', 'title','user', 'created_at',)
class PostCreateSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = ('title', 'image', 'body')
class PostDetailSerializer(serializers.ModelSerializer):
class Meta:
model = Post
fields = '__all__' |
py | b4133a974f818e11b9cea8806addfce510a0ccce | import _plotly_utils.basevalidators
class ExponentformatValidator(
_plotly_utils.basevalidators.EnumeratedValidator
):
def __init__(
self,
plotly_name='exponentformat',
parent_name='scattergeo.marker.colorbar',
**kwargs
):
super(ExponentformatValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'style'),
values=kwargs.pop(
'values', ['none', 'e', 'E', 'power', 'SI', 'B']
),
**kwargs
)
|
py | b4133b1699662174d76514a7c3de19f6e0149c2c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from setuptools import setup
def get_version(package):
"""
Return package version as listed in `__version__` in `init.py`.
"""
with open(os.path.join(package, "__init__.py")) as f:
return re.search("__version__ = ['\"]([^'\"]+)['\"]", f.read()).group(1)
def get_long_description():
"""
Return the README.
"""
with open("README.md", encoding="utf8") as f:
return f.read()
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [
dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, "__init__.py"))
]
setup(
name="databases",
version=get_version("databases"),
python_requires='>=3.6',
url="https://github.com/encode/databases",
license="BSD",
description="Async database support for Python.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Tom Christie",
author_email="[email protected]",
packages=get_packages("databases"),
package_data={"databases": ["py.typed"]},
data_files=[("", ["LICENSE.md"])],
install_requires=['sqlalchemy>=1.4,<1.5', 'aiocontextvars;python_version<"3.7"'],
extras_require={
"postgresql": ["asyncpg"],
"mysql": ["aiomysql"],
"sqlite": ["aiosqlite"],
"postgresql+aiopg": ["aiopg"]
},
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
zip_safe=False,
)
|
py | b4133caf3fff9c5c680fb882274a5d83d2574b64 | # encoding: utf-8
# module torch._C
# from /Users/rook/anaconda/lib/python3.6/site-packages/torch/_C.cpython-36m-darwin.so
# by generator 1.145
# no doc
# imports
import torch._C._functions as _functions # <module 'torch._C._functions'>
from .object import object
class CudaCharStorageBase(object):
# no doc
def copy_(self, *args, **kwargs): # real signature unknown
pass
def data_ptr(self, *args, **kwargs): # real signature unknown
pass
def element_size(self, *args, **kwargs): # real signature unknown
pass
def fill_(self, *args, **kwargs): # real signature unknown
pass
def get_device(self, *args, **kwargs): # real signature unknown
pass
def is_pinned(self, *args, **kwargs): # real signature unknown
pass
def is_shared(self, *args, **kwargs): # real signature unknown
pass
def new(self, *args, **kwargs): # real signature unknown
pass
def resize_(self, *args, **kwargs): # real signature unknown
pass
def size(self, *args, **kwargs): # real signature unknown
pass
def _get_shared_fd(self, *args, **kwargs): # real signature unknown
pass
def _new_shared_cuda(self, *args, **kwargs): # real signature unknown
pass
def _new_view(self, *args, **kwargs): # real signature unknown
pass
def _new_with_file(self, *args, **kwargs): # real signature unknown
pass
@classmethod
def _new_with_weak_ptr(cls, *args, **kwargs): # real signature unknown
pass
def _root_storage(self, *args, **kwargs): # real signature unknown
pass
def _set_cdata(self, *args, **kwargs): # real signature unknown
pass
def _set_from_file(self, *args, **kwargs): # real signature unknown
pass
def _shared_decref(self, *args, **kwargs): # real signature unknown
pass
def _shared_incref(self, *args, **kwargs): # real signature unknown
pass
def _share_cuda_(self, *args, **kwargs): # real signature unknown
pass
def _weak_ref(self, *args, **kwargs): # real signature unknown
pass
def _write_file(self, *args, **kwargs): # real signature unknown
pass
def __delitem__(self, *args, **kwargs): # real signature unknown
""" Delete self[key]. """
pass
def __getitem__(self, *args, **kwargs): # real signature unknown
""" Return self[key]. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __len__(self, *args, **kwargs): # real signature unknown
""" Return len(self). """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __setitem__(self, *args, **kwargs): # real signature unknown
""" Set self[key] to value. """
pass
_cdata = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
|
py | b4133d95adcb8e9f3ca32243d42fc4562e53e12e | import time
import dataset
db = dataset.connect('sqlite:///record.db')
groups = db['groups']
times = db['times']
notes = db['notes']
def add_group(name, inactive=False):
# TODO: validate conditions: group does not exist, name is valid str
    groups.insert(dict(name=name, inactive=inactive, punched_in=False))
def punch_in(group_id, in_time):
# TODO: validate conditions: group is punched out,
# no out times are None within group
db.begin()
times.insert(dict(group_id=group_id, in_time=in_time))
group = groups.find_one(id=group_id)
group['punched_in'] = True
groups.update(group, ['id'])
db.commit()
def punch_out(group_id, out_time):
# TODO: validate conditions: exactly one time in group is None,
# group is punched in
    if 'out_time' not in times.columns:
times.create_column('out_time', db.types.float)
db.begin()
interval = times.find_one(group_id=group_id, out_time=None)
interval['out_time'] = out_time
times.update(interval, ['id'])
group = groups.find_one(id=group_id)
group['punched_in'] = False
groups.update(group, ['id'])
db.commit()
def toggle_punch(group_id, tog_time):
# TODO: validate conditions: group exists
punched_in = groups.find_one(id=group_id)['punched_in']
if punched_in:
punch_out(group_id, tog_time)
else:
punch_in(group_id, tog_time)
def add_note(group_id, note, note_time):
# TODO: validate group exists, note is valid str
notes.insert(dict(group_id=group_id, time=note_time, note=note))
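# --- Usage sketch (not part of the original module) ---
# A hypothetical end-to-end flow, assuming record.db is writable; the group id is
# the auto-generated primary key that `dataset` assigns on insert.
if __name__ == '__main__':
    add_group('research')
    group_id = groups.find_one(name='research')['id']
    toggle_punch(group_id, time.time())                      # punch in
    add_note(group_id, 'started experiments', time.time())
    toggle_punch(group_id, time.time())                      # punch out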
|
py | b4133de35f6fb5cc2162f3de7875b06f982976ab | from collections import OrderedDict
import numpy as np
from gym.spaces import Dict , Box
from metaworld.envs.env_util import get_stat_in_paths, \
create_stats_ordered_dict, get_asset_full_path
from metaworld.core.multitask_env import MultitaskEnv
from metaworld.envs.mujoco.sawyer_xyz.base import SawyerXYZEnv
from metaworld.envs.mujoco.utils.rotation import euler2quat
from metaworld.envs.mujoco.sawyer_xyz.base import OBS_TYPE
class SawyerReachPushPickPlaceWallEnv(SawyerXYZEnv):
def __init__(
self,
random_init=False,
obs_type='plain',
task_types=['pick_place', 'reach', 'push'],
task_type='pick_place',
goal_low=(-0.05, 0.85, 0.05),
goal_high=(0.05, 0.9, 0.3),
liftThresh = 0.04,
sampleMode='equal',
rewMode = 'orig',
rotMode='fixed',
**kwargs
):
hand_low=(-0.5, 0.40, 0.05)
hand_high=(0.5, 1, 0.5)
obj_low=(-0.05, 0.6, 0.015)
obj_high=(0.05, 0.65, 0.015)
SawyerXYZEnv.__init__(
self,
frame_skip=5,
action_scale=1./100,
hand_low=hand_low,
hand_high=hand_high,
model_name=self.model_name,
**kwargs
)
self.task_type = task_type
self.init_config = {
'obj_init_angle': .3,
'obj_init_pos': np.array([0, 0.6, 0.02]),
'hand_init_pos': np.array([0, .6, .2]),
}
# we only do one task from [pick_place, reach, push]
# per instance of SawyerReachPushPickPlaceEnv.
# Please only set task_type from constructor.
if self.task_type == 'pick_place':
self.goal = np.array([0.05, 0.8, 0.2])
elif self.task_type == 'reach':
self.goal = np.array([-0.05, 0.8, 0.2])
elif self.task_type == 'push':
self.goal = np.array([0.05, 0.8, 0.015])
else:
raise NotImplementedError
self.obj_init_angle = self.init_config['obj_init_angle']
self.obj_init_pos = self.init_config['obj_init_pos']
self.hand_init_pos = self.init_config['hand_init_pos']
assert obs_type in OBS_TYPE
self.obs_type = obs_type
if goal_low is None:
goal_low = self.hand_low
if goal_high is None:
goal_high = self.hand_high
self.random_init = random_init
self.liftThresh = liftThresh
self.max_path_length = 150
self.rewMode = rewMode
self.rotMode = rotMode
self.sampleMode = sampleMode
self.task_types = task_types
if rotMode == 'fixed':
self.action_space = Box(
np.array([-1, -1, -1, -1]),
np.array([1, 1, 1, 1]),
)
elif rotMode == 'rotz':
self.action_rot_scale = 1./50
self.action_space = Box(
np.array([-1, -1, -1, -np.pi, -1]),
np.array([1, 1, 1, np.pi, 1]),
)
elif rotMode == 'quat':
self.action_space = Box(
np.array([-1, -1, -1, 0, -1, -1, -1, -1]),
np.array([1, 1, 1, 2*np.pi, 1, 1, 1, 1]),
)
else:
self.action_space = Box(
np.array([-1, -1, -1, -np.pi/2, -np.pi/2, 0, -1]),
np.array([1, 1, 1, np.pi/2, np.pi/2, np.pi*2, 1]),
)
self.obj_and_goal_space = Box(
np.hstack((obj_low, goal_low)),
np.hstack((obj_high, goal_high)),
)
self.goal_space = Box(np.array(goal_low), np.array(goal_high))
if self.obs_type == 'plain':
self.observation_space = Box(
np.hstack((self.hand_low, obj_low,)),
np.hstack((self.hand_high, obj_high,)),
)
elif self.obs_type == 'with_goal':
self.observation_space = Box(
np.hstack((self.hand_low, obj_low, goal_low)),
np.hstack((self.hand_high, obj_high, goal_high)),
)
else:
raise NotImplementedError
self.num_resets = 0
self.reset()
def get_goal(self):
return {
'state_desired_goal': self._state_goal,
}
@property
def model_name(self):
return get_asset_full_path('sawyer_xyz/sawyer_reach_push_pick_and_place_wall.xml')
def step(self, action):
if self.rotMode == 'euler':
action_ = np.zeros(7)
action_[:3] = action[:3]
action_[3:] = euler2quat(action[3:6])
self.set_xyz_action_rot(action_)
elif self.rotMode == 'fixed':
self.set_xyz_action(action[:3])
elif self.rotMode == 'rotz':
self.set_xyz_action_rotz(action[:4])
else:
self.set_xyz_action_rot(action[:7])
self.do_simulation([action[-1], -action[-1]])
# The marker seems to get reset every time you do a simulation
self._set_goal_marker(self._state_goal)
ob = self._get_obs()
obs_dict = self._get_obs_dict()
reward , reachRew, reachDist, pushRew, pushDist, pickRew, placeRew , placingDist = self.compute_reward(action, obs_dict, mode=self.rewMode, task_type=self.task_type)
self.curr_path_length +=1
goal_dist = placingDist if self.task_type == 'pick_place' else pushDist
if self.task_type == 'reach':
success = float(reachDist <= 0.05)
else:
success = float(goal_dist <= 0.07)
info = {'reachDist': reachDist, 'pickRew':pickRew, 'epRew' : reward, 'goalDist': goal_dist, 'success': success}
info['goal'] = self.goal
return ob, reward, False, info
def _get_obs(self):
hand = self.get_endeff_pos()
objPos = self.data.get_geom_xpos('objGeom')
flat_obs = np.concatenate((hand, objPos))
if self.obs_type == 'with_goal_and_id':
return np.concatenate([
flat_obs,
self._state_goal,
self._state_goal_idx
])
elif self.obs_type == 'with_goal':
return np.concatenate([
flat_obs,
self._state_goal
])
elif self.obs_type == 'plain':
return np.concatenate([flat_obs,]) # TODO ZP do we need the concat?
else:
return np.concatenate([flat_obs, self._state_goal_idx])
def _get_obs_dict(self):
hand = self.get_endeff_pos()
objPos = self.data.get_geom_xpos('objGeom')
flat_obs = np.concatenate((hand, objPos))
return dict(
state_observation=flat_obs,
state_desired_goal=self._state_goal,
state_achieved_goal=objPos,
)
def _get_info(self):
pass
def _set_goal_marker(self, goal):
"""
        This should be used ONLY for visualization. Use self._state_goal for
logging, learning, etc.
"""
self.data.site_xpos[self.model.site_name2id('goal_{}'.format(self.task_type))] = (
goal[:3]
)
for task_type in self.task_types:
if task_type != self.task_type:
self.data.site_xpos[self.model.site_name2id('goal_{}'.format(task_type))] = (
np.array([10.0, 10.0, 10.0])
)
def _set_objCOM_marker(self):
"""
        This should be used ONLY for visualization. Use self._state_goal for
logging, learning, etc.
"""
objPos = self.data.get_geom_xpos('objGeom')
self.data.site_xpos[self.model.site_name2id('objSite')] = (
objPos
)
def _set_obj_xyz(self, pos):
qpos = self.data.qpos.flat.copy()
qvel = self.data.qvel.flat.copy()
qpos[9:12] = pos.copy()
qvel[9:15] = 0
self.set_state(qpos, qvel)
def adjust_initObjPos(self, orig_init_pos):
        # This accounts for the fact that the meshes for the geom and the object are not aligned
#If this is not done, the object could be initialized in an extreme position
diff = self.get_body_com('obj')[:2] - self.data.get_geom_xpos('objGeom')[:2]
adjustedPos = orig_init_pos[:2] + diff
#The convention we follow is that body_com[2] is always 0, and geom_pos[2] is the object height
return [adjustedPos[0], adjustedPos[1],self.data.get_geom_xpos('objGeom')[-1]]
def reset_model(self):
self._reset_hand()
self._state_goal = self.goal.copy()
self.obj_init_pos = self.adjust_initObjPos(self.init_config['obj_init_pos'])
self.obj_init_angle = self.init_config['obj_init_angle']
self.objHeight = self.data.get_geom_xpos('objGeom')[2]
self.heightTarget = self.objHeight + self.liftThresh
if self.random_init:
goal_pos = np.random.uniform(
self.obj_and_goal_space.low,
self.obj_and_goal_space.high,
size=(self.obj_and_goal_space.low.size),
)
self._state_goal = goal_pos[3:]
while np.linalg.norm(goal_pos[:2] - self._state_goal[:2]) < 0.15:
goal_pos = np.random.uniform(
self.obj_and_goal_space.low,
self.obj_and_goal_space.high,
size=(self.obj_and_goal_space.low.size),
)
self._state_goal = goal_pos[3:]
if self.task_type == 'push':
self._state_goal = np.concatenate((goal_pos[-3:-1], [self.obj_init_pos[-1]]))
self.obj_init_pos = np.concatenate((goal_pos[:2], [self.obj_init_pos[-1]]))
else:
self._state_goal = goal_pos[-3:]
self.obj_init_pos = goal_pos[:3]
self._set_goal_marker(self._state_goal)
self._set_obj_xyz(self.obj_init_pos)
self.curr_path_length = 0
self.maxReachDist = np.linalg.norm(self.init_fingerCOM - np.array(self._state_goal))
self.maxPushDist = np.linalg.norm(self.obj_init_pos[:2] - np.array(self._state_goal)[:2])
self.maxPlacingDist = np.linalg.norm(np.array([self.obj_init_pos[0], self.obj_init_pos[1], self.heightTarget]) - np.array(self._state_goal)) + self.heightTarget
self.target_rewards = [1000*self.maxPlacingDist + 1000*2, 1000*self.maxReachDist + 1000*2, 1000*self.maxPushDist + 1000*2]
if self.task_type == 'pick_place':
idx = 0
elif self.task_type == 'reach':
idx = 1
else:
idx = 2
self.target_reward = self.target_rewards[idx]
self.num_resets += 1
return self._get_obs()
def _reset_hand(self):
for _ in range(10):
self.data.set_mocap_pos('mocap', self.hand_init_pos)
self.data.set_mocap_quat('mocap', np.array([1, 0, 1, 0]))
self.do_simulation([-1,1], self.frame_skip)
#self.do_simulation(None, self.frame_skip)
rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')
self.init_fingerCOM = (rightFinger + leftFinger)/2
self.pickCompleted = False
def get_site_pos(self, siteName):
_id = self.model.site_names.index(siteName)
return self.data.site_xpos[_id].copy()
def compute_rewards(self, actions, obsBatch):
#Required by HER-TD3
        assert isinstance(obsBatch, dict)
obsList = obsBatch['state_observation']
rewards = [self.compute_reward(action, obs, task_type=self.task_type)[0] for action, obs in zip(actions, obsList)]
return np.array(rewards)
def compute_reward(self, actions, obs, mode = 'general', task_type='reach'):
if isinstance(obs, dict):
obs = obs['state_observation']
objPos = obs[3:6]
rightFinger, leftFinger = self.get_site_pos('rightEndEffector'), self.get_site_pos('leftEndEffector')
fingerCOM = (rightFinger + leftFinger)/2
heightTarget = self.heightTarget
goal = self._state_goal
def compute_reward_reach(actions, obs, mode):
c1 = 1000 ; c2 = 0.01 ; c3 = 0.001
reachDist = np.linalg.norm(fingerCOM - goal)
# reachRew = -reachDist
# if reachDist < 0.1:
# reachNearRew = 1000*(self.maxReachDist - reachDist) + c1*(np.exp(-(reachDist**2)/c2) + np.exp(-(reachDist**2)/c3))
# else:
# reachNearRew = 0.
reachRew = c1*(self.maxReachDist - reachDist) + c1*(np.exp(-(reachDist**2)/c2) + np.exp(-(reachDist**2)/c3))
reachRew = max(reachRew, 0)
# reachNearRew = max(reachNearRew,0)
# reachRew = -reachDist
reward = reachRew# + reachNearRew
return [reward, reachRew, reachDist, None, None, None, None, None]
def compute_reward_push(actions, obs, mode):
c1 = 1000 ; c2 = 0.01 ; c3 = 0.001
assert np.all(goal == self.get_site_pos('goal_push'))
reachDist = np.linalg.norm(fingerCOM - objPos)
pushDist = np.linalg.norm(objPos[:2] - goal[:2])
reachRew = -reachDist
if reachDist < 0.05:
# pushRew = -pushDist
pushRew = 1000*(self.maxPushDist - pushDist) + c1*(np.exp(-(pushDist**2)/c2) + np.exp(-(pushDist**2)/c3))
pushRew = max(pushRew, 0)
else:
pushRew = 0
reward = reachRew + pushRew
return [reward, reachRew, reachDist, pushRew, pushDist, None, None, None]
def compute_reward_pick_place(actions, obs, mode):
reachDist = np.linalg.norm(objPos - fingerCOM)
placingDist = np.linalg.norm(objPos - goal)
assert np.all(goal == self.get_site_pos('goal_pick_place'))
def reachReward():
reachRew = -reachDist# + min(actions[-1], -1)/50
reachDistxy = np.linalg.norm(objPos[:-1] - fingerCOM[:-1])
zRew = np.linalg.norm(fingerCOM[-1] - self.init_fingerCOM[-1])
if reachDistxy < 0.05: #0.02
reachRew = -reachDist
else:
reachRew = -reachDistxy - 2*zRew
#incentive to close fingers when reachDist is small
if reachDist < 0.05:
reachRew = -reachDist + max(actions[-1],0)/50
return reachRew , reachDist
def pickCompletionCriteria():
tolerance = 0.01
if objPos[2] >= (heightTarget- tolerance):
return True
else:
return False
if pickCompletionCriteria():
self.pickCompleted = True
def objDropped():
return (objPos[2] < (self.objHeight + 0.005)) and (placingDist >0.02) and (reachDist > 0.02)
# Object on the ground, far away from the goal, and from the gripper
#Can tweak the margin limits
def objGrasped(thresh = 0):
sensorData = self.data.sensordata
return (sensorData[0]>thresh) and (sensorData[1]> thresh)
def orig_pickReward():
# hScale = 50
hScale = 100
# hScale = 1000
if self.pickCompleted and not(objDropped()):
return hScale*heightTarget
# elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) :
elif (reachDist < 0.1) and (objPos[2]> (self.objHeight + 0.005)) :
return hScale* min(heightTarget, objPos[2])
else:
return 0
def general_pickReward():
hScale = 50
if self.pickCompleted and objGrasped():
return hScale*heightTarget
elif objGrasped() and (objPos[2]> (self.objHeight + 0.005)):
return hScale* min(heightTarget, objPos[2])
else:
return 0
def placeReward():
# c1 = 1000 ; c2 = 0.03 ; c3 = 0.003
c1 = 1000 ; c2 = 0.01 ; c3 = 0.001
if mode == 'general':
cond = self.pickCompleted and objGrasped()
else:
cond = self.pickCompleted and (reachDist < 0.1) and not(objDropped())
if cond:
placeRew = 1000*(self.maxPlacingDist - placingDist) + c1*(np.exp(-(placingDist**2)/c2) + np.exp(-(placingDist**2)/c3))
placeRew = max(placeRew,0)
return [placeRew , placingDist]
else:
return [0 , placingDist]
reachRew, reachDist = reachReward()
if mode == 'general':
pickRew = general_pickReward()
else:
pickRew = orig_pickReward()
placeRew , placingDist = placeReward()
assert ((placeRew >=0) and (pickRew>=0))
reward = reachRew + pickRew + placeRew
return [reward, reachRew, reachDist, None, None, pickRew, placeRew, placingDist]
if task_type == 'reach':
return compute_reward_reach(actions, obs, mode)
elif task_type == 'push':
return compute_reward_push(actions, obs, mode)
else:
return compute_reward_pick_place(actions, obs, mode)
def get_diagnostics(self, paths, prefix=''):
statistics = OrderedDict()
return statistics
def log_diagnostics(self, paths = None, logger = None):
pass
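# --- Usage sketch (not part of the original file) ---
# A minimal random-action rollout, assuming MuJoCo and the metaworld assets are
# installed; 'reach' keeps the reward purely distance-based.
if __name__ == '__main__':
    env = SawyerReachPushPickPlaceWallEnv(task_type='reach', obs_type='with_goal')
    obs = env.reset()
    for _ in range(10):
        obs, reward, done, info = env.step(env.action_space.sample())
        print(round(reward, 3), info['success'])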
|
py | b4133e7550d985c4a11ccf0ef0f0e925a108a57f | """Monitor the NZBGet API."""
from datetime import timedelta
import logging
from typing import Callable, List, Optional
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_NAME,
DATA_MEGABYTES,
DATA_RATE_MEGABYTES_PER_SECOND,
DEVICE_CLASS_TIMESTAMP,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import utcnow
from . import NZBGetEntity
from .const import DATA_COORDINATOR, DOMAIN
from .coordinator import NZBGetDataUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {
"article_cache": ["ArticleCacheMB", "Article Cache", DATA_MEGABYTES],
"average_download_rate": [
"AverageDownloadRate",
"Average Speed",
DATA_RATE_MEGABYTES_PER_SECOND,
],
"download_paused": ["DownloadPaused", "Download Paused", None],
"download_rate": ["DownloadRate", "Speed", DATA_RATE_MEGABYTES_PER_SECOND],
"download_size": ["DownloadedSizeMB", "Size", DATA_MEGABYTES],
"free_disk_space": ["FreeDiskSpaceMB", "Disk Free", DATA_MEGABYTES],
"post_job_count": ["PostJobCount", "Post Processing Jobs", "Jobs"],
"post_paused": ["PostPaused", "Post Processing Paused", None],
"remaining_size": ["RemainingSizeMB", "Queue Size", DATA_MEGABYTES],
"uptime": ["UpTimeSec", "Uptime", None],
}
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up NZBGet sensor based on a config entry."""
coordinator: NZBGetDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
sensors = []
for sensor_config in SENSOR_TYPES.values():
sensors.append(
NZBGetSensor(
coordinator,
entry.entry_id,
entry.data[CONF_NAME],
sensor_config[0],
sensor_config[1],
sensor_config[2],
)
)
async_add_entities(sensors)
class NZBGetSensor(NZBGetEntity):
"""Representation of a NZBGet sensor."""
def __init__(
self,
coordinator: NZBGetDataUpdateCoordinator,
entry_id: str,
entry_name: str,
sensor_type: str,
sensor_name: str,
unit_of_measurement: Optional[str] = None,
):
"""Initialize a new NZBGet sensor."""
self._sensor_type = sensor_type
self._unique_id = f"{entry_id}_{sensor_type}"
self._unit_of_measurement = unit_of_measurement
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
name=f"{entry_name} {sensor_name}",
)
@property
def device_class(self):
"""Return the device class."""
if "UpTimeSec" in self._sensor_type:
return DEVICE_CLASS_TIMESTAMP
return None
@property
def unique_id(self) -> str:
"""Return the unique ID of the sensor."""
return self._unique_id
@property
def unit_of_measurement(self) -> str:
"""Return the unit that the state of sensor is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the sensor."""
value = self.coordinator.data["status"].get(self._sensor_type)
if value is None:
_LOGGER.warning("Unable to locate value for %s", self._sensor_type)
return None
if "DownloadRate" in self._sensor_type and value > 0:
# Convert download rate from Bytes/s to MBytes/s
return round(value / 2 ** 20, 2)
if "UpTimeSec" in self._sensor_type and value > 0:
uptime = utcnow() - timedelta(seconds=value)
return uptime.replace(microsecond=0).isoformat()
return value
|
py | b4133efc910736c49f51c3cfcdeb02cca259b8b1 | from hachiko.hachiko import AIOEventHandler
from os.path import join, isdir
from pathlib import Path
from os import remove, stat, chown
import stat as lib_stat
from shutil import copy2, move, rmtree
import logging
class PlexLocalFileBackupHandler(AIOEventHandler):
def __init__(self, plex_local_path, backup_path, name):
super().__init__()
self._logger = logging.getLogger(name)
self._backup_path = backup_path
self._plex_local_path = plex_local_path
def get_backup_basename(self, path, plex_local_path=None):
if plex_local_path is None:
plex_local_path = self._plex_local_path + "/"
else:
plex_local_path = plex_local_path + "/"
backup_path = path
self._logger.debug("Backup Basename Path: %s", path)
if path.startswith(plex_local_path):
backup_path = path[len(plex_local_path):]
self._logger.debug("New Basename: %s", backup_path)
return backup_path
def delete_file(self, path):
plex_file = self.get_backup_basename(path)
delete_path = join(self._backup_path, plex_file)
self._logger.debug("Removing file: %s", delete_path)
try:
if isdir(delete_path):
rmtree(delete_path)
else:
remove(delete_path)
except (FileNotFoundError, NotADirectoryError):
self._logger.error("File %s has already been deleted",
delete_path)
def backup_file(self, path):
self._logger.debug("Original file path: %s", path)
plex_file = self.get_backup_basename(path)
backup_path = join(self._backup_path, plex_file)
self._logger.debug("Backup file path: %s", backup_path)
try:
stat_info = stat(path)
UID = stat_info[lib_stat.ST_UID]
GID = stat_info[lib_stat.ST_GID]
if isdir(path):
Path(backup_path).mkdir(parents=True, exist_ok=True)
else:
copy2(path, backup_path, follow_symlinks=False)
self._logger.debug("Setting UID:GID %s:%s for %s",
UID, GID, backup_path)
chown(backup_path, UID, GID, follow_symlinks=False)
except FileNotFoundError:
self._logger.info("File %s was deleted before it could be copied",
path)
async def on_created(self, event):
if event.src_path != self._plex_local_path:
self._logger.info("File created: Backing up %s",
self.get_backup_basename(event.src_path))
self.backup_file(event.src_path)
async def on_moved(self, event):
if event.src_path != self._plex_local_path:
self._logger.info("File moved: old: %s, new: %s",
self.get_backup_basename(event.src_path),
self.get_backup_basename(event.dest_path))
plex_file = self.get_backup_basename(event.src_path)
plex_new_name = self.get_backup_basename(event.dest_path)
old_path = join(self._backup_path, plex_file)
self._logger.debug("Old file path: %s", old_path)
new_path = join(self._backup_path, plex_new_name)
self._logger.debug("New file path: %s", new_path)
stat_info = stat(event.dest_path)
UID = stat_info[lib_stat.ST_UID]
GID = stat_info[lib_stat.ST_GID]
try:
move(old_path, new_path)
self._logger.debug("Setting UID:GID %s:%s for %s",
UID, GID, new_path)
chown(new_path, UID, GID, follow_symlinks=False)
except FileNotFoundError:
self._logger.info("File %s was already moved to new Dest %s",
old_path, new_path)
async def on_modified(self, event):
if event.src_path != self._plex_local_path:
self._logger.info("File modified: Backing up %s",
self.get_backup_basename(event.src_path))
self.backup_file(event.src_path)
async def on_deleted(self, event):
if event.src_path != self._plex_local_path:
self._logger.info("File deleted: Removing %s",
self.get_backup_basename(event.src_path))
self.delete_file(event.src_path)
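# --- Usage sketch (not part of the original module) ---
# Wiring the handler into a watcher. AIOWatchdog(path, event_handler=...) with
# start()/stop() is assumed from hachiko -- verify against the installed version;
# the paths below are placeholders.
import asyncio
from hachiko.hachiko import AIOWatchdog
async def watch_plex(plex_local_path, backup_path):
    handler = PlexLocalFileBackupHandler(plex_local_path, backup_path, 'plex-backup')
    watchdog = AIOWatchdog(plex_local_path, event_handler=handler)
    watchdog.start()
    try:
        while True:
            await asyncio.sleep(1)
    finally:
        watchdog.stop()
# asyncio.run(watch_plex('/plex/local', '/plex/backup'))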
|
py | b4133fb077975700d3cac229b8857a38f2b86e7c | import re
from django.template import Node, Variable, VariableNode, _render_value_in_context
from django.template import TemplateSyntaxError, TokenParser, Library
from django.template import TOKEN_TEXT, TOKEN_VAR
from django.utils import translation
from django.utils.encoding import force_unicode
register = Library()
class GetAvailableLanguagesNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
from django.conf import settings
context[self.variable] = [(k, translation.ugettext(v)) for k, v in settings.LANGUAGES]
return ''
class GetCurrentLanguageNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language()
return ''
class GetCurrentLanguageBidiNode(Node):
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = translation.get_language_bidi()
return ''
class TranslateNode(Node):
def __init__(self, filter_expression, noop):
self.noop = noop
self.filter_expression = filter_expression
if isinstance(self.filter_expression.var, basestring):
self.filter_expression.var = Variable(u"'%s'" % self.filter_expression.var)
def render(self, context):
self.filter_expression.var.translate = not self.noop
output = self.filter_expression.resolve(context)
return _render_value_in_context(output, context)
class BlockTranslateNode(Node):
def __init__(self, extra_context, singular, plural=None, countervar=None,
counter=None):
self.extra_context = extra_context
self.singular = singular
self.plural = plural
self.countervar = countervar
self.counter = counter
def render_token_list(self, tokens):
result = []
vars = []
for token in tokens:
if token.token_type == TOKEN_TEXT:
result.append(token.contents)
elif token.token_type == TOKEN_VAR:
result.append(u'%%(%s)s' % token.contents)
vars.append(token.contents)
return ''.join(result), vars
def render(self, context):
tmp_context = {}
for var, val in self.extra_context.items():
tmp_context[var] = val.render(context)
# Update() works like a push(), so corresponding context.pop() is at
# the end of function
context.update(tmp_context)
singular, vars = self.render_token_list(self.singular)
if self.plural and self.countervar and self.counter:
count = self.counter.resolve(context)
context[self.countervar] = count
plural, plural_vars = self.render_token_list(self.plural)
result = translation.ungettext(singular, plural, count)
vars.extend(plural_vars)
else:
result = translation.ugettext(singular)
# Escape all isolated '%' before substituting in the context.
result = re.sub(u'%(?!\()', u'%%', result)
data = dict([(v, _render_value_in_context(context[v], context)) for v in vars])
context.pop()
return result % data
def do_get_available_languages(parser, token):
"""
This will store a list of available languages
in the context.
Usage::
{% get_available_languages as languages %}
{% for language in languages %}
...
{% endfor %}
This will just pull the LANGUAGES setting from
your setting file (or the default settings) and
put it into the named variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % args)
return GetAvailableLanguagesNode(args[2])
def do_get_current_language(parser, token):
"""
This will store the current language in the context.
Usage::
{% get_current_language as language %}
This will fetch the currently active language and
    put its value into the ``language`` context
variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageNode(args[2])
def do_get_current_language_bidi(parser, token):
"""
This will store the current language layout in the context.
Usage::
{% get_current_language_bidi as bidi %}
This will fetch the currently active language's layout and
    put its value into the ``bidi`` context variable.
True indicates right-to-left layout, otherwise left-to-right
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % args)
return GetCurrentLanguageBidiNode(args[2])
def do_translate(parser, token):
"""
This will mark a string for translation and will
translate the string for the current language.
Usage::
{% trans "this is a test" %}
This will mark the string for translation so it will
    be pulled out by make-messages.py into the .po files
and will run the string through the translation engine.
There is a second form::
{% trans "this is a test" noop %}
This will only mark for translation, but will return
the string unchanged. Use it when you need to store
values into forms that should be translated later on.
You can use variables instead of constant strings
to translate stuff you marked somewhere else::
{% trans variable %}
This will just try to translate the contents of
the variable ``variable``. Make sure that the string
in there is something that is in the .po file.
"""
class TranslateParser(TokenParser):
def top(self):
value = self.value()
            # Backwards Compatibility fix:
# FilterExpression does not support single-quoted strings,
# so we make a cheap localized fix in order to maintain
# backwards compatibility with existing uses of ``trans``
# where single quote use is supported.
if value[0] == "'":
pos = None
m = re.match("^'([^']+)'(\|.*$)",value)
if m:
value = '"%s"%s' % (m.group(1).replace('"','\\"'),m.group(2))
elif value[-1] == "'":
value = '"%s"' % value[1:-1].replace('"','\\"')
if self.more():
if self.tag() == 'noop':
noop = True
else:
raise TemplateSyntaxError("only option for 'trans' is 'noop'")
else:
noop = False
return (value, noop)
value, noop = TranslateParser(token.contents).top()
return TranslateNode(parser.compile_filter(value), noop)
def do_block_translate(parser, token):
"""
This will translate a block of text with parameters.
Usage::
{% blocktrans with foo|filter as bar and baz|filter as boo %}
This is {{ bar }} and {{ boo }}.
{% endblocktrans %}
Additionally, this supports pluralization::
{% blocktrans count var|length as count %}
There is {{ count }} object.
{% plural %}
There are {{ count }} objects.
{% endblocktrans %}
This is much like ngettext, only in template syntax.
"""
class BlockTranslateParser(TokenParser):
def top(self):
countervar = None
counter = None
extra_context = {}
while self.more():
tag = self.tag()
if tag == 'with' or tag == 'and':
value = self.value()
if self.tag() != 'as':
raise TemplateSyntaxError("variable bindings in 'blocktrans' must be 'with value as variable'")
extra_context[self.tag()] = VariableNode(
parser.compile_filter(value))
elif tag == 'count':
counter = parser.compile_filter(self.value())
if self.tag() != 'as':
raise TemplateSyntaxError("counter specification in 'blocktrans' must be 'count value as variable'")
countervar = self.tag()
else:
raise TemplateSyntaxError("unknown subtag %s for 'blocktrans' found" % tag)
return (countervar, counter, extra_context)
countervar, counter, extra_context = BlockTranslateParser(token.contents).top()
singular = []
plural = []
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
singular.append(token)
else:
break
if countervar and counter:
if token.contents.strip() != 'plural':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
while parser.tokens:
token = parser.next_token()
if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
plural.append(token)
else:
break
if token.contents.strip() != 'endblocktrans':
raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
return BlockTranslateNode(extra_context, singular, plural, countervar,
counter)
register.tag('get_available_languages', do_get_available_languages)
register.tag('get_current_language', do_get_current_language)
register.tag('get_current_language_bidi', do_get_current_language_bidi)
register.tag('trans', do_translate)
register.tag('blocktrans', do_block_translate)
|
py | b413402246c9022f12d4949d9cd40a08fb158570 | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# referenced from
# Library Name: torchtext
# Authors: torchtext authors and @sluks
# Date: 2021-11-25
# Link:
import itertools
from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union
import torch
from torch import Tensor, tensor
from torchmetrics import Metric
from torchmetrics.functional.text.chrf import _chrf_score_compute, _chrf_score_update, _prepare_n_grams_dicts
_N_GRAM_LEVELS = ("char", "word")
_TEXT_LEVELS = ("ref", "hyp", "matching")
_DICT_STATES_NAMES = (
"total_ref_char_n_grams",
"total_ref_word_n_grams",
"total_hyp_char_n_grams",
"total_hyp_word_n_grams",
"total_matching_char_n_grams",
"total_matching_word_n_grams",
)
_DICT_STATES_TYPES = Tuple[
Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor], Dict[int, Tensor]
]
class CHRFScore(Metric):
"""Calculate `chrf score`_ of machine translated text with one or more references. This implementation supports
    both the chrF score computation introduced in [1] and the chrF++ score introduced in `chrF++ score`_. This
    implementation follows the implementations from https://github.com/m-popovic/chrF and
https://github.com/mjpost/sacrebleu/blob/master/sacrebleu/metrics/chrf.py.
Args:
n_char_order:
            A character n-gram order. If `n_char_order=6`, the metric refers to the official chrF/chrF++.
n_word_order:
A word n-gram order. If `n_word_order=2`, the metric refers to the official chrF++. If `n_word_order=0`, the
metric is equivalent to the original ChrF.
beta:
A parameter determining an importance of recall w.r.t. precision. If `beta=1`, their importance is equal.
lowercase:
            An indication whether to enable case-insensitivity.
whitespace:
            An indication whether to keep whitespaces during n-gram extraction.
return_sentence_level_score:
            An indication whether a sentence-level chrF/chrF++ score should be returned.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False.
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step.
process_group:
Specify the process group on which synchronization is called.
dist_sync_fn:
Callback that performs the allgather operation on the metric state. When `None`, DDP
will be used to perform the allgather.
Raises:
ValueError:
If ``n_char_order`` is not an integer greater than or equal to 1.
ValueError:
If ``n_word_order`` is not an integer greater than or equal to 0.
ValueError:
If ``beta`` is smaller than 0.
Example:
>>> hypothesis_corpus = ['the cat is on the mat']
>>> reference_corpus = [['there is a cat on the mat', 'a cat is on the mat']]
>>> metric = CHRFScore()
>>> metric(reference_corpus, hypothesis_corpus)
tensor(0.8640)
References:
[1] chrF: character n-gram F-score for automatic MT evaluation by Maja Popović `chrF score`_
[2] chrF++: words helping character n-grams by Maja Popović `chrF++ score`_
"""
is_differentiable = False
higher_is_better = True
sentence_chrf_score: Optional[List[Tensor]] = None
def __init__(
self,
n_char_order: int = 6,
n_word_order: int = 2,
beta: float = 2.0,
lowercase: bool = False,
whitespace: bool = False,
return_sentence_level_score: bool = False,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
dist_sync_fn: Optional[Callable] = None,
):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
if not isinstance(n_char_order, int) or n_char_order < 1:
raise ValueError("Expected argument `n_char_order` to be an integer greater than or equal to 1.")
self.n_char_order = n_char_order
if not isinstance(n_word_order, int) or n_word_order < 0:
raise ValueError("Expected argument `n_word_order` to be an integer greater than or equal to 0.")
self.n_word_order = n_word_order
if beta < 0:
raise ValueError("Expected argument `beta` to be greater than 0.")
self.beta = beta
self.lowercase = lowercase
self.whitespace = whitespace
self.return_sentence_level_score = return_sentence_level_score
self.n_order = float(n_char_order + n_word_order)
# Adding state dynamically
for (n_gram_level, n_gram_order), text in self._get_text_n_gram_iterator():
for n in range(1, n_gram_order + 1):
state_name = self._get_state_name(text, n_gram_level, n)
self.add_state(state_name, tensor(0.0), dist_reduce_fx="sum")
if self.return_sentence_level_score:
self.add_state("sentence_chrf_score", [], dist_reduce_fx="cat")
def update( # type: ignore
self, reference_corpus: Sequence[Sequence[str]], hypothesis_corpus: Sequence[str]
) -> None:
"""Compute Precision Scores.
Args:
reference_corpus:
An iterable of iterables of reference corpus.
hypothesis_corpus:
An iterable of hypothesis corpus.
"""
n_grams_dicts_tuple = _chrf_score_update(
reference_corpus,
hypothesis_corpus,
*self._convert_states_to_dicts(),
self.n_char_order,
self.n_word_order,
self.n_order,
self.beta,
self.lowercase,
self.whitespace,
self.sentence_chrf_score if self.return_sentence_level_score else None,
)
self._update_states_from_dicts(n_grams_dicts_tuple[:-1])
if self.sentence_chrf_score is not None:
self.sentence_chrf_score = n_grams_dicts_tuple[-1]
def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:
"""Calculate chrF/chrF++ score.
Return:
A corpus-level chrF/chrF++ score.
(Optionally) A list of sentence-level chrF/chrF++ scores if `return_sentence_level_score=True`.
"""
if self.sentence_chrf_score is not None:
return (
_chrf_score_compute(*self._convert_states_to_dicts(), self.n_order, self.beta),
torch.cat(self.sentence_chrf_score),
)
return _chrf_score_compute(*self._convert_states_to_dicts(), self.n_order, self.beta)
def _convert_states_to_dicts(self) -> _DICT_STATES_TYPES:
"""Convert global metric states to the n-gram dictionaries to be passed in `_chrf_score_update`."""
n_grams_dicts: Dict[str, Dict[int, Tensor]] = {
name: n_gram_dict
for name, n_gram_dict in zip(
_DICT_STATES_NAMES, _prepare_n_grams_dicts(self.n_char_order, self.n_word_order)
)
}
for (n_gram_level, n_gram_order), text in self._get_text_n_gram_iterator():
for n in range(1, n_gram_order + 1):
dict_name = self._get_dict_name(text, n_gram_level)
state_name = self._get_state_name(text, n_gram_level, n)
n_grams_dicts[dict_name][n] = getattr(self, state_name)
return tuple(n_grams_dicts.values()) # type: ignore
def _update_states_from_dicts(self, n_grams_dicts_tuple: _DICT_STATES_TYPES) -> None:
"""Update global metric states based on the n-gram dictionaries calculated on the current batch."""
        n_grams_dicts = {name: n_gram_dict for name, n_gram_dict in zip(_DICT_STATES_NAMES, n_grams_dicts_tuple)}
for (n_gram_level, n_gram_order), text in self._get_text_n_gram_iterator():
for n in range(1, n_gram_order + 1):
dict_name = self._get_dict_name(text, n_gram_level)
state_name = self._get_state_name(text, n_gram_level, n)
setattr(self, state_name, n_grams_dicts[dict_name][n])
@staticmethod
def _get_dict_name(text: str, n_gram_level: str) -> str:
"""Return a dictionary name w.r.t input args."""
return f"total_{text}_{n_gram_level}_n_grams"
@staticmethod
def _get_state_name(text: str, n_gram_level: str, n: int) -> str:
"""Return a metric state name w.r.t input args."""
return f"total_{text}_{n_gram_level}_{n}_grams"
def _get_text_n_gram_iterator(self) -> Iterator[Tuple[Tuple[str, int], str]]:
"""Get iterator over char/word and reference/hypothesis/matching n-gram level."""
return itertools.product(zip(_N_GRAM_LEVELS, [self.n_char_order, self.n_word_order]), _TEXT_LEVELS)
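# --- Usage sketch (not part of the original module) ---
# Accumulating several batches before computing the corpus-level score; the
# sentences are made-up examples.
if __name__ == "__main__":
    metric = CHRFScore(n_word_order=2, return_sentence_level_score=True)
    batches = [
        ([["a cat sits on the mat"]], ["the cat is on the mat"]),
        ([["hello world"]], ["hello there world"]),
    ]
    for reference_corpus, hypothesis_corpus in batches:
        metric.update(reference_corpus, hypothesis_corpus)
    corpus_score, sentence_scores = metric.compute()
    print(corpus_score, sentence_scores)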
|
py | b41340f8ce9a14791df072a64c98c0e4124945cb | # pylint:disable=unused-argument,arguments-differ
import logging
from typing import Dict, List, Set
from collections import defaultdict
import ailment
from ..analysis import Analysis
from .sequence_walker import SequenceWalker
from .structurer_nodes import SequenceNode, CodeNode, MultiNode, LoopNode, ConditionNode, EmptyBlockNotice, \
ContinueNode, CascadingConditionNode
from .condition_processor import ConditionProcessor
from .utils import insert_node
l = logging.getLogger(name=__name__)
class NodeAddressFinder(SequenceWalker):
"""
Walk the entire node and collect all addresses of nodes.
"""
def __init__(self, node):
handlers = {
ailment.Block: self._handle_Block,
}
super().__init__(handlers=handlers)
self.addrs: Set[int] = set()
self.walk(node)
def _handle_Block(self, node: ailment.Block, **kwargs):
self.addrs.add(node.addr)
class GotoSimplifier(SequenceWalker):
"""
Remove unnecessary Jump statements.
"""
def __init__(self, node):
handlers = {
SequenceNode: self._handle_sequencenode,
CodeNode: self._handle_codenode,
MultiNode: self._handle_multinode,
LoopNode: self._handle_loopnode,
ConditionNode: self._handle_conditionnode,
CascadingConditionNode: self._handle_cascadingconditionnode,
ailment.Block: self._handle_block,
}
super().__init__(handlers)
self._node_addrs: Set[int] = NodeAddressFinder(node).addrs
self.walk(node)
def _handle_sequencenode(self, node, successor=None, **kwargs):
"""
:param SequenceNode node:
:return:
"""
for n0, n1 in zip(node.nodes, node.nodes[1:] + [successor]):
self._handle(n0, successor=n1)
def _handle_codenode(self, node, successor=None, **kwargs):
"""
:param CodeNode node:
:return:
"""
self._handle(node.node, successor=successor)
def _handle_conditionnode(self, node, successor=None, **kwargs):
"""
:param ConditionNode node:
:param successor:
:return:
"""
if node.true_node is not None:
self._handle(node.true_node, successor=successor)
if node.false_node is not None:
self._handle(node.false_node, successor=successor)
def _handle_cascadingconditionnode(self, node: CascadingConditionNode, successor=None, **kwargs):
for _, child_node in node.condition_and_nodes:
self._handle(child_node, successor=successor)
if node.else_node is not None:
self._handle(node.else_node)
def _handle_loopnode(self, node, successor=None, **kwargs):
"""
:param LoopNode node:
:param successor:
:return:
"""
self._handle(node.sequence_node,
successor=node, # the end of a loop always jumps to the beginning of its body
)
def _handle_multinode(self, node, successor=None, **kwargs):
"""
:param MultiNode node:
:return:
"""
for n0, n1 in zip(node.nodes, node.nodes[1:] + [successor]):
self._handle(n0, successor=n1)
def _handle_block(self, block, successor=None, **kwargs): # pylint:disable=no-self-use
"""
:param ailment.Block block:
:return:
"""
if block.statements and isinstance(block.statements[-1], ailment.Stmt.Jump):
goto_stmt = block.statements[-1] # ailment.Stmt.Jump
if isinstance(goto_stmt.target, ailment.Expr.Const):
goto_target = goto_stmt.target.value
if successor and goto_target == successor.addr:
can_remove = True
elif goto_target not in self._node_addrs:
                    # the target block has been removed and no longer exists; we assume this goto is useless
can_remove = True
else:
can_remove = False
if can_remove:
# we can remove this statement
block.statements = block.statements[:-1]
class LoopSimplifier(SequenceWalker):
def __init__(self, node):
handlers = {
SequenceNode: self._handle_sequencenode,
CodeNode: self._handle_codenode,
MultiNode: self._handle_multinode,
LoopNode: self._handle_loopnode,
ConditionNode: self._handle_conditionnode,
CascadingConditionNode: self._handle_cascadingconditionnode,
ailment.Block: self._handle_block,
}
super().__init__(handlers)
self.continue_preludes: Dict[LoopNode, List[ailment.Block]] = defaultdict(list)
self.walk(node)
@staticmethod
def _control_transferring_statement(stmt: ailment.Stmt.Statement) -> bool:
return isinstance(stmt,
(ailment.Stmt.Call, ailment.Stmt.Return, ailment.Stmt.Jump, ailment.Stmt.ConditionalJump))
def _handle_sequencenode(self, node, predecessor=None, successor=None, loop=None, loop_successor=None, **kwargs):
for n0, n1, n2 in zip(node.nodes, node.nodes[1:] + [successor], [predecessor] + node.nodes[:-1]):
self._handle(n0, predecessor=n2, successor=n1, loop=loop, loop_successor=loop_successor)
def _handle_codenode(self, node, predecessor=None, successor=None, loop=None, loop_successor=None, **kwargs):
self._handle(node.node, predecessor=predecessor, successor=successor, loop=loop, loop_successor=loop_successor)
def _handle_conditionnode(self, node, predecessor=None, successor=None, loop=None, loop_successor=None, **kwargs):
if node.true_node is not None:
self._handle(node.true_node, predecessor=predecessor, successor=successor, loop=loop, loop_successor=loop_successor)
if node.false_node is not None:
self._handle(node.false_node, predecessor=predecessor, successor=successor, loop=loop, loop_successor=loop_successor)
def _handle_cascadingconditionnode(self, node: CascadingConditionNode, predecessor=None, successor=None, loop=None,
loop_successor=None, **kwargs):
for _, child_node in node.condition_and_nodes:
self._handle(child_node, predecessor=predecessor, successor=successor, loop=loop,
loop_successor=loop_successor)
if node.else_node is not None:
self._handle(node.else_node, predecessor=predecessor, successor=successor, loop=loop,
loop_successor=loop_successor)
def _handle_loopnode(self, node: LoopNode, predecessor=None, successor=None, loop=None, loop_successor=None, **kwargs):
self._handle(node.sequence_node, predecessor=predecessor, successor=successor, loop=node, loop_successor=successor)
# find for-loop iterators
if node.sort == 'while' and self.continue_preludes[node] and \
(node.condition is not None or len(self.continue_preludes[node]) > 1):
if all(block.statements for block in self.continue_preludes[node]) and \
all(not self._control_transferring_statement(block.statements[-1])
for block in self.continue_preludes[node]) and \
all(block.statements[-1] == self.continue_preludes[node][0].statements[-1]
for block in self.continue_preludes[node]):
node.sort = 'for'
node.iterator = self.continue_preludes[node][0].statements[-1]
for block in self.continue_preludes[node]:
block.statements = block.statements[:-1]
# find for-loop initializers
if isinstance(predecessor, MultiNode):
predecessor = predecessor.nodes[-1]
if node.sort == 'for' and isinstance(predecessor, ailment.Block) and predecessor.statements and \
isinstance(predecessor.statements[-1], (ailment.Stmt.Assignment, ailment.Stmt.Store)):
node.initializer = predecessor.statements[-1]
predecessor.statements = predecessor.statements[:-1]
def _handle_multinode(self, node, predecessor=None, successor=None, loop=None, loop_successor=None, **kwargs):
for n0, n1, n2 in zip(node.nodes, node.nodes[1:] + [successor], [predecessor] + node.nodes[:-1]):
self._handle(n0, predecessor=n2, successor=n1, loop=loop, loop_successor=loop_successor)
def _handle_block(self, block, predecessor=None, successor=None, loop=None, loop_successor=None, **kwargs): # pylint:disable=no-self-use
if isinstance(successor, ContinueNode) or successor is loop_successor:
self.continue_preludes[loop].append(block)
class IfSimplifier(SequenceWalker):
"""
Remove unnecessary jump or conditional jump statements if they jump to the successor right afterwards.
"""
def __init__(self, node):
handlers = {
SequenceNode: self._handle_sequencenode,
CodeNode: self._handle_codenode,
MultiNode: self._handle_multinode,
LoopNode: self._handle_loopnode,
ConditionNode: self._handle_conditionnode,
CascadingConditionNode: self._handle_cascadingconditionnode,
ailment.Block: self._handle_block,
}
super().__init__(handlers)
self.walk(node)
def _handle_sequencenode(self, node, successor=None, **kwargs):
"""
:param SequenceNode node:
:return:
"""
for n0, n1 in zip(node.nodes, node.nodes[1:] + [successor]):
self._handle(n0, successor=n1)
def _handle_codenode(self, node, successor=None, **kwargs):
"""
:param CodeNode node:
:return:
"""
self._handle(node.node, successor=successor)
def _handle_conditionnode(self, node, successor=None, **kwargs):
"""
:param ConditionNode node:
:param successor:
:return:
"""
if node.true_node is not None:
self._handle(node.true_node, successor=successor)
if node.false_node is not None:
self._handle(node.false_node, successor=successor)
def _handle_cascadingconditionnode(self, node: CascadingConditionNode, successor=None, **kwargs):
for _, child_node in node.condition_and_nodes:
self._handle(child_node, successor=successor)
if node.else_node is not None:
self._handle(node.else_node,successor=successor)
def _handle_loopnode(self, node, successor=None, **kwargs):
"""
:param LoopNode node:
:param successor:
:return:
"""
self._handle(node.sequence_node, successor=successor)
def _handle_multinode(self, node, successor=None, **kwargs):
"""
:param MultiNode node:
:return:
"""
for n0, n1 in zip(node.nodes, node.nodes[1:] + [successor]):
self._handle(n0, successor=n1)
def _handle_block(self, block, successor=None, **kwargs): # pylint:disable=no-self-use
"""
Remove unnecessary jump or conditional jump statements if they jump to the successor right afterwards.
:param ailment.Block block:
:return:
"""
if block.statements and isinstance(block.statements[-1], ailment.Stmt.ConditionalJump):
cond_stmt = block.statements[-1] # ailment.Stmt.ConditionalJump
if isinstance(successor, ConditionNode):
true_cond = False
if cond_stmt.true_target is not None and successor.true_node is not None:
# True branch exists. Test if the true target is the address
if isinstance(cond_stmt.true_target, ailment.Expr.Const) \
and cond_stmt.true_target.value == successor.true_node.addr:
true_cond = True
if cond_stmt.true_target is not None and successor.false_node is not None:
# True branch exists. Test if the true target is the address
if isinstance(cond_stmt.true_target, ailment.Expr.Const) \
and cond_stmt.true_target.value == successor.false_node.addr:
true_cond = True
                false_cond = False
                if cond_stmt.false_target is not None and successor.false_node is not None:
                    # False branch exists. Test if the false target is the address of the false node
                    if isinstance(cond_stmt.false_target, ailment.Expr.Const) \
                            and cond_stmt.false_target.value == successor.false_node.addr:
                        false_cond = True
                if cond_stmt.false_target is not None and successor.true_node is not None:
                    # False branch exists. Test if the false target is the address of the true node
                    if isinstance(cond_stmt.false_target, ailment.Expr.Const) \
                            and cond_stmt.false_target.value == successor.true_node.addr:
                        false_cond = True
if true_cond or false_cond:
# We can safely remove this statement
block.statements = block.statements[:-1]
else:
l.error("An unexpected successor %s follows the conditional statement %s.",
successor, cond_stmt
)
class IfElseFlattener(SequenceWalker):
"""
Remove unnecessary else branches and make the else node a direct successor of the previous If node if the If node
always returns.
"""
def __init__(self, node, functions):
handlers = {
SequenceNode: self._handle_Sequence,
CodeNode: self._handle_Code,
MultiNode: self._handle_MultiNode,
LoopNode: self._handle_Loop,
ConditionNode: self._handle_Condition,
CascadingConditionNode: self._handle_CascadingCondition,
}
super().__init__(handlers)
self.functions = functions
self.walk(node)
def _handle_Condition(self, node, parent=None, index=None, **kwargs):
"""
:param ConditionNode node:
:param successor:
:return:
"""
if node.true_node is not None and node.false_node is not None:
try:
last_stmts = ConditionProcessor.get_last_statements(node.true_node)
except EmptyBlockNotice:
last_stmts = None
if last_stmts is not None and all(self._is_statement_terminating(stmt) for stmt in last_stmts):
# all end points in the true node are returning
# remove the else node and make it a new node following node
else_node = node.false_node
node.false_node = None
insert_node(parent, index + 1, else_node, index)
if node.true_node is not None:
self._handle(node.true_node, parent=node, index=0)
if node.false_node is not None:
self._handle(node.false_node, parent=node, index=1)
def _is_statement_terminating(self, stmt):
if isinstance(stmt, ailment.Stmt.Return):
return True
if isinstance(stmt, ailment.Stmt.Call) and isinstance(stmt.target, ailment.Expr.Const):
# is it calling a non-returning function?
target_func_addr = stmt.target.value
try:
func = self.functions.get_by_addr(target_func_addr)
return func.returning is False
except KeyError:
pass
return False
class CascadingIfsRemover(SequenceWalker):
"""
Coalesce cascading If constructs. Transforming the following construct::
if (cond_a) {
if (cond_b) {
true_body
} else { }
} else { }
into::
if (cond_a and cond_b) {
true_body
} else { }
"""
def __init__(self, node):
handlers = {
SequenceNode: self._handle_Sequence,
CodeNode: self._handle_Code,
MultiNode: self._handle_MultiNode,
LoopNode: self._handle_Loop,
ConditionNode: self._handle_Condition,
CascadingConditionNode: self._handle_CascadingCondition,
}
super().__init__(handlers)
self.walk(node)
def _handle_Condition(self, node, parent=None, index=None, **kwargs):
"""
:param ConditionNode node:
:param successor:
:return:
"""
if node.true_node is not None:
self._handle(node.true_node, parent=node, index=0)
if node.false_node is not None:
self._handle(node.false_node, parent=node, index=1)
if node.true_node is not None and node.false_node is None:
if isinstance(node.true_node, SequenceNode):
last_node = None
if len(node.true_node.nodes) > 1 and all(self.is_empty_node(node_) for node_ in node.true_node.nodes[:-1]):
last_node = node.true_node.nodes[-1]
elif len(node.true_node.nodes) == 1:
last_node = node.true_node.nodes[0]
true_node = last_node
if isinstance(true_node, ConditionNode) and true_node.true_node is not None and true_node.false_node is None:
node.condition = ailment.BinaryOp(None, "LogicalAnd", (node.condition, true_node.condition), False,
**node.condition.tags)
node.true_node = true_node.true_node
@staticmethod
def is_empty_node(node):
if isinstance(node, ailment.Block):
return not node.statements
if isinstance(node, SequenceNode):
return all(CascadingIfsRemover.is_empty_node(n) for n in node.nodes)
return False
class RegionSimplifier(Analysis):
def __init__(self, region):
self.region = region
self.result = None
self._simplify()
def _simplify(self):
"""
RegionSimplifier performs the following simplifications:
- Remove redundant Gotos
- Remove redundant If/If-else statements
"""
r = self.region
# Remove unnecessary Jump statements
r = self._simplify_gotos(r)
# Remove unnecessary jump or conditional jump statements if they jump to the successor right afterwards
r = self._simplify_ifs(r)
# Remove unnecessary else branches if the if branch will always return
r = self._simplify_ifelses(r)
#
r = self._simplify_cascading_ifs(r)
#
r = self._simplify_loops(r)
self.result = r
#
# Simplifiers
#
@staticmethod
def _simplify_gotos(region):
GotoSimplifier(region)
return region
@staticmethod
def _simplify_ifs(region):
IfSimplifier(region)
return region
def _simplify_ifelses(self, region):
IfElseFlattener(region, self.kb.functions)
return region
@staticmethod
def _simplify_cascading_ifs(region):
CascadingIfsRemover(region)
return region
@staticmethod
def _simplify_loops(region):
LoopSimplifier(region)
return region
from ...analyses import AnalysesHub
AnalysesHub.register_default('RegionSimplifier', RegionSimplifier)
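# --- Usage sketch (not part of the original module) ---
# Once registered above, the analysis is reachable through an angr Project's
# analysis hub; `region` would normally come from the decompiler's structuring
# pipeline (RegionIdentifier / Structurer), so this is only illustrative:
#     simplified = project.analyses.RegionSimplifier(region)
#     cleaned_region = simplified.result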
|
py | b41340f8d43d16b0e6e8ac0510348096393dbdec | # Import librairies
import time
import busio
import board
import displayio
import terminalio
import digitalio
import usb_hid
import adafruit_matrixkeypad
import adafruit_displayio_ssd1306
from adafruit_display_text import label
from digitalio import DigitalInOut
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from adafruit_datetime import datetime, date, timezone
# Setup HID
keyboard = Keyboard(usb_hid.devices)
# Setup Matrix for buttons
cols = [DigitalInOut(x) for x in (board.GP2, board.GP3, board.GP4, board.GP5, board.GP6, board.GP7, board.GP8)]
rows = [DigitalInOut(x) for x in (board.GP10, board.GP9)]
keys = ((1, 2, 3, 4, 5, 6, 7),
(8, 9, 10, 11, 12, 13, 14))
keypad = adafruit_matrixkeypad.Matrix_Keypad(rows, cols, keys)
# Remove all previous connections to displayio
displayio.release_displays()
# Display and border setup
i2c = busio.I2C(scl=board.GP1, sda=board.GP0)
display_bus = displayio.I2CDisplay(i2c, device_address = 0x3C)
oled = adafruit_displayio_ssd1306.SSD1306(display_bus, width=128, height=32)
splash = displayio.Group(max_size=10)
oled.show(splash)
color_bitmap = displayio.Bitmap(128, 32, 1) # Full screen white
color_palette = displayio.Palette(1)
color_palette[0] = 0xFFFFFF # White
bg_sprite = displayio.TileGrid(color_bitmap, pixel_shader=color_palette, x=0, y=0)
splash.append(bg_sprite)
inner_bitmap = displayio.Bitmap(118, 22, 1)
inner_palette = displayio.Palette(1)
inner_palette[0] = 0x000000
inner_sprite = displayio.TileGrid(inner_bitmap, pixel_shader=inner_palette, x=5, y=5)
splash.append(inner_sprite)
text_area = label.Label(terminalio.FONT, text=" "*32, color=0xFFFF00, x=10, y=15)
splash.append(text_area)
# Setup
displayTurnOffTime = 0
# Turn off display
oled.sleep()
# Function to display the string for 5s
def display_screen(message):
    oled.wake()
    global text_area
    global displayTurnOffTime
    text_area.text = message
displayTurnOffTime = time.time()
while True:
keys = keypad.pressed_keys
    # Keypad handling (feel free to change what each key does)
if keys:
if keys == [1]:
keyboard.press(Keycode.F14)
time.sleep(0.1)
keyboard.release(Keycode.F14)
display_screen("Vol -")
elif keys == [2]:
keyboard.press(Keycode.F15)
time.sleep(0.1)
keyboard.release(Keycode.F15)
display_screen("Vol +")
elif keys == [6]:
keyboard.send(Keycode.F16)
display_screen("Bulb Brightness -")
elif keys == [7]:
keyboard.send(Keycode.F17)
display_screen("Bulb Brightness +")
elif keys == [8]:
keyboard.send(Keycode.F18)
elif keys == [9]:
keyboard.send(Keycode.F19)
elif keys == [10]:
keyboard.send(Keycode.F20)
elif keys == [11]:
keyboard.send(Keycode.F21)
elif keys == [12]:
keyboard.send(Keycode.F22)
elif keys == [13]:
keyboard.send(Keycode.F23)
display_screen("Bulb Color Temp -")
elif keys == [14]:
keyboard.send(Keycode.F24)
display_screen("Bulb Color Temp +")
time.sleep(0.1)
    # If more than 5s have passed since the screen was turned on, turn it off
if ((displayTurnOffTime + 5) < time.time()) and displayTurnOffTime != 0:
text_area.text = " "
displayTurnOffTime = 0
oled.sleep() |
py | b413410dc34d8585e3061b805102d8ebcd915a2f | import json
import re
import zipfile
from typing import Optional
from django.http import HttpResponse, FileResponse, HttpResponseServerError
from core.model import Result
from tools import store, http_utils
class Service:
@classmethod
def get_prefix_pattern(cls) -> str:
pass
@classmethod
def make_url(cls, index) -> str:
pass
@classmethod
def get_url(cls, text: str) -> Optional[str]:
urls = re.findall(r'(?<=' + cls.get_prefix_pattern() + ')\w+', text, re.I | re.M)
if urls:
return cls.make_url(urls[0])
return None
@classmethod
def index(cls, url) -> Optional[str]:
index = re.findall(r'(?<=com\/)\w+', url)
try:
return index[0]
except IndexError:
return None
@classmethod
def fetch(cls, url: str, mode=0) -> Result:
"""
        Fetch the video URL.
:param url:
:param mode:
:return:
"""
pass
@staticmethod
def download_header():
pass
@classmethod
def download(cls, url: str) -> HttpResponse:
"""
        Download the video.
:param url:
:return:
"""
result = cls.fetch(url)
if result.is_success():
name = cls.index(url) + '.mp4'
url = result.get_data()
data = json.dumps({'name': name, 'url': url})
return HttpResponse(data)
return HttpResponseServerError(result.get_data())
@classmethod
def proxy_download(cls, vtype, url, header: dict, mode=1) -> HttpResponse:
        # Check whether the file is already cached
index = cls.index(url)
file, filename = store.find(vtype, index)
if file is not None:
return Service.stream(file, filename)
result = cls.fetch(url, mode=mode)
if not result.is_success():
return HttpResponseServerError(result.get_data())
if mode == 1:
header = header.copy()
header['referer'] = result.ref
if result.is_image():
res = store.save_image(vtype, result.get_data(), index)
if res is not None:
return res
else:
res = http_utils.get(url=result.get_data(), header=header)
if http_utils.is_error(res):
return HttpResponseServerError(str(res))
store.save(vtype, res, index)
res.close()
file, filename = store.find(vtype, index)
return Service.stream(file, filename)
@staticmethod
def stream(file, filename) -> HttpResponse:
try:
            # Set the response headers
            # StreamingHttpResponse streams the file content; useful when the payload is large
response = FileResponse(file)
            # Serve the file as a stream so that any file format can be downloaded
response['Content-Type'] = 'application/octet-stream'
            # Content-Disposition supplies a default filename when the user saves the response as a file
response['Content-Disposition'] = 'attachment;filename="{}"'.format(filename)
except Exception as e:
response = HttpResponse(e)
# finally:
# file.close()
return response
|
py | b413410e618d817b56d0cb712a1aa3c9c1b6b1f9 | class Intcode:
def __init__(self, instructions):
self.instructions = instructions
def interpreter(self, index, args=[]):
instruction = self.instructions[index] % 100
mode = int(self.instructions[index] / 100)
if instruction == 99:
return [-1]
elif instruction == 1: # add
return self.add(index, mode)
elif instruction == 2: # multiply
return self.multiply(index, mode)
elif instruction == 3: # read and store
return self.readAndStore(index, mode, args)
elif instruction == 4: # return value
return self.returnVal(index, mode)
def add(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
mode = int(mode / 10)
if mode % 10 == 0:
arg2 = self.instructions[self.instructions[index + 2]]
elif mode % 10 == 1:
arg2 = self.instructions[index + 2]
self.instructions[self.instructions[index + 3]] = arg1 + arg2
return [index + 4]
def multiply(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
mode = int(mode / 10)
if mode % 10 == 0:
arg2 = self.instructions[self.instructions[index + 2]]
elif mode % 10 == 1:
arg2 = self.instructions[index + 2]
self.instructions[self.instructions[index + 3]] = arg1 * arg2
return [index + 4]
def readAndStore(self, index, mode, args):
self.instructions[self.instructions[index + 1]] = args[0]
return [index + 2]
def returnVal(self, index, mode):
if mode % 10 == 0:
arg1 = self.instructions[self.instructions[index + 1]]
elif mode % 10 == 1:
arg1 = self.instructions[index + 1]
return [index + 2, arg1]
with open("input.txt") as f:
data = f.read()
instructions = list(map(int, data.split(",")))
interpreter = Intcode(instructions)
index = 0
args = [1]
while index != -1:
    res = interpreter.interpreter(index, args)
index = res[0]
    if len(res) > 1:
print(res[1])
|
py | b41341624d8cfd646a5417e93ee28972bab3de69 | from layers import *
import cPickle as pickle
import make_data
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='trainingprocess.log',
filemode='w')
class BiLSTMQA(object):
def __init__(self, voca_size, hidden_size, ydim, learn_rate=0.1):
self.nnet = StackedBiLSTM(voca_size, hidden_size, ydim)
def fit(self, x, y, vx=None, vy=None, max_epochs=10000, batch_size=5):
mask = self.__get_mask(x)
num_batches = x.shape[1] // batch_size
batch_idx = 0
for ep in range(max_epochs):
btx = x[:, batch_idx*batch_size:(batch_idx+1)*batch_size]
bty = y[batch_idx*batch_size:(batch_idx+1)*batch_size]
btmask = mask[:, batch_idx*batch_size:(batch_idx+1)*batch_size]
loss = self.nnet.train(btx, btmask, bty)
if ep%20 == 0:
print "in epoch %d/%d..."%(ep, max_epochs)
if batch_idx == 0:
ot = "in epoch %d/%d..."%(ep, max_epochs) + " loss: "+str(loss)
print ot
logging.info(ot)
"""
validate
if vx != None:
print self.score(vx, vy)
"""
batch_idx = (batch_idx+1) % num_batches
def predict(self, x):
mask = self.__get_mask(x)
return self.nnet.predict(x, mask)
def score(self, x, y):
prd = self.predict(x)
s = 0
for i in range(len(y)):
s += 1. if prd[i] == y[i] else 0.
return s/len(y)
def self_pickl(self, path="./data/bilstm_model.pkl"):
with open(path, "wb") as mf:
pickle.dump(self, mf)
def __get_mask(self, data):
mask = np.not_equal(data, 0).astype("int32")
return mask
if __name__ == '__main__':
train, valid = make_data.get_data(0.9)
train_x, train_y = train
valid_x, valid_y = valid
model = BiLSTMQA(12448, 100, 2)
model.fit(train_x, train_y, vx=valid_x, vy=valid_y)
|
py | b41341818015422816a85090221e313776ed0698 | import helpers
from colorama import Fore
def aceptarOpcionMenu():
opcion = -1
while (True):
inputUsuario = input(Fore.GREEN + '· Dime, ¿que opción deseas? ' + Fore.WHITE)
if (inputUsuario == 'F' or inputUsuario == 'f'):
            # Finish
return -1 # =========================================>
elif (helpers.testInputInt(inputUsuario, 1, 7)):
            # A valid option was selected
return int(inputUsuario) # ==========================>
else:
            # Invalid option
print(Fore.RED + '* ATENCION: Selecciona una opción valida ...' + Fore.WHITE)
def show():
helpers.clear()
print(Fore.GREEN + 'MENU')
print('====')
print('1 - Precio con impuestos')
print('2 - Importe intereses generados')
print('3 - Media aritmetica de tres números')
print('4 - Media ponderada de tres números')
print('5 - Area de un triángulo' )
print('6 - Calcular importe nómina')
print('7 - Gestión cuenta bancaria')
print('F - Finalizar')
print(Fore.WHITE)
return aceptarOpcionMenu() |
py | b41345a72d4f454e9c6eadb3703ff6e72f044feb | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import os
import random
import pickle
import esm
from torch.utils.data import TensorDataset, DataLoader
import emb_classifier
torch.cuda.empty_cache()
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-indir', type=str, required=False, default=None)
parser.add_argument('-outdir', type=str, required=False, default=None)
parser.add_argument('-data_dir', type=str, required=False, default='data/')
args = parser.parse_args()
return args
def suffle_n_batch(data, batch_size):
batched = []
random.shuffle(data)
for i in range(len(data)//batch_size+1):
batched.append(data[i*batch_size:i*batch_size+batch_size])
if len(batched[-1])==0:
return batched[:-1]
else:
return batched
def get_emb_esm1b(seq, LM_model, average=True):
B, L = seq.shape
L = L - 2 # remove start and end token
#LM_model.eval()
with torch.no_grad():
output = LM_model(seq, repr_layers=[33], return_contacts=True) # get the output from the language model
embedding = output['representations'][33][:,1:-1,:] # embedding size (1, L, 1280)
attention_map = output['attentions'][:,:,:,1:-1,1:-1] # attention map size (1, 33, 20, L, L)
attention_map = attention_map.reshape(B, 33*20, L, L).permute(0,2,3,1) # (1, L, L, 660)
    # Optionally average the embeddings along the sequence dimension (mean pooling)
if (average):
embedding = embedding.mean(1)
return embedding,attention_map
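# Hedged usage sketch (not from the original source): assuming `seq` is a
# (B, L+2) token tensor produced by alphabet.get_batch_converter(), then
#     emb, attn = get_emb_esm1b(seq, LM_model=LM_model, average=True)
# should yield `emb` of shape (B, 1280) and `attn` of shape (B, L, L, 660).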
def load_data(args):
with open(os.path.join(args.data_dir,'train_tuple_data.pkl'), 'rb') as f:
train = pickle.load(f)
with open(os.path.join(args.data_dir,'valid_tuple_data.pkl'), 'rb') as f:
valid = pickle.load(f)
return train, valid
#get arguments
args = get_args()
indir = args.indir
outdir = args.outdir
print('Args got')
print(f'indir {indir}')
print(f'outdir {outdir}')
# Loading and processing the data:
train, valid = load_data(args)
print('Data loaded')
#Preprocess data into tensors
LM_model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
batch_converter = alphabet.get_batch_converter()
print('ESM1b loaded')
#Convert data into format that esm1b will like
y_train, _, x_train = batch_converter(train)
y_val, _, x_val = batch_converter(valid)
y_train = torch.tensor(y_train)
y_val = torch.tensor(y_val)
# Instantiate the network
classifier = emb_classifier.Net()
# Load model from previous state if indir arg is specified
if indir is not None:
if os.path.exists(indir):
classifier.load_state_dict(torch.load(indir))
print(f'loaded model from {indir}')
# Instantiate the cross-entropy loss
criterion = nn.CrossEntropyLoss()
# Instantiate the Adam optimizer
optimizer = optim.Adam(classifier.parameters(),lr=3e-4)
print('Classifier, optimizer, and criterion compiled')
# Moving tensors over to gpu if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f'Using device {device}')
#x_train = x_train.to(device)
#y_train = y_train.to(device)
#x_val = x_val.to(device)
#y_val = y_val.to(device)
classifier = classifier.to(device)
LM_model = LM_model.to(device)
LM_model.eval()
print(f'Moved tensors to {device}')
trainset = TensorDataset(x_train, y_train)
valset = TensorDataset(x_val, y_val)
train_loader = DataLoader(trainset, shuffle=True, batch_size=1)
valid_loader = DataLoader(valset, shuffle=True, batch_size=1)
print('Dataloaders built')
num_parameters = sum(p.numel() for p in classifier.parameters() if p.requires_grad)
print('Number of parameters classifier: ', num_parameters)
num_parameters = sum(p.numel() for p in LM_model.parameters() if p.requires_grad)
print('Number of parameters esm1b: ', num_parameters)
grad_accum = 256
# CNN model training
count = 0
loss_list = []
iteration_list = []
accuracy_list = []
num_epochs = 100
output_dict = {}
print('Now beginning training')
torch.cuda.empty_cache()
for epoch in range(num_epochs):
for i, data in enumerate(train_loader):
seq, labels = data
seq = seq.to(device)
labels = labels.to(device)
x, _ = get_emb_esm1b(seq, LM_model=LM_model, average=True)
# Clear gradients
optimizer.zero_grad()
# Forward propagation
outputs = classifier(x)
# Calculate relu and cross entropy loss
loss = criterion(outputs, labels)/grad_accum
        print(f'outputs {outputs.tolist()} labels {labels.tolist()}')
# Calculating gradients
loss.backward()
if (i+1) % grad_accum == 0:
            total_norm = torch.nn.utils.clip_grad_norm_(classifier.parameters(), 1.0)
            # NaN != NaN, so this comparison detects a NaN gradient norm
            if not (total_norm == total_norm):
                print('Gradients are NaN')
optimizer.zero_grad()
continue
optimizer.step()
print('Train - epoch: '+str(epoch)+' batch: '+str(int((i+1)/grad_accum))+' loss: '+str(float(loss.data)*grad_accum))
count += 1
correct = 0
total = 0
valid_loss = 0
for j, val_data in enumerate(valid_loader):
with torch.no_grad():
val_seq, val_labels = val_data
val_seq = val_seq.to(device)
val_labels = val_labels.to(device)
val_x, _ = get_emb_esm1b(val_seq, LM_model=LM_model, average=True)
outputs = classifier(val_x)
loss_valid = criterion(outputs, val_labels)
# Get predictions from the maximum value
predicted = torch.max(outputs.data, 1)[1]
# Total number of labels
total += len(val_labels)
correct += (predicted == val_labels).sum()
valid_loss += float(loss_valid.data)
# print('valid_loss: ', valid_loss)
accuracy = 100 * correct / float(total)
            print('Valid - epoch: '+str(epoch) +
' loss: '+str(float(valid_loss/(j+1)))+' accuracy: '+str(float(accuracy)))
path = os.path.join(outdir,'save_model/model_'+str(epoch)+'.pt')
torch.save(classifier.state_dict(), path)
print('Model '+str(epoch)+' was saved.')
|
py | b4134621128be59b558afceda218c2108291f7b5 | from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from .common import *
from .common import _BulkAdExtensionBase
from .common import _BulkCampaignAdExtensionAssociation
from .common import _BulkAccountAdExtensionAssociation
_LocationAdExtension = type(_CAMPAIGN_OBJECT_FACTORY_V13.create('LocationAdExtension'))
class BulkLocationAdExtension(_BulkAdExtensionBase):
    """ Represents a location ad extension.
This class exposes the :attr:`location_ad_extension` property that can be read and written
as fields of the Location Ad Extension record in a bulk file.
For more information, see Location Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
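
    Hedged usage sketch (illustrative only, not from the original docs)::

        extension = _CAMPAIGN_OBJECT_FACTORY_V13.create('LocationAdExtension')
        bulk_entity = BulkLocationAdExtension(account_id=123, ad_extension=extension)
        # bulk_entity can then be written out with a BulkFileWriter.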
"""
def __init__(self, account_id=None, ad_extension=None):
if ad_extension and not isinstance(ad_extension, _LocationAdExtension):
raise ValueError('The type of ad_extension is: {0}, should be: {1}'.format(
type(ad_extension),
'LocationAdExtension'
))
super(BulkLocationAdExtension, self).__init__(
account_id=account_id,
ad_extension=ad_extension
)
@property
def location_ad_extension(self):
""" The location ad extension.
see Location Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
"""
return self._ad_extension
@location_ad_extension.setter
def location_ad_extension(self, value):
self._ad_extension = value
_MAPPINGS = [
_SimpleBulkMapping(
header=_StringTable.BusinessName,
field_to_csv=lambda c: c.location_ad_extension.CompanyName,
csv_to_field=lambda c, v: setattr(c.location_ad_extension, 'CompanyName', v)
),
_SimpleBulkMapping(
header=_StringTable.PhoneNumber,
field_to_csv=lambda c: bulk_optional_str(c.location_ad_extension.PhoneNumber, c.location_ad_extension.Id),
csv_to_field=lambda c, v: setattr(c.location_ad_extension, 'PhoneNumber', v if v else '')
),
_SimpleBulkMapping(
header=_StringTable.GeoCodeStatus,
field_to_csv=lambda c: bulk_str(c.location_ad_extension.GeoCodeStatus),
csv_to_field=lambda c, v: setattr(c.location_ad_extension, 'GeoCodeStatus', v if v else None)
),
_SimpleBulkMapping(
header=_StringTable.AddressLine1,
field_to_csv=lambda c: BulkLocationAdExtension.get_address_part(
c,
lambda x: x.StreetAddress
),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_address_part(
c,
lambda x: setattr(x, 'StreetAddress', v)
)
),
_SimpleBulkMapping(
header=_StringTable.AddressLine2,
field_to_csv=lambda c: BulkLocationAdExtension.get_address_part(
c,
lambda x: bulk_optional_str(x.StreetAddress2, c.location_ad_extension.Id)
),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_address_part(
c,
lambda x: setattr(x, 'StreetAddress2', v if v else '')
)
),
_SimpleBulkMapping(
header=_StringTable.City,
field_to_csv=lambda c: BulkLocationAdExtension.get_address_part(c, lambda x: x.CityName),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_address_part(
c,
lambda x: setattr(x, 'CityName', v)
)
),
_SimpleBulkMapping(
header=_StringTable.ProvinceName,
field_to_csv=lambda c: BulkLocationAdExtension.get_address_part(c, lambda x: x.ProvinceName),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_address_part(
c,
lambda x: setattr(x, 'ProvinceName', v)
)
),
_SimpleBulkMapping(
header=_StringTable.StateOrProvince,
field_to_csv=lambda c: BulkLocationAdExtension.get_address_part(c, lambda x: x.ProvinceCode),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_address_part(
c,
lambda x: setattr(x, 'ProvinceCode', v)
)
),
_SimpleBulkMapping(
header=_StringTable.PostalCode,
field_to_csv=lambda c: BulkLocationAdExtension.get_address_part(c, lambda x: x.PostalCode),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_address_part(
c,
lambda x: setattr(x, 'PostalCode', v)
)
),
_SimpleBulkMapping(
header=_StringTable.CountryCode,
field_to_csv=lambda c: BulkLocationAdExtension.get_address_part(c, lambda x: x.CountryCode),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_address_part(
c,
lambda x: setattr(x, 'CountryCode', v)
)
),
_SimpleBulkMapping(
header=_StringTable.Latitude,
field_to_csv=lambda c: BulkLocationAdExtension.get_geo_point_part(
c,
lambda x: bulk_str(
float(x.LatitudeInMicroDegrees) / 1000000.0
)
if x.LatitudeInMicroDegrees is not None else None
),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_geo_point_part(
c,
lambda x, latitude: setattr(x, 'LatitudeInMicroDegrees', int(round(float(latitude) * 1000000))),
v
)
),
_SimpleBulkMapping(
header=_StringTable.Longitude,
field_to_csv=lambda c: BulkLocationAdExtension.get_geo_point_part(
c,
lambda x: bulk_str(
float(x.LongitudeInMicroDegrees) / 1000000.0
)
if x.LongitudeInMicroDegrees is not None else None
),
csv_to_field=lambda c, v: BulkLocationAdExtension.set_geo_point_part(
c,
lambda x, longitude: setattr(x, 'LongitudeInMicroDegrees', int(round(float(longitude) * 1000000))),
v
)
),
]
@staticmethod
def get_address_part(bulk_ad_extension, get_func):
if bulk_ad_extension.location_ad_extension.Address is not None:
return get_func(bulk_ad_extension.location_ad_extension.Address)
else:
return None
@staticmethod
def set_address_part(bulk_ad_extension, set_func):
if bulk_ad_extension.location_ad_extension.Address is None:
bulk_ad_extension.location_ad_extension.Address = _CAMPAIGN_OBJECT_FACTORY_V13.create('Address')
set_func(bulk_ad_extension.location_ad_extension.Address)
@staticmethod
def get_geo_point_part(bulk_ad_extension, get_func):
if bulk_ad_extension.location_ad_extension.GeoPoint is not None:
return get_func(bulk_ad_extension.location_ad_extension.GeoPoint)
else:
return None
@staticmethod
def set_geo_point_part(bulk_ad_extension, set_func, value):
if not value:
return
if bulk_ad_extension.location_ad_extension.GeoPoint is None:
bulk_ad_extension.location_ad_extension.GeoPoint = _CAMPAIGN_OBJECT_FACTORY_V13.create('GeoPoint')
set_func(bulk_ad_extension.location_ad_extension.GeoPoint, value)
def process_mappings_from_row_values(self, row_values):
self.location_ad_extension = _CAMPAIGN_OBJECT_FACTORY_V13.create('LocationAdExtension')
self.location_ad_extension.Type = 'LocationAdExtension'
if row_values[_StringTable.Latitude] or row_values[_StringTable.Longitude]:
self.location_ad_extension.GeoPoint = _CAMPAIGN_OBJECT_FACTORY_V13.create('GeoPoint')
super(BulkLocationAdExtension, self).process_mappings_from_row_values(row_values)
row_values.convert_to_entity(self, BulkLocationAdExtension._MAPPINGS)
def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
self._validate_property_not_null(self.location_ad_extension, 'location_ad_extension')
super(BulkLocationAdExtension, self).process_mappings_to_row_values(row_values, exclude_readonly_data)
self.convert_to_values(row_values, BulkLocationAdExtension._MAPPINGS)
class BulkAccountLocationAdExtension(_BulkAccountAdExtensionAssociation):
""" Represents an account level location ad extension.
This class exposes properties that can be read and written
as fields of the Account Location Ad Extension record in a bulk file.
For more information, see Account Location Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
pass
class BulkCampaignLocationAdExtension(_BulkCampaignAdExtensionAssociation):
""" Represents a campaign level location ad extension.
This class exposes properties that can be read and written
as fields of the Campaign Location Ad Extension record in a bulk file.
For more information, see Campaign Location Ad Extension at https://go.microsoft.com/fwlink/?linkid=846127.
*See also:*
* :class:`.BulkServiceManager`
* :class:`.BulkOperation`
* :class:`.BulkFileReader`
* :class:`.BulkFileWriter`
"""
pass |
py | b41346e9a9df584f87f590ff328aa72aa2b5fe19 | import torch
import numpy as np
from torch import nn
from typing import Any, Dict, List, Type, Tuple, Union, Optional, Sequence
ModuleType = Type[nn.Module]
def miniblock(
input_size: int,
output_size: int = 0,
norm_layer: Optional[ModuleType] = None,
activation: Optional[ModuleType] = None,
) -> List[nn.Module]:
"""Construct a miniblock with given input/output-size, norm layer and \
activation."""
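    # For example (a hedged sketch, not from the original source):
    # miniblock(4, 8, nn.LayerNorm, nn.ReLU) should return
    # [nn.Linear(4, 8), nn.LayerNorm(8), nn.ReLU()].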
layers: List[nn.Module] = [nn.Linear(input_size, output_size)]
if norm_layer is not None:
layers += [norm_layer(output_size)] # type: ignore
if activation is not None:
layers += [activation()]
return layers
class MLP(nn.Module):
"""Simple MLP backbone.
Create a MLP of size input_dim * hidden_sizes[0] * hidden_sizes[1] * ...
* hidden_sizes[-1] * output_dim
:param int input_dim: dimension of the input vector.
:param int output_dim: dimension of the output vector. If set to 0, there
is no final linear layer.
    :param hidden_sizes: shape of MLP passed in as a list, not including
        input_dim and output_dim.
    :param norm_layer: use which normalization before activation, e.g.,
        ``nn.LayerNorm`` and ``nn.BatchNorm1d``. You can also pass a list of
        normalization modules with the same length as hidden_sizes, to use a
        different normalization module in different layers. Default to no
        normalization.
    :param activation: which activation to use after each layer; can be either
        the same activation for all layers if passed in as an nn.Module, or a
        different activation for different layers if passed in as a list.
        Default to nn.ReLU.
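
    Hedged usage sketch (not part of the original docstring): ``MLP(4, 2, (64, 64))``
    builds Linear(4, 64) -> ReLU -> Linear(64, 64) -> ReLU -> Linear(64, 2), so
    calling it on a (8, 4) input tensor should return a (8, 2) tensor.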
"""
def __init__(
self,
input_dim: int,
output_dim: int = 0,
hidden_sizes: Sequence[int] = (),
norm_layer: Optional[Union[ModuleType, Sequence[ModuleType]]] = None,
activation: Optional[Union[ModuleType, Sequence[ModuleType]]]
= nn.ReLU,
device: Optional[Union[str, int, torch.device]] = None,
) -> None:
super().__init__()
self.device = device
if norm_layer:
if isinstance(norm_layer, list):
assert len(norm_layer) == len(hidden_sizes)
norm_layer_list = norm_layer
else:
norm_layer_list = [
norm_layer for _ in range(len(hidden_sizes))]
else:
norm_layer_list = [None] * len(hidden_sizes)
if activation:
if isinstance(activation, list):
assert len(activation) == len(hidden_sizes)
activation_list = activation
else:
activation_list = [
activation for _ in range(len(hidden_sizes))]
else:
activation_list = [None] * len(hidden_sizes)
hidden_sizes = [input_dim] + list(hidden_sizes)
model = []
for in_dim, out_dim, norm, activ in zip(
hidden_sizes[:-1], hidden_sizes[1:],
norm_layer_list, activation_list):
model += miniblock(in_dim, out_dim, norm, activ)
if output_dim > 0:
model += [nn.Linear(hidden_sizes[-1], output_dim)]
self.output_dim = output_dim or hidden_sizes[-1]
self.model = nn.Sequential(*model)
def forward(
self, x: Union[np.ndarray, torch.Tensor]
) -> torch.Tensor:
x = torch.as_tensor(
x, device=self.device, dtype=torch.float32) # type: ignore
return self.model(x.flatten(1))
class Net(nn.Module):
"""Wrapper of MLP to support more specific DRL usage.
For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
:param state_shape: int or a sequence of int of the shape of state.
:param action_shape: int or a sequence of int of the shape of action.
:param hidden_sizes: shape of MLP passed in as a list.
    :param norm_layer: use which normalization before activation, e.g.,
        ``nn.LayerNorm`` and ``nn.BatchNorm1d``. You can also pass a list of
        normalization modules with the same length as hidden_sizes, to use a
        different normalization module in different layers. Default to no
        normalization.
    :param activation: which activation to use after each layer; can be either
        the same activation for all layers if passed in as an nn.Module, or a
        different activation for different layers if passed in as a list.
        Default to nn.ReLU.
:param device: specify the device when the network actually runs. Default
to "cpu".
:param bool softmax: whether to apply a softmax layer over the last layer's
output.
:param bool concat: whether the input shape is concatenated by state_shape
and action_shape. If it is True, ``action_shape`` is not the output
shape, but affects the input shape only.
:param int num_atoms: in order to expand to the net of distributional RL.
Default to 1 (not use).
:param bool dueling_param: whether to use dueling network to calculate Q
values (for Dueling DQN). If you want to use dueling option, you should
        pass a tuple of two dicts (first for Q and second for V) stating
        self-defined arguments as stated in
        :class:`~tianshou.utils.net.common.MLP`. Default to None.
.. seealso::
Please refer to :class:`~tianshou.utils.net.common.MLP` for more
detailed explanation on the usage of activation, norm_layer, etc.
You can also refer to :class:`~tianshou.utils.net.continuous.Actor`,
    :class:`~tianshou.utils.net.continuous.Critic`, etc., to see how it is
    suggested to be used.
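
    Hedged usage sketch (not part of the original docstring):
    ``Net(state_shape=4, action_shape=2, hidden_sizes=(64, 64))`` maps a
    (bsz, 4) observation to (bsz, 2) logits; passing ``dueling_param=({}, {})``
    additionally builds separate Q and V heads on top of the shared MLP trunk.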
"""
def __init__(
self,
state_shape: Union[int, Sequence[int]],
action_shape: Optional[Union[int, Sequence[int]]] = 0,
hidden_sizes: Sequence[int] = (),
norm_layer: Optional[ModuleType] = None,
activation: Optional[ModuleType] = nn.ReLU,
device: Union[str, int, torch.device] = "cpu",
softmax: bool = False,
concat: bool = False,
num_atoms: int = 1,
dueling_param: Optional[Tuple[Dict[str, Any], Dict[str, Any]]] = None,
) -> None:
super().__init__()
self.device = device
self.softmax = softmax
self.num_atoms = num_atoms
input_dim = np.prod(state_shape)
action_dim = np.prod(action_shape) * num_atoms
if concat:
input_dim += action_dim
self.use_dueling = dueling_param is not None
output_dim = action_dim if not self.use_dueling and not concat else 0
self.model = MLP(input_dim, output_dim, hidden_sizes,
norm_layer, activation, device)
self.output_dim = self.model.output_dim
if self.use_dueling: # dueling DQN
q_kwargs, v_kwargs = dueling_param # type: ignore
q_output_dim, v_output_dim = 0, 0
if not concat:
q_output_dim, v_output_dim = action_dim, num_atoms
q_kwargs: Dict[str, Any] = {
**q_kwargs, "input_dim": self.output_dim,
"output_dim": q_output_dim}
v_kwargs: Dict[str, Any] = {
**v_kwargs, "input_dim": self.output_dim,
"output_dim": v_output_dim}
self.Q, self.V = MLP(**q_kwargs), MLP(**v_kwargs)
self.output_dim = self.Q.output_dim
def forward(
self,
s: Union[np.ndarray, torch.Tensor],
state: Optional[Any] = None,
info: Dict[str, Any] = {},
) -> Tuple[torch.Tensor, Any]:
"""Mapping: s -> flatten (inside MLP)-> logits."""
logits = self.model(s)
bsz = logits.shape[0]
if self.use_dueling: # Dueling DQN
q, v = self.Q(logits), self.V(logits)
if self.num_atoms > 1:
q = q.view(bsz, -1, self.num_atoms)
v = v.view(bsz, -1, self.num_atoms)
logits = q - q.mean(dim=1, keepdim=True) + v
elif self.num_atoms > 1:
logits = logits.view(bsz, -1, self.num_atoms)
if self.softmax:
logits = torch.softmax(logits, dim=-1)
return logits, state
class Recurrent(nn.Module):
"""Simple Recurrent network based on LSTM.
For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
"""
def __init__(
self,
layer_num: int,
state_shape: Union[int, Sequence[int]],
action_shape: Union[int, Sequence[int]],
device: Union[str, int, torch.device] = "cpu",
hidden_layer_size: int = 128,
) -> None:
super().__init__()
self.device = device
self.nn = nn.LSTM(
input_size=hidden_layer_size,
hidden_size=hidden_layer_size,
num_layers=layer_num,
batch_first=True,
)
self.fc1 = nn.Linear(np.prod(state_shape), hidden_layer_size)
self.fc2 = nn.Linear(hidden_layer_size, np.prod(action_shape))
def forward(
self,
s: Union[np.ndarray, torch.Tensor],
state: Optional[Dict[str, torch.Tensor]] = None,
info: Dict[str, Any] = {},
) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]:
"""Mapping: s -> flatten -> logits.
        In evaluation mode, s should have shape ``[bsz, dim]``; in
        training mode, s should have shape ``[bsz, len, dim]``. See the code
        and comments for more detail.
"""
s = torch.as_tensor(
s, device=self.device, dtype=torch.float32) # type: ignore
# s [bsz, len, dim] (training) or [bsz, dim] (evaluation)
        # In short, the tensor has one more dimension in the training phase
        # than in the evaluation phase.
if len(s.shape) == 2:
s = s.unsqueeze(-2)
s = self.fc1(s)
self.nn.flatten_parameters()
if state is None:
s, (h, c) = self.nn(s)
else:
# we store the stack data in [bsz, len, ...] format
# but pytorch rnn needs [len, bsz, ...]
s, (h, c) = self.nn(s, (state["h"].transpose(0, 1).contiguous(),
state["c"].transpose(0, 1).contiguous()))
s = self.fc2(s[:, -1])
# please ensure the first dim is batch size: [bsz, len, ...]
return s, {"h": h.transpose(0, 1).detach(),
"c": c.transpose(0, 1).detach()}
|
py | b41347c7f5bf5311895ddb6b3c3f36f935df61f0 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._inputs import *
__all__ = ['PrivateEndpointConnection']
class PrivateEndpointConnection(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config_store_name: Optional[pulumi.Input[str]] = None,
private_endpoint: Optional[pulumi.Input[pulumi.InputType['PrivateEndpointArgs']]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
private_link_service_connection_state: Optional[pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A private endpoint connection
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] config_store_name: The name of the configuration store.
:param pulumi.Input[pulumi.InputType['PrivateEndpointArgs']] private_endpoint: The resource of private endpoint.
:param pulumi.Input[str] private_endpoint_connection_name: Private endpoint connection name
:param pulumi.Input[pulumi.InputType['PrivateLinkServiceConnectionStateArgs']] private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider.
:param pulumi.Input[str] resource_group_name: The name of the resource group to which the container registry belongs.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if config_store_name is None:
raise TypeError("Missing required property 'config_store_name'")
__props__['config_store_name'] = config_store_name
__props__['private_endpoint'] = private_endpoint
if private_endpoint_connection_name is None:
raise TypeError("Missing required property 'private_endpoint_connection_name'")
__props__['private_endpoint_connection_name'] = private_endpoint_connection_name
if private_link_service_connection_state is None:
raise TypeError("Missing required property 'private_link_service_connection_state'")
__props__['private_link_service_connection_state'] = private_link_service_connection_state
if resource_group_name is None:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:appconfiguration/latest:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:appconfiguration/v20191101preview:PrivateEndpointConnection"), pulumi.Alias(type_="azure-nextgen:appconfiguration/v20200701preview:PrivateEndpointConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(PrivateEndpointConnection, __self__).__init__(
'azure-nextgen:appconfiguration/v20200601:PrivateEndpointConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'PrivateEndpointConnection':
"""
Get an existing PrivateEndpointConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return PrivateEndpointConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> pulumi.Output[Optional['outputs.PrivateEndpointResponse']]:
"""
The resource of private endpoint.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> pulumi.Output['outputs.PrivateLinkServiceConnectionStateResponse']:
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning status of the private endpoint connection.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | b413481331988e41e9b58de9eac10af8ec216917 | import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
def preprocess(df):
print('----------------------------------------------')
print("Before preprocessing")
print("Number of rows with 0 values for each variable")
for col in df.columns:
missing_rows = df.loc[df[col]==0].shape[0]
print(col + ": " + str(missing_rows))
print('----------------------------------------------')
# Replace 0 values with the mean of the existing values
df['Glucose'] = df['Glucose'].replace(0, np.nan)
df['BloodPressure'] = df['BloodPressure'].replace(0, np.nan)
df['SkinThickness'] = df['SkinThickness'].replace(0, np.nan)
df['Insulin'] = df['Insulin'].replace(0, np.nan)
df['BMI'] = df['BMI'].replace(0, np.nan)
df['Glucose'] = df['Glucose'].fillna(df['Glucose'].mean())
df['BloodPressure'] = df['BloodPressure'].fillna(df['BloodPressure'].mean())
df['SkinThickness'] = df['SkinThickness'].fillna(df['SkinThickness'].mean())
df['Insulin'] = df['Insulin'].fillna(df['Insulin'].mean())
df['BMI'] = df['BMI'].fillna(df['BMI'].mean())
print('----------------------------------------------')
print("After preprocessing")
print("Number of rows with 0 values for each variable")
for col in df.columns:
missing_rows = df.loc[df[col]==0].shape[0]
print(col + ": " + str(missing_rows))
print('----------------------------------------------')
# Standardization
df_scaled = preprocessing.scale(df)
df_scaled = pd.DataFrame(df_scaled, columns=df.columns)
df_scaled['Outcome'] = df['Outcome']
df = df_scaled
return df |
py | b41349ac931379207742f0e9ac6e1ee6f97e1716 | # Standard Library
import calendar
from datetime import datetime
from json import loads
from uuid import uuid4
# 3rd Party
from preggy import expect
import tests.func.base # NOQA isort:skip pylint:disable=unused-import
def test_update1(client):
"""
Given API and Worker are UP
When I update a job's details
Then I can see its updated in API
"""
task_id = uuid4()
date = datetime.utcnow()
unixtime = calendar.timegm(date.utctimetuple())
status, body, _ = client.post(
f"/tasks/{task_id}/",
data={
"image": "ubuntu",
"command": "echo 'it works'",
"startAt": unixtime + 5000,
},
)
expect(status).to_equal(200)
result = loads(body)
expect(result["jobId"]).not_to_be_null()
job_id = result["jobId"]
status, body, _ = client.put(
f"/tasks/{task_id}/jobs/{job_id}/",
data={
"image": "ubuntu",
"command": "echo 'it was updated'",
"startAt": unixtime + 2,
},
)
result = loads(body)
expect(result["jobId"]).not_to_be_null()
job_url = result["jobUrl"]
meta = {}
expect(job_url).to_have_execution(cli=client, execution=meta, timeout=30)
expect(meta).to_include("url")
expect(meta).to_include("executionId")
expect(meta["url"]).to_have_finished_with(
status="done", log="it was updated", exitCode=0, cli=client
)
|
py | b4134a563b0ddf51173c9124b36b72a47b0ac79e | import setuptools
with open("README.md", "r") as rdm:
desc = rdm.read()
setuptools.setup(
name="pyTigerDriver",
version="v1.0.12",
author="Zrouga Mohamed",
author_email="[email protected]",
description="GSQL client for TigerGraph",
long_description=desc,
long_description_content_type="text/markdown",
keywords=['gsql', 'client','tigergraph'],
requires=["requests"],
url="https://github.com/Zrouga-Mohamed/pyTigerDriver ",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Operating System :: OS Independent",
"Topic :: Database",
],
python_requires='>=3.5'
)
|
py | b4134a7f82f795df1d9ccc3c73d977772e26f864 | # encoding: utf-8
"""
@author: liaoxingyu
@contact: [email protected]
"""
import logging
import torch
import torch.nn.functional as F
from torch import nn
__all__ = ["IBN", "get_norm"]
class BatchNorm(nn.BatchNorm2d):
def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0,
bias_init=0.0, **kwargs):
super().__init__(num_features, eps=eps, momentum=momentum)
if weight_init is not None: nn.init.constant_(self.weight, weight_init)
if bias_init is not None: nn.init.constant_(self.bias, bias_init)
self.weight.requires_grad_(not weight_freeze)
self.bias.requires_grad_(not bias_freeze)
class SyncBatchNorm(nn.SyncBatchNorm):
def __init__(self, num_features, eps=1e-05, momentum=0.1, weight_freeze=False, bias_freeze=False, weight_init=1.0,
bias_init=0.0):
super().__init__(num_features, eps=eps, momentum=momentum)
if weight_init is not None: nn.init.constant_(self.weight, weight_init)
if bias_init is not None: nn.init.constant_(self.bias, bias_init)
self.weight.requires_grad_(not weight_freeze)
self.bias.requires_grad_(not bias_freeze)
class IBN(nn.Module):
def __init__(self, planes, bn_norm, **kwargs):
super(IBN, self).__init__()
half1 = int(planes / 2)
self.half = half1
half2 = planes - half1
self.IN = nn.InstanceNorm2d(half1, affine=True)
self.BN = get_norm(bn_norm, half2, **kwargs)
def forward(self, x):
split = torch.split(x, self.half, 1)
out1 = self.IN(split[0].contiguous())
out2 = self.BN(split[1].contiguous())
out = torch.cat((out1, out2), 1)
return out
class GhostBatchNorm(BatchNorm):
def __init__(self, num_features, num_splits=1, **kwargs):
super().__init__(num_features, **kwargs)
self.num_splits = num_splits
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, input):
N, C, H, W = input.shape
if self.training or not self.track_running_stats:
self.running_mean = self.running_mean.repeat(self.num_splits)
self.running_var = self.running_var.repeat(self.num_splits)
outputs = F.batch_norm(
input.view(-1, C * self.num_splits, H, W), self.running_mean, self.running_var,
self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits),
True, self.momentum, self.eps).view(N, C, H, W)
self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0)
self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0)
return outputs
else:
return F.batch_norm(
input, self.running_mean, self.running_var,
self.weight, self.bias, False, self.momentum, self.eps)
class FrozenBatchNorm(nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
It contains non-trainable buffers called
"weight" and "bias", "running_mean", "running_var",
initialized to perform identity transformation.
The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
which are computed from the original four parameters of BN.
The affine transform `x * weight + bias` will perform the equivalent
computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
When loading a backbone model from Caffe2, "running_mean" and "running_var"
will be left unchanged as identity transformation.
Other pre-trained backbone models may contain all 4 parameters.
The forward is implemented by `F.batch_norm(..., training=False)`.
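
    Worked example of the equivalence above (a sketch, not in the original text):
    with running_mean = m and running_var = v, the frozen layer computes
    x * s + b where s = weight / sqrt(v + eps) and b = bias - m * s, which
    equals (x - m) / sqrt(v + eps) * weight + bias.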
"""
_version = 3
def __init__(self, num_features, eps=1e-5, **kwargs):
super().__init__()
self.num_features = num_features
self.eps = eps
self.register_buffer("weight", torch.ones(num_features))
self.register_buffer("bias", torch.zeros(num_features))
self.register_buffer("running_mean", torch.zeros(num_features))
self.register_buffer("running_var", torch.ones(num_features) - eps)
def forward(self, x):
if x.requires_grad:
# When gradients are needed, F.batch_norm will use extra memory
# because its backward op computes gradients for weight/bias as well.
scale = self.weight * (self.running_var + self.eps).rsqrt()
bias = self.bias - self.running_mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return x * scale + bias
else:
# When gradients are not needed, F.batch_norm is a single fused op
# and provide more optimization opportunities.
return F.batch_norm(
x,
self.running_mean,
self.running_var,
self.weight,
self.bias,
training=False,
eps=self.eps,
)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
version = local_metadata.get("version", None)
if version is None or version < 2:
# No running_mean/var in early versions
# This will silent the warnings
if prefix + "running_mean" not in state_dict:
state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
if prefix + "running_var" not in state_dict:
state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
if version is not None and version < 3:
logger = logging.getLogger(__name__)
logger.info("FrozenBatchNorm {} is upgraded to version 3.".format(prefix.rstrip(".")))
# In version < 3, running_var are used without +eps.
state_dict[prefix + "running_var"] -= self.eps
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def __repr__(self):
return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
@classmethod
def convert_frozen_batchnorm(cls, module):
"""
Convert BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
Args:
module (torch.nn.Module):
Returns:
If module is BatchNorm/SyncBatchNorm, returns a new module.
Otherwise, in-place convert module and return it.
Similar to convert_sync_batchnorm in
https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
"""
bn_module = nn.modules.batchnorm
bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
res = module
if isinstance(module, bn_module):
res = cls(module.num_features)
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for name, child in module.named_children():
new_child = cls.convert_frozen_batchnorm(child)
if new_child is not child:
res.add_module(name, new_child)
return res
def get_norm(norm, out_channels, **kwargs):
"""
Args:
norm (str or callable): either one of BN, GhostBN, FrozenBN, GN or SyncBN;
or a callable that takes a channel number and returns
the normalization layer as a nn.Module
out_channels: number of channels for normalization layer
Returns:
nn.Module or None: the normalization layer
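
    Hedged examples (not part of the original docstring): get_norm("BN", 64)
    returns BatchNorm(64), get_norm("GN", 64) returns nn.GroupNorm(32, 64),
    and get_norm("", 64) returns None.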
"""
if isinstance(norm, str):
if len(norm) == 0:
return None
norm = {
"BN": BatchNorm,
"syncBN": SyncBatchNorm,
"GhostBN": GhostBatchNorm,
"FrozenBN": FrozenBatchNorm,
"GN": lambda channels, **args: nn.GroupNorm(32, channels),
}[norm]
return norm(out_channels, **kwargs)
|
py | b4134b1d242b5226a5fcf4392128f9ae4737e77c | # -*- coding: utf-8 -*-
import networkx as nx
import itertools as it
from nose.tools import (assert_equal, assert_not_equal, assert_greater_equal,
assert_raises, assert_in)
from networkx.utils import pairwise
from networkx.algorithms.connectivity import (
bridge_components,
EdgeComponentAuxGraph,
)
from networkx.algorithms.connectivity.edge_kcomponents import (
general_k_edge_subgraphs,
)
# ----------------
# Helper functions
# ----------------
def fset(list_of_sets):
""" allows == to be used for list of sets """
return set(map(frozenset, list_of_sets))
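    # e.g. fset([{1, 2}, {3}]) == fset([{3}, {1, 2}]) is True, whereas the raw
    # lists of sets would not compare equal because list order matters.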
def _assert_subgraph_edge_connectivity(G, ccs_subgraph, k):
"""
tests properties of k-edge-connected subgraphs
the actual edge connectivity should be no less than k unless the cc is a
single node.
"""
for cc in ccs_subgraph:
C = G.subgraph(cc)
if len(cc) > 1:
connectivity = nx.edge_connectivity(C)
assert_greater_equal(connectivity, k)
def _memo_connectivity(G, u, v, memo):
edge = (u, v)
if edge in memo:
return memo[edge]
if not G.is_directed():
redge = (v, u)
if redge in memo:
return memo[redge]
memo[edge] = nx.edge_connectivity(G, *edge)
return memo[edge]
def _all_pairs_connectivity(G, cc, k, memo):
# Brute force check
for u, v in it.combinations(cc, 2):
# Use a memoization dict to save on computation
connectivity = _memo_connectivity(G, u, v, memo)
if G.is_directed():
connectivity = min(connectivity, _memo_connectivity(G, v, u, memo))
assert_greater_equal(connectivity, k)
def _assert_local_cc_edge_connectivity(G, ccs_local, k, memo):
"""
tests properties of k-edge-connected components
    the local edge connectivity between each pair of nodes in the original
graph should be no less than k unless the cc is a single node.
"""
for cc in ccs_local:
if len(cc) > 1:
# Strategy for testing a bit faster: If the subgraph has high edge
# connectivity then it must have local connectivity
C = G.subgraph(cc)
connectivity = nx.edge_connectivity(C)
if connectivity < k:
# Otherwise do the brute force (with memoization) check
_all_pairs_connectivity(G, cc, k, memo)
# Helper function
def _check_edge_connectivity(G):
"""
Helper - generates all k-edge-components using the aux graph. Checks the
both local and subgraph edge connectivity of each cc. Also checks that
alternate methods of computing the k-edge-ccs generate the same result.
"""
# Construct the auxiliary graph that can be used to make each k-cc or k-sub
aux_graph = EdgeComponentAuxGraph.construct(G)
# memoize the local connectivity in this graph
memo = {}
for k in it.count(1):
# Test "local" k-edge-components and k-edge-subgraphs
ccs_local = fset(aux_graph.k_edge_components(k))
ccs_subgraph = fset(aux_graph.k_edge_subgraphs(k))
        # Check connectivity properties that should be guaranteed by the
# algorithms.
_assert_local_cc_edge_connectivity(G, ccs_local, k, memo)
_assert_subgraph_edge_connectivity(G, ccs_subgraph, k)
if k == 1 or k == 2 and not G.is_directed():
assert_equal(ccs_local, ccs_subgraph,
'Subgraphs and components should be the same '
'when k == 1 or (k == 2 and not G.directed())')
if G.is_directed():
# Test special case methods are the same as the aux graph
if k == 1:
alt_sccs = fset(nx.strongly_connected_components(G))
assert_equal(alt_sccs, ccs_local, 'k=1 failed alt')
assert_equal(alt_sccs, ccs_subgraph, 'k=1 failed alt')
else:
# Test special case methods are the same as the aux graph
if k == 1:
alt_ccs = fset(nx.connected_components(G))
assert_equal(alt_ccs, ccs_local, 'k=1 failed alt')
assert_equal(alt_ccs, ccs_subgraph, 'k=1 failed alt')
elif k == 2:
alt_bridge_ccs = fset(bridge_components(G))
assert_equal(alt_bridge_ccs, ccs_local, 'k=2 failed alt')
assert_equal(alt_bridge_ccs, ccs_subgraph, 'k=2 failed alt')
# if new methods for k == 3 or k == 4 are implemented add them here
# Check the general subgraph method works by itself
alt_subgraph_ccs = fset([set(C.nodes()) for C in
general_k_edge_subgraphs(G, k=k)])
assert_equal(alt_subgraph_ccs, ccs_subgraph,
'alt subgraph method failed')
# Stop once k is larger than all special case methods
# and we cannot break down ccs any further.
if k > 2 and all(len(cc) == 1 for cc in ccs_local):
break
# ----------------
# Misc tests
# ----------------
def test_zero_k_exception():
G = nx.Graph()
# functions that return generators error immediately
assert_raises(ValueError, nx.k_edge_components, G, k=0)
assert_raises(ValueError, nx.k_edge_subgraphs, G, k=0)
# actual generators only error when you get the first item
aux_graph = EdgeComponentAuxGraph.construct(G)
assert_raises(ValueError, list, aux_graph.k_edge_components(k=0))
assert_raises(ValueError, list, aux_graph.k_edge_subgraphs(k=0))
assert_raises(ValueError, list, general_k_edge_subgraphs(G, k=0))
def test_empty_input():
G = nx.Graph()
assert_equal([], list(nx.k_edge_components(G, k=5)))
assert_equal([], list(nx.k_edge_subgraphs(G, k=5)))
G = nx.DiGraph()
assert_equal([], list(nx.k_edge_components(G, k=5)))
assert_equal([], list(nx.k_edge_subgraphs(G, k=5)))
def test_not_implemented():
G = nx.MultiGraph()
assert_raises(nx.NetworkXNotImplemented, EdgeComponentAuxGraph.construct, G)
assert_raises(nx.NetworkXNotImplemented, nx.k_edge_components, G, k=2)
assert_raises(nx.NetworkXNotImplemented, nx.k_edge_subgraphs, G, k=2)
assert_raises(nx.NetworkXNotImplemented, bridge_components, G)
assert_raises(nx.NetworkXNotImplemented, bridge_components, nx.DiGraph())
def test_general_k_edge_subgraph_quick_return():
# tests quick return optimization
G = nx.Graph()
G.add_node(0)
subgraphs = list(general_k_edge_subgraphs(G, k=1))
assert_equal(len(subgraphs), 1)
for subgraph in subgraphs:
assert_equal(subgraph.number_of_nodes(), 1)
G.add_node(1)
subgraphs = list(general_k_edge_subgraphs(G, k=1))
assert_equal(len(subgraphs), 2)
for subgraph in subgraphs:
assert_equal(subgraph.number_of_nodes(), 1)
# ----------------
# Undirected tests
# ----------------
def test_random_gnp():
# seeds = [1550709854, 1309423156, 4208992358, 2785630813, 1915069929]
seeds = [12, 13]
for seed in seeds:
G = nx.gnp_random_graph(20, 0.2, seed=seed)
_check_edge_connectivity(G)
def test_configuration():
# seeds = [2718183590, 2470619828, 1694705158, 3001036531, 2401251497]
seeds = [14, 15]
for seed in seeds:
deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000)
G = nx.Graph(nx.configuration_model(deg_seq, seed=seed))
G.remove_edges_from(nx.selfloop_edges(G))
_check_edge_connectivity(G)
def test_shell():
# seeds = [2057382236, 3331169846, 1840105863, 476020778, 2247498425]
seeds = [20]
for seed in seeds:
constructor = [(12, 70, 0.8), (15, 40, 0.6)]
G = nx.random_shell_graph(constructor, seed=seed)
_check_edge_connectivity(G)
def test_karate():
G = nx.karate_club_graph()
_check_edge_connectivity(G)
def test_tarjan_bridge():
# graph from tarjan paper
# RE Tarjan - "A note on finding the bridges of a graph"
# Information Processing Letters, 1974 - Elsevier
# doi:10.1016/0020-0190(74)90003-9.
# define 2-connected components and bridges
ccs = [(1, 2, 4, 3, 1, 4), (5, 6, 7, 5), (8, 9, 10, 8),
(17, 18, 16, 15, 17), (11, 12, 14, 13, 11, 14)]
bridges = [(4, 8), (3, 5), (3, 17)]
G = nx.Graph(it.chain(*(pairwise(path) for path in ccs + bridges)))
_check_edge_connectivity(G)
def test_bridge_cc():
# define 2-connected components and bridges
cc2 = [(1, 2, 4, 3, 1, 4), (8, 9, 10, 8), (11, 12, 13, 11)]
bridges = [(4, 8), (3, 5), (20, 21), (22, 23, 24)]
G = nx.Graph(it.chain(*(pairwise(path) for path in cc2 + bridges)))
bridge_ccs = fset(bridge_components(G))
target_ccs = fset([
{1, 2, 3, 4}, {5}, {8, 9, 10}, {11, 12, 13}, {20},
{21}, {22}, {23}, {24}
])
assert_equal(bridge_ccs, target_ccs)
_check_edge_connectivity(G)
def test_undirected_aux_graph():
# Graph similar to the one in
# http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
a, b, c, d, e, f, g, h, i = 'abcdefghi'
paths = [
(a, d, b, f, c),
(a, e, b),
(a, e, b, c, g, b, a),
(c, b),
(f, g, f),
(h, i)
]
G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
aux_graph = EdgeComponentAuxGraph.construct(G)
components_1 = fset(aux_graph.k_edge_subgraphs(k=1))
target_1 = fset([{a, b, c, d, e, f, g}, {h, i}])
assert_equal(target_1, components_1)
# Check that the undirected case for k=1 agrees with CCs
alt_1 = fset(nx.k_edge_subgraphs(G, k=1))
assert_equal(alt_1, components_1)
components_2 = fset(aux_graph.k_edge_subgraphs(k=2))
target_2 = fset([{a, b, c, d, e, f, g}, {h}, {i}])
assert_equal(target_2, components_2)
# Check that the undirected case for k=2 agrees with bridge components
alt_2 = fset(nx.k_edge_subgraphs(G, k=2))
assert_equal(alt_2, components_2)
components_3 = fset(aux_graph.k_edge_subgraphs(k=3))
target_3 = fset([{a}, {b, c, f, g}, {d}, {e}, {h}, {i}])
assert_equal(target_3, components_3)
components_4 = fset(aux_graph.k_edge_subgraphs(k=4))
target_4 = fset([{a}, {b}, {c}, {d}, {e}, {f}, {g}, {h}, {i}])
assert_equal(target_4, components_4)
_check_edge_connectivity(G)
def test_local_subgraph_difference():
paths = [
(11, 12, 13, 14, 11, 13, 14, 12), # first 4-clique
(21, 22, 23, 24, 21, 23, 24, 22), # second 4-clique
# paths connecting each node of the 4 cliques
(11, 101, 21),
(12, 102, 22),
(13, 103, 23),
(14, 104, 24),
]
G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
aux_graph = EdgeComponentAuxGraph.construct(G)
# Each clique is returned separately in k-edge-subgraphs
subgraph_ccs = fset(aux_graph.k_edge_subgraphs(3))
subgraph_target = fset([{101}, {102}, {103}, {104},
{21, 22, 23, 24}, {11, 12, 13, 14}])
assert_equal(subgraph_ccs, subgraph_target)
# But in k-edge-ccs they are returned together
# because they are locally 3-edge-connected
local_ccs = fset(aux_graph.k_edge_components(3))
local_target = fset([{101}, {102}, {103}, {104},
{11, 12, 13, 14, 21, 22, 23, 24}])
assert_equal(local_ccs, local_target)
def test_local_subgraph_difference_directed():
dipaths = [
(1, 2, 3, 4, 1),
(1, 3, 1),
]
G = nx.DiGraph(it.chain(*[pairwise(path) for path in dipaths]))
assert_equal(
fset(nx.k_edge_components(G, k=1)),
fset(nx.k_edge_subgraphs(G, k=1))
)
# Unlike undirected graphs, when k=2, for directed graphs there is a case
# where the k-edge-ccs are not the same as the k-edge-subgraphs.
    # (in undirected graphs ccs and subgraphs are the same when k=2)
assert_not_equal(
fset(nx.k_edge_components(G, k=2)),
fset(nx.k_edge_subgraphs(G, k=2))
)
assert_equal(
fset(nx.k_edge_components(G, k=3)),
fset(nx.k_edge_subgraphs(G, k=3))
)
_check_edge_connectivity(G)
def test_triangles():
paths = [
(11, 12, 13, 11), # first 3-clique
(21, 22, 23, 21), # second 3-clique
(11, 21), # connected by an edge
]
G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
# subgraph and ccs are the same in all cases here
assert_equal(
fset(nx.k_edge_components(G, k=1)),
fset(nx.k_edge_subgraphs(G, k=1))
)
assert_equal(
fset(nx.k_edge_components(G, k=2)),
fset(nx.k_edge_subgraphs(G, k=2))
)
assert_equal(
fset(nx.k_edge_components(G, k=3)),
fset(nx.k_edge_subgraphs(G, k=3))
)
_check_edge_connectivity(G)
def test_four_clique():
paths = [
(11, 12, 13, 14, 11, 13, 14, 12), # first 4-clique
(21, 22, 23, 24, 21, 23, 24, 22), # second 4-clique
# paths connecting the 4 cliques such that they are
# 3-connected in G, but not in the subgraph.
# Case where the nodes bridging them do not have degree less than 3.
(100, 13),
(12, 100, 22),
(13, 200, 23),
(14, 300, 24),
]
G = nx.Graph(it.chain(*[pairwise(path) for path in paths]))
# The subgraphs and ccs are different for k=3
local_ccs = fset(nx.k_edge_components(G, k=3))
subgraphs = fset(nx.k_edge_subgraphs(G, k=3))
assert_not_equal(local_ccs, subgraphs)
    # The cliques are in the same cc
clique1 = frozenset(paths[0])
clique2 = frozenset(paths[1])
assert_in(clique1.union(clique2).union({100}), local_ccs)
# but different subgraphs
assert_in(clique1, subgraphs)
assert_in(clique2, subgraphs)
assert_equal(G.degree(100), 3)
_check_edge_connectivity(G)
def test_five_clique():
    # Make a graph that can be disconnected by removing fewer than 4 edges, but
    # no node has degree less than 4.
G = nx.disjoint_union(nx.complete_graph(5), nx.complete_graph(5))
paths = [
# add aux-connections
(1, 100, 6), (2, 100, 7), (3, 200, 8), (4, 200, 100),
]
G.add_edges_from(it.chain(*[pairwise(path) for path in paths]))
assert_equal(min(dict(nx.degree(G)).values()), 4)
# For k=3 they are the same
assert_equal(
fset(nx.k_edge_components(G, k=3)),
fset(nx.k_edge_subgraphs(G, k=3))
)
# For k=4 they are different
# the aux nodes are in the same CC as clique 1 but not in the same subgraph
assert_not_equal(
fset(nx.k_edge_components(G, k=4)),
fset(nx.k_edge_subgraphs(G, k=4))
)
# For k=5 they are not the same
assert_not_equal(
fset(nx.k_edge_components(G, k=5)),
fset(nx.k_edge_subgraphs(G, k=5))
)
# For k=6 they are the same
assert_equal(
fset(nx.k_edge_components(G, k=6)),
fset(nx.k_edge_subgraphs(G, k=6))
)
_check_edge_connectivity(G)
# ----------------
# Directed tests
# ----------------
def test_directed_aux_graph():
# Graph similar to the one in
# http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0136264
a, b, c, d, e, f, g, h, i = 'abcdefghi'
dipaths = [
(a, d, b, f, c),
(a, e, b),
(a, e, b, c, g, b, a),
(c, b),
(f, g, f),
(h, i)
]
G = nx.DiGraph(it.chain(*[pairwise(path) for path in dipaths]))
aux_graph = EdgeComponentAuxGraph.construct(G)
components_1 = fset(aux_graph.k_edge_subgraphs(k=1))
target_1 = fset([{a, b, c, d, e, f, g}, {h}, {i}])
assert_equal(target_1, components_1)
# Check that the directed case for k=1 agrees with SCCs
alt_1 = fset(nx.strongly_connected_components(G))
assert_equal(alt_1, components_1)
components_2 = fset(aux_graph.k_edge_subgraphs(k=2))
target_2 = fset([{i}, {e}, {d}, {b, c, f, g}, {h}, {a}])
assert_equal(target_2, components_2)
components_3 = fset(aux_graph.k_edge_subgraphs(k=3))
target_3 = fset([{a}, {b}, {c}, {d}, {e}, {f}, {g}, {h}, {i}])
assert_equal(target_3, components_3)
def test_random_gnp_directed():
# seeds = [3894723670, 500186844, 267231174, 2181982262, 1116750056]
seeds = [21]
for seed in seeds:
G = nx.gnp_random_graph(20, 0.2, directed=True, seed=seed)
_check_edge_connectivity(G)
def test_configuration_directed():
# seeds = [671221681, 2403749451, 124433910, 672335939, 1193127215]
seeds = [67]
for seed in seeds:
deg_seq = nx.random_powerlaw_tree_sequence(20, seed=seed, tries=5000)
G = nx.DiGraph(nx.configuration_model(deg_seq, seed=seed))
G.remove_edges_from(nx.selfloop_edges(G))
_check_edge_connectivity(G)
def test_shell_directed():
# seeds = [3134027055, 4079264063, 1350769518, 1405643020, 530038094]
seeds = [31]
for seed in seeds:
constructor = [(12, 70, 0.8), (15, 40, 0.6)]
G = nx.random_shell_graph(constructor, seed=seed).to_directed()
_check_edge_connectivity(G)
def test_karate_directed():
G = nx.karate_club_graph().to_directed()
_check_edge_connectivity(G)
|
py | b4134bdfc05937e7009b5fc16360797f0365ec03 | # -*- coding: utf-8 -*-
"""
kombu.async.timer
=================
Timer scheduling Python callbacks.
"""
from __future__ import absolute_import
import heapq
import sys
from collections import namedtuple
from datetime import datetime
from functools import wraps
from time import time
from weakref import proxy as weakrefproxy
from kombu.five import monotonic
from kombu.log import get_logger
from kombu.utils.compat import timedelta_seconds
try:
from pytz import utc
except ImportError:
utc = None
DEFAULT_MAX_INTERVAL = 2
EPOCH = datetime.utcfromtimestamp(0).replace(tzinfo=utc)
IS_PYPY = hasattr(sys, 'pypy_version_info')
logger = get_logger(__name__)
__all__ = ['Entry', 'Timer', 'to_timestamp']
scheduled = namedtuple('scheduled', ('eta', 'priority', 'entry'))
def to_timestamp(d, default_timezone=utc):
if isinstance(d, datetime):
if d.tzinfo is None:
d = d.replace(tzinfo=default_timezone)
return timedelta_seconds(d - EPOCH)
return d
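# Illustrative examples (assumption, not part of kombu): a timezone-naive
# datetime is interpreted as ``default_timezone`` (UTC) before conversion,
# and non-datetime values are returned unchanged.
#
#     to_timestamp(datetime(1970, 1, 1, 0, 1, tzinfo=utc))  # -> 60.0
#     to_timestamp(42.5)                                     # -> 42.5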
class Entry(object):
if not IS_PYPY: # pragma: no cover
__slots__ = (
'fun', 'args', 'kwargs', 'tref', 'cancelled',
'_last_run', '__weakref__',
)
def __init__(self, fun, args=None, kwargs=None):
self.fun = fun
self.args = args or []
self.kwargs = kwargs or {}
self.tref = weakrefproxy(self)
self._last_run = None
self.cancelled = False
def __call__(self):
return self.fun(*self.args, **self.kwargs)
def cancel(self):
try:
self.tref.cancelled = True
except ReferenceError: # pragma: no cover
pass
def __repr__(self):
return '<TimerEntry: {0}(*{1!r}, **{2!r})>'.format(
self.fun.__name__, self.args, self.kwargs)
def __hash__(self):
return hash((self.fun, self.args, self.kwargs))
if sys.version_info[0] == 3: # pragma: no cover
def __lt__(self, other):
return hash(self) < hash(other)
def __gt__(self, other):
return hash(self) > hash(other)
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return not self.__eq__(other)
class Timer(object):
"""ETA scheduler."""
Entry = Entry
on_error = None
def __init__(self, max_interval=None, on_error=None, **kwargs):
self.max_interval = float(max_interval or DEFAULT_MAX_INTERVAL)
self.on_error = on_error or self.on_error
self._queue = []
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stop()
def call_at(self, eta, fun, args=(), kwargs={}, priority=0):
return self.enter_at(self.Entry(fun, args, kwargs), eta, priority)
def call_after(self, secs, fun, args=(), kwargs={}, priority=0):
return self.enter_after(secs, self.Entry(fun, args, kwargs), priority)
def call_repeatedly(self, secs, fun, args=(), kwargs={}, priority=0):
tref = self.Entry(fun, args, kwargs)
@wraps(fun)
def _reschedules(*args, **kwargs):
last, now = tref._last_run, monotonic()
lsince = (now - tref._last_run) if last else secs
try:
if lsince and lsince >= secs:
tref._last_run = now
return fun(*args, **kwargs)
finally:
if not tref.cancelled:
last = tref._last_run
next = secs - (now - last) if last else secs
self.enter_after(next, tref, priority)
tref.fun = _reschedules
tref._last_run = None
return self.enter_after(secs, tref, priority)
def enter_at(self, entry, eta=None, priority=0, time=time):
"""Enter function into the scheduler.
:param entry: Item to enter.
:keyword eta: Scheduled time as a :class:`datetime.datetime` object.
:keyword priority: Unused.
"""
if eta is None:
eta = time()
if isinstance(eta, datetime):
try:
eta = to_timestamp(eta)
except Exception as exc:
if not self.handle_error(exc):
raise
return
return self._enter(eta, priority, entry)
def enter_after(self, secs, entry, priority=0, time=time):
return self.enter_at(entry, time() + secs, priority)
def _enter(self, eta, priority, entry, push=heapq.heappush):
push(self._queue, scheduled(eta, priority, entry))
return entry
def apply_entry(self, entry):
try:
entry()
except Exception as exc:
if not self.handle_error(exc):
logger.error('Error in timer: %r', exc, exc_info=True)
def handle_error(self, exc_info):
if self.on_error:
self.on_error(exc_info)
return True
def stop(self):
pass
def __iter__(self, min=min, nowfun=time,
pop=heapq.heappop, push=heapq.heappush):
"""This iterator yields a tuple of ``(entry, wait_seconds)``,
where if entry is :const:`None` the caller should wait
for ``wait_seconds`` until it polls the schedule again."""
max_interval = self.max_interval
queue = self._queue
while 1:
if queue:
eventA = queue[0]
now, eta = nowfun(), eventA[0]
if now < eta:
yield min(eta - now, max_interval), None
else:
eventB = pop(queue)
if eventB is eventA:
entry = eventA[2]
if not entry.cancelled:
yield None, entry
continue
else:
push(queue, eventB)
else:
yield None, None
def clear(self):
self._queue[:] = [] # atomic, without creating a new list.
def cancel(self, tref):
tref.cancel()
def __len__(self):
return len(self._queue)
def __nonzero__(self):
return True
@property
def queue(self, _pop=heapq.heappop):
"""Snapshot of underlying datastructure."""
events = list(self._queue)
return [_pop(v) for v in [events] * len(events)]
@property
def schedule(self):
return self
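# Illustrative usage sketch (not part of the original module; the callback and
# timings are made up for the example): an entry is scheduled with call_after()
# and the caller drives the timer by iterating over it, sleeping whenever no
# entry is due yet.
if __name__ == '__main__':  # pragma: no cover
    from time import sleep

    demo_timer = Timer()
    demo_timer.call_after(0.5, lambda: sys.stderr.write('tick\n'))
    for wait, entry in demo_timer:
        if entry is not None:
            demo_timer.apply_entry(entry)   # an entry is due: run it
        elif wait is not None:
            sleep(wait)                     # nothing due yet: wait, then poll again
        else:
            break                           # schedule is empty: stop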
|
py | b4134c35fa50389b71f78c139b32a1bad03de69b | # Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Networks used in discrete-action agents."""
import sonnet as snt
import tensorflow as tf
class DiscreteFilteredQNetwork(snt.Module):
"""Discrete filtered Q-network.
This produces filtered Q values according to the method used in the discrete
BCQ algorithm (https://arxiv.org/pdf/1910.01708.pdf - section 4).
"""
def __init__(self,
g_network: snt.Module,
q_network: snt.Module,
threshold: float):
super().__init__(name='discrete_filtered_qnet')
assert threshold >= 0 and threshold <= 1
self.g_network = g_network
self.q_network = q_network
self._threshold = threshold
def __call__(self, o_t: tf.Tensor) -> tf.Tensor:
q_t = self.q_network(o_t)
g_t = tf.nn.softmax(self.g_network(o_t))
normalized_g_t = g_t / tf.reduce_max(g_t, axis=-1, keepdims=True)
# Filter actions based on g_network outputs.
min_q = tf.reduce_min(q_t, axis=-1, keepdims=True)
return tf.where(normalized_g_t >= self._threshold, q_t, min_q)
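# Illustrative usage sketch (not part of the original module; the layer sizes,
# batch shape and threshold below are assumptions chosen for the example):
# build a filtered Q-network from two small MLP heads and query it on a random
# batch of observations.
if __name__ == '__main__':
  num_actions = 4
  g_head = snt.nets.MLP([64, num_actions])   # generative/behaviour head (g)
  q_head = snt.nets.MLP([64, num_actions])   # Q-value head
  network = DiscreteFilteredQNetwork(g_network=g_head,
                                     q_network=q_head,
                                     threshold=0.3)
  observations = tf.random.normal([8, 10])   # batch of 8 ten-dimensional observations
  filtered_q = network(observations)         # filtered-out actions receive the min Q-value
  print(filtered_q.shape)                    # (8, 4)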
|
py | b4134c824e15ccf564a6a3dacb0d65cb39a246ff | # coding: utf-8
from unittest import TestCase
from tools.error import DataNotFound
from tools.text import find_number, drop_space, normalize_space
class TextTestCase(TestCase):
def test_find_number(self):
self.assertEqual(2, find_number('2'))
self.assertEqual(2, find_number('foo 2 4 bar'))
self.assertEqual('2', find_number('foo 2 4 bar', make_int=False))
self.assertEqual(24, find_number('foo 2 4 bar', ignore_spaces=True))
self.assertEqual(24, find_number(u'бешеный 2 4 барсук', ignore_spaces=True))
self.assertRaises(DataNotFound,
lambda: find_number('foo'))
self.assertRaises(DataNotFound,
lambda: find_number(u'фыва'))
def test_drop_space(self):
self.assertEqual('', drop_space(' '))
self.assertEqual('f', drop_space(' f '))
self.assertEqual('fb', drop_space(' f b '))
self.assertEqual(u'триглаза', drop_space(u' тр и гла' + '\t' + '\n' + u' за '))
def test_normalize_space(self):
self.assertEqual('', normalize_space(' '))
self.assertEqual('f', normalize_space(' f '))
self.assertEqual('f b', normalize_space(' f b '))
self.assertEqual(u'тр и гла за', normalize_space(u' тр и гла' + '\t' + '\n' + u' за '))
self.assertEqual(u'тр_и_гла_за', normalize_space(u' тр и гла' + '\t' + '\n' + u' за ', replace='_'))
self.assertEqual(u'трABCиABCглаABCза', normalize_space(u' тр и гла' + '\t' + '\n' + u' за ', replace='ABC'))
|
py | b4134d7a379510842d2a6dc253f9c3cf462b119b | from sqlalchemy import Column, Float, ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from nonbonded.backend.database.models import Base
class ForceBalancePrior(Base):
__tablename__ = "force_balance_priors"
id = Column(Integer, primary_key=True, index=True)
parent_id = Column(Integer, ForeignKey("force_balance.id"))
parameter_type = Column(String, nullable=False)
value = Column(Float, nullable=False)
class ForceBalance(Base):
__tablename__ = "force_balance"
id = Column(Integer, primary_key=True, index=True)
parent_id = Column(Integer, ForeignKey("optimizations.id"), nullable=False)
priors = relationship("ForceBalancePrior", cascade="all, delete-orphan")
convergence_step_criteria = Column(Float, nullable=False)
convergence_objective_criteria = Column(Float, nullable=False)
convergence_gradient_criteria = Column(Float, nullable=False)
n_criteria = Column(Integer, nullable=False)
initial_trust_radius = Column(Float, nullable=False)
minimum_trust_radius = Column(Float, nullable=False)
|
py | b4134d7df1036ed179ac4475006f136709b082a3 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The arithmetic circuit library."""
from .functional_pauli_rotations import FunctionalPauliRotations
from .integer_comparator import IntegerComparator
from .linear_pauli_rotations import LinearPauliRotations
from .piecewise_linear_pauli_rotations import PiecewiseLinearPauliRotations
from .piecewise_polynomial_pauli_rotations import PiecewisePolynomialPauliRotations
from .polynomial_pauli_rotations import PolynomialPauliRotations
from .weighted_adder import WeightedAdder
from .quadratic_form import QuadraticForm
from .linear_amplitude_function import LinearAmplitudeFunction
from .adders import VBERippleCarryAdder, CDKMRippleCarryAdder, DraperQFTAdder
from .piecewise_chebyshev import PiecewiseChebyshev
from .multipliers import HRSCumulativeMultiplier, RGQFTMultiplier
|
py | b4134d9ca8c2917a81985fbe2f7aa6b0b8bc4434 | import yaml
import os
import re
import sys
import httplib2
import socket
import pkg_resources
import threading
import base64
import json
from docassemble.base.generate_key import random_string
from distutils.version import LooseVersion
dbtableprefix = None
daconfig = dict()
s3_config = dict()
S3_ENABLED = False
gc_config = dict()
GC_ENABLED = False
azure_config = dict()
AZURE_ENABLED = False
hostname = None
loaded = False
in_celery = False
errors = list()
env_messages = list()
def env_true_false(var):
value = str(os.getenv(var, 'false')).lower().strip()
return value == 'true'
def env_exists(var):
value = os.getenv(var)
return value is not None
def env_translate(var):
value = str(os.getenv(var)).strip()
if value in ('true', 'True'):
return True
if value in ('false', 'False'):
return False
if value in ('null', 'None', 'Null'):
return None
if re.match(r'^\-?[0-9]+$', value):
return int(value)
return value
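# Illustrative examples (assumption, not part of docassemble): env_translate()
# coerces the raw environment-variable string into a Python value.
#
#     'true' / 'True'    -> True
#     'false' / 'False'  -> False
#     'null' / 'None'    -> None
#     '-42'              -> -42  (int)
#     'redis://myhost'   -> 'redis://myhost'  (left as a string)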
def override_config(the_config, messages, key, var, pre_key=None):
value = env_translate(var)
if value == '':
return
if value is None and (key in ('redis', 'rabbitmq', 'log server') or (pre_key == 'db' and key == 'host')):
return
if pre_key is None:
if key in the_config and str(the_config[key]) != str(value):
messages.append("The value of configuration key %s has been replaced with %s based on the value of environment variable %s" % (key, value, var))
elif key not in the_config:
messages.append("The value of configuration key %s has been set to %s based on the value of environment variable %s" % (key, value, var))
the_config[key] = value
else:
if pre_key not in the_config:
the_config[pre_key] = dict()
if key in the_config[pre_key] and str(the_config[pre_key][key]) != str(value):
messages.append("The value of configuration key %s in %s has been replaced with %s based on the value of environment variable %s" % (key, pre_key, value, var))
elif key not in the_config[pre_key]:
messages.append("The value of configuration key %s in %s has been set to %s based on the value of environment variable %s" % (key, pre_key, value, var))
the_config[pre_key][key] = value
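# Illustrative example (assumption, not part of docassemble): with the
# environment variable DBHOST set to 'db.example.com',
#
#     override_config(daconfig, messages, 'host', 'DBHOST', pre_key='db')
#
# sets daconfig['db']['host'] = 'db.example.com' and appends a human-readable
# note about the override to the messages list.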
def config_error(error):
errors.append(error)
sys.stderr.write(error + "\n")
def cleanup_filename(filename):
filename = filename.strip()
parts = filename.split(':')
if len(parts) != 2 or re.search(r'\s', parts[0]):
return None
if not parts[0].startswith('docassemble.playground') and not parts[1].startswith('data/questions/'):
return parts[0] + ':' + 'data/questions/' + parts[1]
return filename
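# Illustrative examples (assumption, not part of docassemble): cleanup_filename()
# normalizes "package:file" interview references.
#
#     'docassemble.demo:questions.yml'
#         -> 'docassemble.demo:data/questions/questions.yml'
#     'docassemble.demo:data/questions/questions.yml'
#         -> returned unchanged
#     'no-colon-here'
#         -> None (rejected)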
def delete_environment():
for var in ('DBSSLMODE', 'DBSSLCERT', 'DBSSLKEY', 'DBSSLROOTCERT', 'DBTYPE', 'DBPREFIX', 'DBNAME', 'DBUSER', 'DBPASSWORD', 'DBHOST', 'DBPORT', 'DBTABLEPREFIX', 'DBBACKUP', 'DASECRETKEY', 'DABACKUPDAYS', 'ENVIRONMENT_TAKES_PRECEDENCE', 'DASTABLEVERSION', 'DASSLPROTOCOLS', 'SERVERADMIN', 'REDIS', 'REDISCLI', 'RABBITMQ', 'DACELERYWORKERS', 'S3ENABLE', 'S3ACCESSKEY', 'S3SECRETACCESSKEY', 'S3BUCKET', 'S3REGION', 'S3ENDPOINTURL', 'AZUREENABLE', 'AZUREACCOUNTKEY', 'AZUREACCOUNTNAME', 'AZURECONTAINER', 'AZURECONNECTIONSTRING', 'EC2', 'COLLECTSTATISTICS', 'KUBERNETES', 'LOGSERVER', 'USECLOUDURLS', 'USEMINIO', 'USEHTTPS', 'USELETSENCRYPT', 'LETSENCRYPTEMAIL', 'BEHINDHTTPSLOADBALANCER', 'XSENDFILE', 'DAUPDATEONSTART', 'URLROOT', 'DAHOSTNAME', 'DAEXPOSEWEBSOCKETS', 'DAWEBSOCKETSIP', 'DAWEBSOCKETSPORT', 'POSTURLROOT', 'DAWEBSERVER', 'DASQLPING', 'PORT', 'OTHERLOCALES', 'DAMAXCONTENTLENGTH', 'DACELERYWORKERS', 'PACKAGES', 'PYTHONPACKAGES', 'DAALLOWUPDATES', 'AWS_SECRET_ACCESS_KEY', 'AWS_ACCESS_KEY_ID', 'S4CMD_OPTS', 'WSGIROOT', 'DATIMEOUT'):
if var in os.environ:
del os.environ[var]
this_thread = threading.local()
this_thread.botoclient = dict()
def aws_get_region(arn):
m = re.search(r'arn:aws:secretsmanager:([^:]+):', arn)
if m:
return m.group(1)
return 'us-east-1'
def aws_get_secret(data):
region = aws_get_region(data)
if region not in this_thread.botoclient:
import boto3
if env_exists('AWSACCESSKEY') and env_exists('AWSSECRETACCESSKEY'):
sys.stderr.write("Using access keys\n")
session = boto3.session.Session(aws_access_key_id=os.environ['AWSACCESSKEY'], aws_secret_access_key=os.environ['AWSSECRETACCESSKEY'])
else:
sys.stderr.write("Not using access keys\n")
session = boto3.session.Session()
this_thread.botoclient[region] = session.client(
service_name='secretsmanager',
region_name=region,
)
try:
response = this_thread.botoclient[region].get_secret_value(SecretId=data)
except Exception as e:
if e.__class__.__name__ == 'ClientError':
if e.response['Error']['Code'] == 'DecryptionFailureException':
sys.stderr.write("aws_get_secret: Secrets Manager can't decrypt the protected secret text using the provided KMS key.\n")
elif e.response['Error']['Code'] == 'InternalServiceErrorException':
sys.stderr.write("aws_get_secret: An error occurred on the server side.\n")
elif e.response['Error']['Code'] == 'InvalidParameterException':
sys.stderr.write("aws_get_secret: You provided an invalid value for a parameter.\n")
elif e.response['Error']['Code'] == 'InvalidRequestException':
sys.stderr.write("aws_get_secret: You provided a parameter value that is not valid for the current state of the resource.\n")
elif e.response['Error']['Code'] == 'ResourceNotFoundException':
sys.stderr.write("aws_get_secret: We can't find the resource that you asked for.")
else:
sys.stderr.write("aws_get_secret: " + e.__class__.__name__ + ": " + str(e) + "\n")
else:
sys.stderr.write("aws_get_secret: " + e.__class__.__name__ + ": " + str(e) + "\n")
return data
if 'SecretString' in response:
result = response['SecretString']
else:
result = base64.b64decode(response['SecretBinary'])
try:
result = json.loads(result)
except:
sys.stderr.write("aws_get_secret: problem decoding JSON\n")
return result
def recursive_fetch_cloud(data):
if isinstance(data, str):
if data.startswith('arn:aws:secretsmanager:'):
data = aws_get_secret(data.strip())
return data
elif isinstance(data, (int, float, bool)):
return data
elif isinstance(data, list):
return [recursive_fetch_cloud(y) for y in data]
elif isinstance(data, dict):
return {k: recursive_fetch_cloud(v) for k, v in data.items()}
elif isinstance(data, set):
return {recursive_fetch_cloud(y) for y in data}
elif isinstance(data, tuple):
return tuple([recursive_fetch_cloud(y) for y in data])
else:
return data
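# Illustrative example (assumption, not part of docassemble; the ARN below is a
# placeholder): recursive_fetch_cloud() walks the nested configuration and
# replaces any string that is an AWS Secrets Manager ARN with the fetched
# secret, leaving every other value untouched.
#
#     recursive_fetch_cloud({'db': {'password': 'arn:aws:secretsmanager:us-east-1:123456789012:secret:example'}})
#     # -> {'db': {'password': <value returned by Secrets Manager>}}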
def load(**kwargs):
global daconfig
global s3_config
global S3_ENABLED
global gc_config
global GC_ENABLED
global azure_config
global AZURE_ENABLED
global dbtableprefix
global hostname
global loaded
global in_celery
global env_messages
# changed = False
filename = None
if 'arguments' in kwargs and isinstance(kwargs['arguments'], list) and len(kwargs['arguments']) > 1:
for arg in kwargs['arguments'][1:]:
if arg.startswith('--'):
continue
if os.path.isfile(arg):
filename = arg
if filename is None:
filename = kwargs.get('filename', os.getenv('DA_CONFIG_FILE', '/usr/share/docassemble/config/config.yml'))
if 'in_celery' in kwargs and kwargs['in_celery']:
in_celery = True
if not os.path.isfile(filename):
if not os.access(os.path.dirname(filename), os.W_OK):
sys.stderr.write("Configuration file " + str(filename) + " does not exist and cannot be created\n")
sys.exit(1)
with open(filename, 'w') as config_file:
config_file.write(default_config())
sys.stderr.write("Wrote configuration file to " + str(filename) + "\n")
if not os.path.isfile(filename):
sys.stderr.write("Configuration file " + str(filename) + " does not exist. Trying default instead.\n")
filename = '/usr/share/docassemble/config/config.yml'
if not os.path.isfile(filename):
sys.stderr.write("Configuration file " + str(filename) + " does not exist.\n")
sys.exit(1)
with open(filename, 'r', encoding='utf-8') as stream:
raw_daconfig = yaml.load(stream, Loader=yaml.FullLoader)
if raw_daconfig is None:
sys.stderr.write("Could not open configuration file from " + str(filename) + "\n")
with open(filename, 'r', encoding='utf-8') as fp:
sys.stderr.write(fp.read() + "\n")
sys.exit(1)
daconfig.clear()
raw_daconfig = recursive_fetch_cloud(raw_daconfig)
if raw_daconfig.get('config from', None) and isinstance(raw_daconfig['config from'], dict):
raw_daconfig.update(raw_daconfig['config from'])
del raw_daconfig['config from']
for key, val in raw_daconfig.items():
if re.search(r'_', key):
config_error("Configuration keys may not contain underscores. Your configuration key " + str(key) + " has been converted.")
daconfig[re.sub(r'_', r' ', key)] = val
else:
daconfig[key] = val
if 'avconv' in daconfig:
config_error("The Configuration directive avconv has been renamed ffmpeg.")
daconfig['ffmpeg'] = daconfig['avconv']
del daconfig['avconv']
daconfig['config file'] = filename
if 'modules' not in daconfig:
daconfig['modules'] = os.getenv('DA_PYTHON', '/usr/share/docassemble/local' + str(sys.version_info.major) + '.' + str(sys.version_info.minor))
daconfig['python version'] = str(pkg_resources.get_distribution("docassemble.base").version)
version_file = daconfig.get('version file', '/usr/share/docassemble/webapp/VERSION')
if os.path.isfile(version_file) and os.access(version_file, os.R_OK):
with open(version_file, 'r', encoding='utf-8') as fp:
daconfig['system version'] = fp.read().strip()
else:
daconfig['system version'] = '0.1.12'
if LooseVersion(daconfig['system version']) >= LooseVersion('1.2.50'):
daconfig['has_celery_single_queue'] = True
else:
daconfig['has_celery_single_queue'] = False
if env_true_false('ENVIRONMENT_TAKES_PRECEDENCE'):
null_messages = list()
for env_var, key in (('S3ENABLE', 'enable'), ('S3ACCESSKEY', 'access key id'), ('S3SECRETACCESSKEY', 'secret access key'), ('S3BUCKET', 'bucket'), ('S3REGION', 'region'), ('S3ENDPOINTURL', 'endpoint url')):
if env_exists(env_var):
override_config(daconfig, null_messages, key, env_var, pre_key='s3')
for env_var, key in (('AZUREENABLE', 'enable'), ('AZUREACCOUNTKEY', 'account key'), ('AZUREACCOUNTNAME', 'account name'), ('AZURECONTAINER', 'container'), ('AZURECONNECTIONSTRING', 'connection string')):
if env_exists(env_var):
override_config(daconfig, null_messages, key, env_var, pre_key='azure')
if env_exists('KUBERNETES'):
override_config(daconfig, null_messages, 'kubernetes', 'KUBERNETES')
s3_config = daconfig.get('s3', None)
if not s3_config or ('enable' in s3_config and not s3_config['enable']):
S3_ENABLED = False
else:
S3_ENABLED = True
if not s3_config.get('access key id', None) and env_exists('AWSACCESSKEY'):
s3_config['access key id'] = os.environ['AWSACCESSKEY']
if not s3_config.get('secret access key', None) and env_exists('AWSSECRETACCESSKEY'):
s3_config['secret access key'] = os.environ['AWSSECRETACCESSKEY']
gc_config = daconfig.get('google cloud', None)
if not gc_config or ('enable' in gc_config and not gc_config['enable']) or not ('access key id' in gc_config and gc_config['access key id']) or not ('secret access key' in gc_config and gc_config['secret access key']):
GC_ENABLED = False
else:
GC_ENABLED = True
if 'azure' in daconfig and not isinstance(daconfig['azure'], dict):
config_error('azure must be a dict')
azure_config = daconfig.get('azure', None)
if not isinstance(azure_config, dict) or ('enable' in azure_config and not azure_config['enable']) or 'account name' not in azure_config or azure_config['account name'] is None or 'account key' not in azure_config or azure_config['account key'] is None:
AZURE_ENABLED = False
else:
AZURE_ENABLED = True
if daconfig.get('ec2', False) or (env_true_false('ENVIRONMENT_TAKES_PRECEDENCE') and env_true_false('EC2')):
h = httplib2.Http()
resp, content = h.request(daconfig.get('ec2 ip url', "http://169.254.169.254/latest/meta-data/local-hostname"), "GET")
if resp['status'] and int(resp['status']) == 200:
hostname = content.decode()
else:
config_error("Could not get hostname from ec2")
sys.exit(1)
elif daconfig.get('kubernetes', False) or (env_true_false('ENVIRONMENT_TAKES_PRECEDENCE') and env_true_false('KUBERNETES')):
hostname = socket.gethostbyname(socket.gethostname())
else:
hostname = os.getenv('SERVERHOSTNAME', socket.gethostname())
if S3_ENABLED:
import docassemble.webapp.amazon
cloud = docassemble.webapp.amazon.s3object(s3_config)
elif AZURE_ENABLED:
import docassemble.webapp.microsoft
cloud = docassemble.webapp.microsoft.azureobject(azure_config)
if ('key vault name' in azure_config and azure_config['key vault name'] is not None and 'managed identity' in azure_config and azure_config['managed identity'] is not None):
daconfig = cloud.load_with_secrets(daconfig)
else:
cloud = None
if 'suppress error notificiations' in daconfig and isinstance(daconfig['suppress error notificiations'], list):
ok = True
for item in daconfig['suppress error notificiations']:
if not isinstance(item, str):
ok = False
break
if not ok:
daconfig['suppress error notificiations'] = []
config_error("Configuration file suppress error notifications directive not valid")
else:
daconfig['suppress error notificiations'] = []
if 'maximum content length' in daconfig:
if isinstance(daconfig['maximum content length'], (int, type(None))):
if daconfig['maximum content length'] is not None and daconfig['maximum content length'] <= 0:
daconfig['maximum content length'] = None
else:
config_error("The maximum content length must be an integer number of bytes, or null.")
del daconfig['maximum content length']
if 'maximum content length' not in daconfig:
daconfig['maximum content length'] = 16 * 1024 * 1024
if 'social' not in daconfig or not isinstance(daconfig['social'], dict):
daconfig['social'] = dict()
if 'twitter' not in daconfig['social'] or not isinstance(daconfig['social']['twitter'], dict):
daconfig['social']['twitter'] = dict()
if 'og' not in daconfig['social'] or not isinstance(daconfig['social']['og'], dict):
daconfig['social']['og'] = dict()
if 'fb' not in daconfig['social'] or not isinstance(daconfig['social']['fb'], dict):
daconfig['social']['fb'] = dict()
for key in list(daconfig['social'].keys()):
if key in ('og', 'twitter', 'fb'):
continue
if (not isinstance(daconfig['social'][key], str)) or daconfig['social'][key].strip() == '':
del daconfig['social'][key]
else:
daconfig['social'][key] = noquote(daconfig['social'][key])
for part in ('og', 'fb', 'twitter'):
for key in list(daconfig['social'][part].keys()):
if (not isinstance(daconfig['social'][part][key], str)) or daconfig['social'][part][key].strip() == '':
del daconfig['social'][part][key]
else:
daconfig['social'][part][key] = noquote(daconfig['social'][part][key])
if 'name' in daconfig['social']:
del daconfig['social']['name']
if 'title' in daconfig['social']['og']:
del daconfig['social']['og']['title']
if 'title' in daconfig['social']['twitter']:
del daconfig['social']['twitter']['title']
if 'url' in daconfig['social']['og']:
del daconfig['social']['og']['url']
if 'administrative interviews' in daconfig:
if isinstance(daconfig['administrative interviews'], list):
new_admin_interviews = list()
for item in daconfig['administrative interviews']:
if isinstance(item, str):
new_item = cleanup_filename(item)
if new_item:
new_admin_interviews.append(dict(interview=new_item))
elif isinstance(item, dict) and 'interview' in item and isinstance(item['interview'], str):
item['interview'] = cleanup_filename(item['interview'])
if item['interview'] is not None:
new_admin_interviews.append(item)
daconfig['administrative interviews'] = new_admin_interviews
else:
del daconfig['administrative interviews']
if 'session lifetime seconds' in daconfig:
try:
daconfig['session lifetime seconds'] = int(daconfig['session lifetime seconds'])
assert daconfig['session lifetime seconds'] > 0
except:
config_error("Invalid session lifetime seconds.")
del daconfig['session lifetime seconds']
if 'pagination limit' in daconfig:
try:
assert isinstance(daconfig['pagination limit'], int)
assert daconfig['pagination limit'] > 1
assert daconfig['pagination limit'] < 1001
except:
daconfig['pagination limit'] = 100
if 'page after login' in daconfig:
if isinstance(daconfig['page after login'], str):
daconfig['page after login'] = [{'*': daconfig['page after login']}]
if isinstance(daconfig['page after login'], dict):
daconfig['page after login'] = [daconfig['page after login']]
page_after_login = []
if isinstance(daconfig['page after login'], list):
for item in daconfig['page after login']:
if isinstance(item, dict):
for key, val in item.items():
if isinstance(key, str) and isinstance(val, str):
page_after_login.append((key, val))
else:
config_error('page after login keys and values must be strings')
else:
config_error('page after login items must be dictionaries')
else:
config_error('page after login must be a string, a list, or a dict')
daconfig['page after login'] = page_after_login
else:
daconfig['page after login'] = []
if 'keymap' in daconfig and daconfig['keymap'] not in ['vim', 'emacs', 'sublime']:
config_error("You used a keymap that is not supported. Available values are vim, emacs, and sublime.")
del daconfig['keymap']
if 'voicerss' in daconfig:
if isinstance(daconfig['voicerss'], dict):
if 'languages' in daconfig['voicerss']:
daconfig['voicerss']['dialects'] = daconfig['voicerss']['languages']
del daconfig['voicerss']['languages']
else:
config_error('voicerss must be a dict')
del daconfig['voicerss']
if 'cross site domain' in daconfig and 'cross site domains' not in daconfig:
daconfig['cross site domains'] = [daconfig['cross site domain'].strip()]
del daconfig['cross site domain']
if 'cross site domains' in daconfig:
if isinstance(daconfig['cross site domains'], list):
for item in daconfig['cross site domains']:
if not isinstance(item, str):
config_error("The configuration directive cross site domains must be a list of strings.")
del daconfig['cross site domains']
break
if len(daconfig['cross site domains']) == 1 and daconfig['cross site domains'][0] == '*':
daconfig['cross site domains'] = '*'
else:
config_error("The configuration directive cross site domains must be a list.")
del daconfig['cross site domains']
if 'vim' in daconfig:
config_error("The configuration directive vim is deprecated. Please use keymap instead.")
if daconfig['vim'] and 'keymap' not in daconfig:
daconfig['keymap'] = 'vim'
if 'db' not in daconfig:
daconfig['db'] = dict(name="docassemble", user="docassemble", password="abc123")
dbtableprefix = daconfig['db'].get('table prefix', None)
if not dbtableprefix:
dbtableprefix = ''
if cloud is not None:
if 'host' not in daconfig['db'] or daconfig['db']['host'] is None:
key = cloud.get_key('hostname-sql')
if key.does_exist:
the_host = key.get_contents_as_string()
if the_host == hostname:
daconfig['db']['host'] = 'localhost'
else:
daconfig['db']['host'] = the_host
if 'log server' not in daconfig or daconfig['log server'] is None:
key = cloud.get_key('hostname-log')
if key.does_exist:
the_host = key.get_contents_as_string()
if the_host == hostname:
daconfig['log server'] = 'localhost'
else:
daconfig['log server'] = the_host
if 'redis' not in daconfig or daconfig['redis'] is None:
key = cloud.get_key('hostname-redis')
if key.does_exist:
the_host = key.get_contents_as_string()
if the_host == hostname:
the_host = 'localhost'
daconfig['redis'] = 'redis://' + the_host
if 'rabbitmq' not in daconfig or daconfig['rabbitmq'] is None:
key = cloud.get_key('hostname-rabbitmq')
if key.does_exist:
the_host = key.get_contents_as_string()
daconfig['rabbitmq'] = 'pyamqp://guest@' + str(the_host) + '//'
if daconfig['db'].get('prefix', None) is None or daconfig['db'].get('prefix', '') == '':
daconfig['db']['prefix'] = 'postgresql+psycopg2://'
if daconfig['db'].get('host', None) is None or daconfig['db'].get('host', '') == '':
daconfig['db']['host'] = 'localhost'
if daconfig['db'].get('name', None) is None or daconfig['db'].get('name', '') == '':
daconfig['db']['name'] = 'docassemble'
if daconfig['db'].get('user', None) is None or daconfig['db'].get('user', '') == '':
daconfig['db']['user'] = 'docassemble'
if daconfig['db'].get('password', None) is None or daconfig['db'].get('password', '') == '':
daconfig['db']['password'] = 'abc123'
if daconfig['db'].get('port', None) is None or daconfig['db'].get('port', '') == '':
if daconfig['db']['prefix'].startswith('postgresql'):
daconfig['db']['port'] = '5432'
elif daconfig['db']['prefix'].startswith('mysql'):
daconfig['db']['port'] = '3306'
elif daconfig['db']['prefix'].startswith('oracle'):
daconfig['db']['port'] = '1521'
if 'ocr languages' not in daconfig:
daconfig['ocr languages'] = dict()
if not isinstance(daconfig['ocr languages'], dict):
config_error('ocr languages must be a dict')
daconfig['ocr languages'] = dict()
if 'zh' not in daconfig['ocr languages']:
daconfig['ocr languages']['zh'] = 'chi-tra'
if 'attempt limit' not in daconfig:
daconfig['attempt limit'] = 10
if not isinstance(daconfig['attempt limit'], (int, float)):
config_error('attempt limit must be a number')
daconfig['attempt limit'] = 10
if daconfig['attempt limit'] < 2:
config_error('attempt limit cannot be less than 2')
daconfig['attempt limit'] = 10
if 'ban period' not in daconfig:
daconfig['ban period'] = 86400
if not isinstance(daconfig['ban period'], (int, float)):
config_error('ban period must be a number')
daconfig['ban period'] = 86400
if daconfig['ban period'] < 2:
config_error('ban period cannot be less than 2')
daconfig['ban period'] = 86400
if 'verification code digits' not in daconfig:
daconfig['verification code digits'] = 6
if not isinstance(daconfig['verification code digits'], (int, float)):
config_error('verification code digits must be a number')
daconfig['verification code digits'] = 6
if daconfig['verification code digits'] < 1 or daconfig['verification code digits'] > 32:
config_error('verification code digits must be between 1 and 32')
daconfig['verification code digits'] = 6
if 'verification code timeout' not in daconfig:
daconfig['verification code timeout'] = 180
if not isinstance(daconfig['verification code timeout'], (int, float)):
config_error('verification code timeout must be a number')
daconfig['verification code timeout'] = 180
if daconfig['verification code timeout'] < 1:
config_error('verification code timeout must be one or greater')
daconfig['verification code timeout'] = 180
if 'api privileges' in daconfig:
if not isinstance(daconfig['api privileges'], list):
config_error("api privileges must be in the form of a list")
daconfig['api privileges'] = ['admin', 'developer']
else:
daconfig['api privileges'] = ['admin', 'developer']
if 'two factor authentication' in daconfig:
if isinstance(daconfig['two factor authentication'], bool):
daconfig['two factor authentication'] = dict(enable=daconfig['two factor authentication'])
if not isinstance(daconfig['two factor authentication'], dict):
config_error('two factor authentication must be boolean or a dict')
daconfig['two factor authentication'] = dict()
else:
daconfig['two factor authentication'] = dict(enable=False)
if 'allowed for' in daconfig['two factor authentication']:
if not isinstance(daconfig['two factor authentication']['allowed for'], list):
config_error("two factor authentication allowed for must be in the form of a list")
daconfig['two factor authentication']['allowed for'] = ['admin', 'developer']
else:
if 'two factor authentication privileges' in daconfig:
if isinstance(daconfig['two factor authentication privileges'], list):
daconfig['two factor authentication']['allowed for'] = daconfig['two factor authentication privileges']
else:
config_error("two factor authentication privileges must be in the form of a list")
daconfig['two factor authentication']['allowed for'] = ['admin', 'developer']
else:
daconfig['two factor authentication']['allowed for'] = ['admin', 'developer']
if 'email confirmation privileges' in daconfig:
if not isinstance(daconfig['email confirmation privileges'], list):
config_error("email confirmation privileges must be in the form of a list")
daconfig['email confirmation privileges'] = []
else:
daconfig['email confirmation privileges'] = []
loaded = True
for key in ['global javascript', 'global css']:
if key in daconfig:
if daconfig[key] is None:
del daconfig[key]
elif not isinstance(daconfig[key], list):
daconfig[key] = [daconfig[key]]
if 'password complexity' in daconfig:
if isinstance(daconfig['password complexity'], dict):
for key in ('length', 'lowercase', 'uppercase', 'digits', 'punctuation'):
if key in daconfig['password complexity'] and not isinstance(daconfig['password complexity'][key], int):
config_error("password complexity key " + key + " must be an integer.")
del daconfig['password complexity'][key]
else:
config_error("password complexity must be in the form of a dict.")
del daconfig['password complexity']
if 'checkin interval' in daconfig:
if not isinstance(daconfig['checkin interval'], int):
config_error("checkin interval must be an integer.")
del daconfig['checkin interval']
elif daconfig['checkin interval'] > 0 and daconfig['checkin interval'] < 1000:
config_error("checkin interval must be at least 1000, if not 0.")
del daconfig['checkin interval']
if daconfig.get('checkin interval', 5) == 0:
daconfig['enable monitor'] = False
else:
daconfig['enable monitor'] = True
if daconfig.get('default icons', None) == 'font awesome':
daconfig['use font awesome'] = True
if 'websockets port' in daconfig and daconfig['websockets port']:
try:
daconfig['websockets port'] = int(daconfig['websockets port'])
except:
config_error("websockets port must be an integer")
del daconfig['websockets port']
if 'mail' not in daconfig:
daconfig['mail'] = dict()
if 'dispatch' not in daconfig:
daconfig['dispatch'] = dict()
if not isinstance(daconfig['dispatch'], dict):
config_error("dispatch must be structured as a dictionary")
daconfig['dispatch'] = dict()
if len(daconfig['dispatch']):
new_dispatch = dict()
for shortcut, filename in daconfig['dispatch'].items():
if isinstance(shortcut, str) and isinstance(filename, str):
new_filename = cleanup_filename(filename)
if new_filename:
new_dispatch[shortcut] = new_filename
daconfig['dispatch'] = new_dispatch
if 'interview delete days by filename' in daconfig and isinstance(daconfig['interview delete days by filename'], dict):
new_delete_days = dict()
for filename, days in daconfig['interview delete days by filename'].items():
new_filename = cleanup_filename(filename)
if new_filename:
new_delete_days[new_filename] = days
daconfig['interview delete days by filename'] = new_delete_days
for key in ('default interview', 'session list interview', 'dispatch interview', 'auto resume interview'):
if key in daconfig:
if isinstance(daconfig[key], str):
daconfig[key] = cleanup_filename(daconfig[key])
if daconfig[key] is None:
del daconfig[key]
else:
del daconfig[key]
if 'ldap login' not in daconfig:
daconfig['ldap login'] = dict()
if not isinstance(daconfig['ldap login'], dict):
config_error("ldap login must be structured as a dictionary")
daconfig['ldap login'] = dict()
if daconfig.get('auto resume interview', None) is not None:
daconfig['show interviews link'] = False
if 'use minio' not in daconfig:
daconfig['use minio'] = False
if 'server administrator email' not in daconfig or not daconfig['server administrator email']:
daconfig['server administrator email'] = 'webmaster@localhost'
if 'use cloud urls' not in daconfig:
daconfig['use cloud urls'] = False
else:
daconfig['use cloud urls'] = True if daconfig['use cloud urls'] else False
if 'use https' not in daconfig or not daconfig['use https']:
daconfig['use https'] = False
if 'use lets encrypt' not in daconfig or not daconfig['use lets encrypt']:
daconfig['use lets encrypt'] = False
if 'behind https load balancer' not in daconfig or not daconfig['behind https load balancer']:
daconfig['behind https load balancer'] = False
if 'websockets ip' in daconfig and not daconfig['websockets ip']:
del daconfig['websockets ip']
if 'websockets port' not in daconfig or not daconfig['websockets port']:
daconfig['websockets port'] = 5000
if 'root' not in daconfig or not daconfig['root']:
daconfig['root'] = '/'
if 'web server' not in daconfig or not daconfig['web server']:
daconfig['web server'] = 'nginx'
if 'table css class' not in daconfig or not isinstance(daconfig['table css class'], str):
daconfig['table css class'] = 'table table-striped'
if env_true_false('ENVIRONMENT_TAKES_PRECEDENCE'):
messages = list()
for env_var, key in (('DBPREFIX', 'prefix'), ('DBNAME', 'name'), ('DBUSER', 'user'), ('DBPASSWORD', 'password'), ('DBHOST', 'host'), ('DBPORT', 'port'), ('DBTABLEPREFIX', 'table prefix'), ('DBBACKUP', 'backup')):
if env_exists(env_var):
override_config(daconfig, messages, key, env_var, pre_key='db')
if env_exists('DASECRETKEY'):
override_config(daconfig, messages, 'secretkey', 'DASECRETKEY')
daconfig['secretkey'] = env_translate('DASECRETKEY')
if env_exists('DABACKUPDAYS'):
override_config(daconfig, messages, 'backup days', 'DABACKUPDAYS')
if env_exists('DASTABLEVERSION'):
override_config(daconfig, messages, 'stable version', 'DASTABLEVERSION')
if env_exists('DASSLPROTOCOLS'):
override_config(daconfig, messages, 'nginx ssl protocols', 'DASSLPROTOCOLS')
if env_exists('SERVERADMIN'):
override_config(daconfig, messages, 'server administrator email', 'SERVERADMIN')
if env_exists('LOCALE'):
override_config(daconfig, messages, 'os locale', 'LOCALE')
if env_exists('TIMEZONE'):
override_config(daconfig, messages, 'timezone', 'TIMEZONE')
if env_exists('REDIS'):
override_config(daconfig, messages, 'redis', 'REDIS')
if env_exists('RABBITMQ'):
override_config(daconfig, messages, 'rabbitmq', 'RABBITMQ')
if env_exists('DACELERYWORKERS'):
override_config(daconfig, messages, 'celery processes', 'DACELERYWORKERS')
for env_var, key in (('S3ENABLE', 'enable'), ('S3ACCESSKEY', 'access key id'), ('S3SECRETACCESSKEY', 'secret access key'), ('S3BUCKET', 'bucket'), ('S3REGION', 'region'), ('S3ENDPOINTURL', 'endpoint url')):
if env_exists(env_var):
override_config(daconfig, messages, key, env_var, pre_key='s3')
for env_var, key in (('AZUREENABLE', 'enable'), ('AZUREACCOUNTKEY', 'account key'), ('AZUREACCOUNTNAME', 'account name'), ('AZURECONTAINER', 'container'), ('AZURECONNECTIONSTRING', 'connection string')):
if env_exists(env_var):
override_config(daconfig, messages, key, env_var, pre_key='azure')
if env_exists('EC2'):
override_config(daconfig, messages, 'ec2', 'EC2')
if env_exists('COLLECTSTATISTICS'):
override_config(daconfig, messages, 'collect statistics', 'COLLECTSTATISTICS')
if env_exists('KUBERNETES'):
override_config(daconfig, messages, 'kubernetes', 'KUBERNETES')
if env_exists('LOGSERVER'):
override_config(daconfig, messages, 'log server', 'LOGSERVER')
if env_exists('USECLOUDURLS'):
override_config(daconfig, messages, 'use cloud urls', 'USECLOUDURLS')
if env_exists('USEMINIO'):
override_config(daconfig, messages, 'use minio', 'USEMINIO')
if env_exists('USEHTTPS'):
override_config(daconfig, messages, 'use https', 'USEHTTPS')
if env_exists('USELETSENCRYPT'):
override_config(daconfig, messages, 'use lets encrypt', 'USELETSENCRYPT')
if env_exists('LETSENCRYPTEMAIL'):
override_config(daconfig, messages, 'lets encrypt email', 'LETSENCRYPTEMAIL')
if env_exists('BEHINDHTTPSLOADBALANCER'):
override_config(daconfig, messages, 'behind https load balancer', 'BEHINDHTTPSLOADBALANCER')
if env_exists('XSENDFILE'):
override_config(daconfig, messages, 'xsendfile', 'XSENDFILE')
if env_exists('DAUPDATEONSTART'):
override_config(daconfig, messages, 'update on start', 'DAUPDATEONSTART')
if env_exists('URLROOT'):
override_config(daconfig, messages, 'url root', 'URLROOT')
if env_exists('DAHOSTNAME'):
override_config(daconfig, messages, 'external hostname', 'DAHOSTNAME')
if env_exists('DAEXPOSEWEBSOCKETS'):
override_config(daconfig, messages, 'expose websockets', 'DAEXPOSEWEBSOCKETS')
if env_exists('DAWEBSOCKETSIP'):
override_config(daconfig, messages, 'websockets ip', 'DAWEBSOCKETSIP')
if env_exists('DAWEBSOCKETSPORT'):
override_config(daconfig, messages, 'websockets port', 'DAWEBSOCKETSPORT')
if env_exists('POSTURLROOT'):
override_config(daconfig, messages, 'root', 'POSTURLROOT')
if env_exists('DAWEBSERVER'):
override_config(daconfig, messages, 'web server', 'DAWEBSERVER')
if env_exists('DASQLPING'):
override_config(daconfig, messages, 'sql ping', 'DASQLPING')
if env_exists('PORT'):
override_config(daconfig, messages, 'http port', 'PORT')
env_messages = messages
return
def default_config():
config = """\
secretkey: """ + random_string(32) + """
mail:
default sender: '"Administrator" <[email protected]>'
"""
return config
def parse_redis_uri():
redis_host = daconfig.get('redis', None)
if redis_host is None:
redis_host = 'redis://localhost'
redis_host = redis_host.strip()
if not redis_host.startswith('redis://'):
redis_host = 'redis://' + redis_host
m = re.search(r'redis://([^:@\?]*):([^:@\?]*)@(.*)', redis_host)
if m:
redis_password = m.group(2)
redis_host = 'redis://' + m.group(3)
else:
redis_password = None
m = re.search(r'[?\&]password=([^&]+)', redis_host)
if m:
redis_password = m.group(1)
m = re.search(r'[?\&]db=([0-9]+)', redis_host)
if m:
redis_db = int(m.group(1))
else:
redis_db = 0
redis_host = re.sub(r'\?.*', '', redis_host)
redis_host = re.sub(r'^redis://', r'', redis_host)
m = re.search(r'/([0-9]+)', redis_host)
if m:
redis_db = int(m.group(1))
redis_host = re.sub(r'/.*', r'', redis_host)
m = re.search(r':([0-9]+)$', redis_host)
if m:
redis_port = m.group(1)
redis_host = re.sub(r':([0-9]+)$', '', redis_host)
else:
redis_port = '6379'
redis_offset = daconfig.get('redis database offset', redis_db)
redis_cli = 'redis-cli'
if redis_host != 'localhost' or redis_port != '6379':
redis_cli += ' -h ' + redis_host + ' -p ' + redis_port
if redis_password is not None:
redis_cli += ' -a ' + redis_password
return (redis_host, redis_port, redis_password, redis_offset, redis_cli)
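# Illustrative worked example (assumption, not part of docassemble): with
# daconfig['redis'] set to 'redis://:secret@myhost:6380/2' and no
# 'redis database offset' directive, parse_redis_uri() returns
#
#     ('myhost', '6380', 'secret', 2, 'redis-cli -h myhost -p 6380 -a secret')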
def noquote(string):
if isinstance(string, str):
return string.replace('\n', ' ').replace('"', '&quot;').strip()
return string
|
py | b4134de3ce411ae2867aba6bda6c41dc639a7564 | """Abstract model with created, modified timestamps"""
from django.db import models
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating "created" and "modified" fields.
"""
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class Meta:
"""Abstract model"""
abstract = True
|
py | b4134e998bafbfb0408e24eb7187d26e8dfee594 | # qubit number=3
# total number=14
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
prog = QuantumCircuit(input_qubit)
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.y(input_qubit[3]) # number=5
for edge in E:
k = edge[0]
l = edge[1]
prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
prog.p(gamma, k)
prog.p(gamma, l)
prog.rx(2 * beta, range(len(V)))
prog.swap(input_qubit[1],input_qubit[0]) # number=6
prog.swap(input_qubit[1],input_qubit[0]) # number=7
prog.y(input_qubit[3]) # number=8
prog.y(input_qubit[3]) # number=9
prog.swap(input_qubit[1],input_qubit[0]) # number=10
prog.swap(input_qubit[1],input_qubit[0]) # number=11
prog.x(input_qubit[1]) # number=12
prog.x(input_qubit[1]) # number=13
# circuit end
return prog
if __name__ == '__main__':
n = 4
V = np.arange(0, n, 1)
E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
G = nx.Graph()
G.add_nodes_from(V)
G.add_weighted_edges_from(E)
step_size = 0.1
a_gamma = np.arange(0, np.pi, step_size)
a_beta = np.arange(0, np.pi, step_size)
a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
1 + np.cos(4 * a_gamma) ** 2)
result = np.where(F1 == np.amax(F1))
a = list(zip(result[0], result[1]))[0]
gamma = a[0] * step_size
beta = a[1] * step_size
prog = make_circuit(4)
sample_shot =5600
writefile = open("../data/startQiskit_QC793.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.measure_all()
prog = circuit1
info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
|
py | b4134fd37a28f291fea1dfa4a173a98ab1356b5e | from airflow.hooks.base_hook import BaseHook
from neo4j.v1 import GraphDatabase
class Neo4jHook(BaseHook):
"""
The Neo4j Python driver is officially supported by Neo4j and
connects to the database using the binary protocol.
It aims to be minimal, while being idiomatic to Python.
pip install neo4j-driver
https://neo4j.com/developer/python/#neo4j-python-driver
You can specify connection string options in the extra field of your connection
https://neo4j.com/docs/api/python-driver/current/
"""
conn_type = 'Neo4j'
def __init__(self, neo4j_conn_id='neo4j_default', *args, **kwargs):
super().__init__(source='neo4j')
self.neo4j_conn_id = neo4j_conn_id
self.neo4j_connection = self.get_connection(neo4j_conn_id)
self.neo4j_driver = None
def get_driver(self):
"""
Fetches neo4j driver
"""
if not self.neo4j_driver:
conn = self.neo4j_connection
host = conn.host
port = conn.port
uri = str(host) + ":" + str(port)
user = conn.login
password = conn.password
self.neo4j_driver = GraphDatabase.driver(uri, auth=(user, password))
return self.neo4j_driver
def get_session(self):
"""
Fetches neo4j session
"""
neo4j_driver = self.get_driver()
return neo4j_driver.session()
def execute_cql(self, cql):
"""
Executes cql
"""
if not cql:
return
tx = self.get_session().begin_transaction()
if isinstance(cql, list):
cql_sequence = cql
else:
cql_sequence = [cql]
for cql in cql_sequence:
tx.run(cql)
tx.commit()
def on_kill(self):
"""
Gets neo4j connection closed
"""
if self.neo4j_driver:
self.neo4j_driver.close()
self.neo4j_driver = None
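# Illustrative usage sketch (assumption, not part of the original module; the
# connection id and queries are made up for the example):
#
#     hook = Neo4jHook(neo4j_conn_id='neo4j_default')
#     hook.execute_cql([
#         "CREATE (p:Person {name: 'Ada'})",
#         "CREATE (p:Person {name: 'Grace'})",
#     ])               # both statements run inside a single transaction
#     hook.on_kill()   # close the driver when finished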
|
py | b4135032a95bebccb03e4fdb57232473dd0395ac | from datetime import datetime
from django.contrib.auth.models import User
from django.core import management
from django.db import models
# Forward declared intermediate model
class Membership(models.Model):
person = models.ForeignKey('Person')
group = models.ForeignKey('Group')
price = models.IntegerField(default=100)
def __unicode__(self):
return "%s is a member of %s" % (self.person.name, self.group.name)
class UserMembership(models.Model):
user = models.ForeignKey(User)
group = models.ForeignKey('Group')
price = models.IntegerField(default=100)
def __unicode__(self):
return "%s is a user and member of %s" % (self.user.username, self.group.name)
class Person(models.Model):
name = models.CharField(max_length=128)
def __unicode__(self):
return self.name
class Group(models.Model):
name = models.CharField(max_length=128)
# Membership object defined as a class
members = models.ManyToManyField(Person, through=Membership)
user_members = models.ManyToManyField(User, through='UserMembership')
def __unicode__(self):
return self.name
__test__ = {'API_TESTS':"""
# Create some dummy data
>>> bob = Person.objects.create(name='Bob')
>>> jim = Person.objects.create(name='Jim')
>>> rock = Group.objects.create(name='Rock')
>>> roll = Group.objects.create(name='Roll')
>>> frank = User.objects.create_user('frank','[email protected]','password')
>>> jane = User.objects.create_user('jane','[email protected]','password')
# Now test that the forward declared Membership works
>>> Membership.objects.create(person=bob, group=rock)
<Membership: Bob is a member of Rock>
>>> Membership.objects.create(person=bob, group=roll)
<Membership: Bob is a member of Roll>
>>> Membership.objects.create(person=jim, group=rock)
<Membership: Jim is a member of Rock>
>>> bob.group_set.all()
[<Group: Rock>, <Group: Roll>]
>>> roll.members.all()
[<Person: Bob>]
# Error messages use the model name, not repr of the class name
>>> bob.group_set = []
Traceback (most recent call last):
...
AttributeError: Cannot set values on a ManyToManyField which specifies an intermediary model. Use Membership's Manager instead.
>>> roll.members = []
Traceback (most recent call last):
...
AttributeError: Cannot set values on a ManyToManyField which specifies an intermediary model. Use Membership's Manager instead.
>>> rock.members.create(name='Anne')
Traceback (most recent call last):
...
AttributeError: Cannot use create() on a ManyToManyField which specifies an intermediary model. Use Membership's Manager instead.
>>> bob.group_set.create(name='Funk')
Traceback (most recent call last):
...
AttributeError: Cannot use create() on a ManyToManyField which specifies an intermediary model. Use Membership's Manager instead.
# Now test that the intermediate with a relationship outside
# the current app (i.e., UserMembership) works
>>> UserMembership.objects.create(user=frank, group=rock)
<UserMembership: frank is a user and member of Rock>
>>> UserMembership.objects.create(user=frank, group=roll)
<UserMembership: frank is a user and member of Roll>
>>> UserMembership.objects.create(user=jane, group=rock)
<UserMembership: jane is a user and member of Rock>
>>> frank.group_set.all()
[<Group: Rock>, <Group: Roll>]
>>> roll.user_members.all()
[<User: frank>]
# Regression test for #8134 --
# m2m-through models shouldn't be serialized as m2m fields on the model.
# First, clean up a lot of objects we don't need.
# The serialization test only requires three objects to work -
# one for each end of the m2m, plus the through model.
>>> User.objects.all().delete()
>>> UserMembership.objects.all().delete()
>>> frank.delete()
>>> rock.delete()
>>> jim.delete()
# Dump the current contents of the database as a JSON fixture
>>> management.call_command('dumpdata', 'm2m_through_regress', format='json', indent=2)
[
{
"pk": 2,
"model": "m2m_through_regress.membership",
"fields": {
"person": 1,
"price": 100,
"group": 2
}
},
{
"pk": 1,
"model": "m2m_through_regress.person",
"fields": {
"name": "Bob"
}
},
{
"pk": 2,
"model": "m2m_through_regress.group",
"fields": {
"name": "Roll"
}
}
]
# Check the XML serializer too, since it doesn't use the common implementation
>>> management.call_command('dumpdata', 'm2m_through_regress', format='xml', indent=2)
<?xml version="1.0" encoding="utf-8"?>
<django-objects version="1.0">
<object pk="2" model="m2m_through_regress.membership">
<field to="m2m_through_regress.person" name="person" rel="ManyToOneRel">1</field>
<field to="m2m_through_regress.group" name="group" rel="ManyToOneRel">2</field>
<field type="IntegerField" name="price">100</field>
</object>
<object pk="1" model="m2m_through_regress.person">
<field type="CharField" name="name">Bob</field>
</object>
<object pk="2" model="m2m_through_regress.group">
<field type="CharField" name="name">Roll</field>
</object>
</django-objects>
## Regression test for #8046:
Check that we don't involve too many copies of the intermediate table when
doing a join.
>>> bob = Person.objects.create(name='Bob')
>>> jim = Person.objects.create(name='Jim')
>>> rock = Group.objects.create(name='Rock')
>>> roll = Group.objects.create(name='Roll')
>>> _ = Membership.objects.create(person=bob, group=rock)
>>> _ = Membership.objects.create(person=jim, group=rock, price=50)
>>> _ = Membership.objects.create(person=bob, group=roll, price=50)
>>> rock.members.filter(membership__price=50)
[<Person: Jim>]
## Regression test for #8254
>>> bob.group_set.filter(membership__price=50)
[<Group: Roll>]
"""}
|
py | b41351baebf15f859c3ad9d16b4dde93a794d3c9 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Apart from the function headers, the main branch of the program consists of two lines of code: the call to the
# test() function and the if __name__ == '__main__' statement. There, the user is asked to enter an integer. If it
# is positive, the positive() function is called, whose body prints the word "Положительное" ("Positive") to the
# screen. If the number is negative, the negative() function is called; its body contains an expression that prints
# the word "Отрицательное" ("Negative") to the screen.
def test(A):
if A >= 0:
positive()
elif A < 0:
negative()
def positive():
print('Положительное')
def negative():
print('Отрицательное')
if __name__ == '__main__':
a = int(input('Введите целое число: '))
test(a) |
py | b413528b2e148b7403535f662f0dd87202883b9c | #!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from typing import List, Tuple, Union
import cereal.messaging as messaging
import selfdrive.sentry as sentry
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.hardware.eon.apk import (pm_apply_packages, update_apks)
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import is_dirty, get_commit, get_version, get_origin, get_short_branch, \
terms_version, training_version
sys.path.append(os.path.join(BASEDIR, "pyextra"))
def manager_init() -> None:
# update system time from panda
set_time(cloudlog)
# save boot log
# subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params: List[Tuple[str, Union[str, bytes]]] = [
("CompletedTrainingVersion", "0"),
("HasAcceptedTerms", "0"),
("OpenpilotEnabledToggle", "1"),
("IsMetric", "1"),
("EndToEndToggle", "1"),
("IsOpenpilotViewEnabled", "0"),
("OpkrAutoShutdown", "2"),
("OpkrForceShutdown", "5"),
("OpkrAutoScreenOff", "0"),
("OpkrUIBrightness", "0"),
("OpkrUIVolumeBoost", "0"),
("OpkrEnableDriverMonitoring", "1"),
("OpkrEnableLogger", "0"),
("OpkrEnableUploader", "0"),
("OpkrEnableGetoffAlert", "0"),
("OpkrAutoResume", "1"),
("OpkrVariableCruise", "1"),
("OpkrLaneChangeSpeed", "45"),
("OpkrAutoLaneChangeDelay", "0"),
("OpkrSteerAngleCorrection", "0"),
("PutPrebuiltOn", "1"),
("LdwsCarFix", "0"),
("LateralControlMethod", "2"),
("CruiseStatemodeSelInit", "1"),
("InnerLoopGain", "35"),
("OuterLoopGain", "20"),
("TimeConstant", "14"),
("ActuatorEffectiveness", "20"),
("Scale", "1500"),
("LqrKi", "15"),
("DcGain", "270"),
("PidKp", "25"),
("PidKi", "50"),
("PidKd", "150"),
("PidKf", "7"),
("CameraOffsetAdj", "60"),
("PathOffsetAdj", "0"),
("SteerRatioAdj", "1550"),
("SteerRatioMaxAdj", "1750"),
("SteerActuatorDelayAdj", "20"),
("SteerRateCostAdj", "35"),
("SteerLimitTimerAdj", "100"),
("TireStiffnessFactorAdj", "100"),
("SteerMaxBaseAdj", "384"),
("SteerMaxAdj", "384"),
("SteerDeltaUpBaseAdj", "3"),
("SteerDeltaUpAdj", "3"),
("SteerDeltaDownBaseAdj", "7"),
("SteerDeltaDownAdj", "7"),
("OpkrBatteryChargingControl", "1"),
("OpkrBatteryChargingMin", "70"),
("OpkrBatteryChargingMax", "80"),
("LeftCurvOffsetAdj", "0"),
("RightCurvOffsetAdj", "0"),
("DebugUi1", "0"),
("DebugUi2", "0"),
("DebugUi3", "0"),
("LongLogDisplay", "0"),
("OpkrBlindSpotDetect", "1"),
("OpkrMaxAngleLimit", "90"),
("OpkrSpeedLimitOffset", "0"),
("OpkrLiveSteerRatio", "1"),
("OpkrVariableSteerMax", "0"),
("OpkrVariableSteerDelta", "0"),
("FingerprintTwoSet", "0"),
("OpkrDrivingRecord", "0"),
("OpkrTurnSteeringDisable", "0"),
("CarModel", ""),
("OpkrHotspotOnBoot", "0"),
("OpkrSSHLegacy", "1"),
("CruiseOverMaxSpeed", "0"),
("JustDoGearD", "0"),
("LanelessMode", "2"),
("ComIssueGone", "1"),
("MaxSteer", "384"),
("MaxRTDelta", "112"),
("MaxRateUp", "3"),
("MaxRateDown", "7"),
("SteerThreshold", "150"),
("RecordingCount", "100"),
("RecordingQuality", "1"),
("CruiseGapAdjust", "0"),
("AutoEnable", "1"),
("CruiseAutoRes", "0"),
("AutoResOption", "0"),
("AutoResCondition", "0"),
("OpkrMonitoringMode", "0"),
("OpkrMonitorEyesThreshold", "45"),
("OpkrMonitorNormalEyesThreshold", "45"),
("OpkrMonitorBlinkThreshold", "35"),
("MadModeEnabled", "1"),
("WhitePandaSupport", "0"),
("SteerWarningFix", "0"),
("OpkrRunNaviOnBoot", "0"),
("CruiseGap1", "11"),
("CruiseGap2", "13"),
("CruiseGap3", "15"),
("CruiseGap4", "17"),
("DynamicTRGap", "3"),
("DynamicTRSpd", "0,20,40,60,110"),
("DynamicTRSet", "1.3,1.4,1.5,1.6,1.7"),
("OpkrBattLess", "0"),
("LCTimingFactorUD", "1"),
("LCTimingFactor30", "10"),
("LCTimingFactor60", "20"),
("LCTimingFactor80", "70"),
("LCTimingFactor110", "100"),
("OpkrUIBrightnessOff", "10"),
("LCTimingFactorEnable", "1"),
("AutoEnableSpeed", "3"),
("SafetyCamDecelDistGain", "0"),
("OpkrLiveTunePanelEnable", "0"),
("RadarLongHelper", "1"),
("GitPullOnBoot", "0"),
("LiveSteerRatioPercent", "-5"),
("StoppingDistAdj", "0"),
("ShowError", "1"),
("AutoResLimitTime", "0"),
("VCurvSpeedC", "30,50,70,90"),
("VCurvSpeedT", "43,55,68,85"),
("OCurvSpeedC", "30,40,50,60,70"),
("OCurvSpeedT", "35,45,60,70,80"),
("OSMCustomSpeedLimitC", "30,40,50,60,70,90"),
("OSMCustomSpeedLimitT", "30,40,65,72,80,95"),
("StockNaviSpeedEnabled", "0"),
("OPKRNaviSelect", "2"),
("dp_atl", "1"),
("E2ELong", "0"),
("GoogleMapEnabled", "0"),
("OPKRServer", "0"),
("OPKRMapboxStyleSelect", "0"),
("IgnoreCANErroronISG", "0"),
("RESCountatStandstill", "20"),
("OpkrSpeedLimitOffsetOption", "0"),
("OpkrSpeedLimitSignType", "0"),
("StockLKASEnabled", "1"),
("SpeedLimitDecelOff", "0"),
("CurvDecelOption", "2"),
("FCA11Message", "0"),
("StandstillResumeAlt", "0"),
("MapboxEnabled", "0"),
("AutoRESDelay", "0"),
("UseRadarTrack", "0"),
("RadarDisable", "0"),
("DesiredCurvatureLimit", "5"),
("C2WithCommaPower", "0"),
("CustomTREnabled", "0"),
("RoadList", "RoadName1,+0.0,RoadName2,-0.0"),
("LaneWidth", "37"),
("SpdLaneWidthSpd", "0,31"),
("SpdLaneWidthSet", "2.8,3.5"),
("TopTextView", "0"),
("CloseToRoadEdge", "0"),
("LeftEdgeOffset", "0"),
("RightEdgeOffset", "0"),
("AvoidLKASFaultEnabled", "0"),
("AvoidLKASFaultMaxAngle", "85"),
("AvoidLKASFaultMaxFrame", "90"),
("AvoidLKASFaultBeyond", "0"),
("UseStockDecelOnSS", "0"),
("AnimatedRPM", "1"),
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
if not params.get_bool("DisableRadar_Allow"):
params.delete("DisableRadar")
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE", "0"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if EON:
update_apks(show_spinner=True)
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", get_version())
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_commit(default=""))
params.put("GitBranch", get_short_branch(default=""))
params.put("GitRemote", get_origin(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
elif not reg_res:
dongle_id = "maintenance"
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not is_dirty():
os.environ['CLEAN'] = '1'
# init logging
sentry.init(sentry.SentryProject.SELFDRIVE)
cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty(),
device=HARDWARE.get_device_type())
# opkr
if os.path.isfile('/data/log/error.txt'):
os.remove('/data/log/error.txt')
if os.path.isfile('/data/log/can_missing.txt'):
os.remove('/data/log/can_missing.txt')
if os.path.isfile('/data/log/can_timeout.txt'):
os.remove('/data/log/can_timeout.txt')
# ensure shared libraries are readable by apks
if EON:
os.chmod(BASEDIR, 0o755)
os.chmod("/dev/shm", 0o777)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
os.system("/data/openpilot/selfdrive/assets/addon/script/gitcommit.sh")
def manager_prepare() -> None:
for p in managed_processes.values():
p.prepare()
def manager_cleanup() -> None:
if EON:
pm_apply_packages('disable')
# send signals to kill all procs
for p in managed_processes.values():
p.stop(block=False)
# ensure all are killed
for p in managed_processes.values():
p.stop(block=True)
cloudlog.info("everything is dead")
def manager_thread() -> None:
cloudlog.bind(daemon="manager")
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
ignore: List[str] = []
if params.get("DongleId", encoding='utf8') in (None, UNREGISTERED_DONGLE_ID):
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
ignore += [x for x in os.getenv("BLOCK", "").split(",") if len(x) > 0]
if EON:
pm_apply_packages('enable')
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
while True:
sm.update()
not_run = ignore[:]
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running = ' '.join("%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc)
print(running)
cloudlog.debug(running)
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# Exit main loop when uninstall/shutdown/reboot is needed
shutdown = False
for param in ("DoUninstall", "DoShutdown", "DoReboot"):
if params.get_bool(param):
shutdown = True
params.put("LastManagerExitReason", param)
cloudlog.warning(f"Shutting down manager - {param} set")
if shutdown:
break
def main() -> None:
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
sentry.capture_exception()
finally:
manager_cleanup()
params = Params()
if params.get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
elif params.get_bool("DoReboot"):
cloudlog.warning("reboot")
HARDWARE.reboot()
elif params.get_bool("DoShutdown"):
cloudlog.warning("shutdown")
HARDWARE.shutdown()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
try:
managed_processes['ui'].stop()
except Exception:
pass
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
py | b41352cc9050aa22b00f3265ee5ee643b17d6322 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import time
from six import wraps, get_method_function, get_method_self
from .exceptions import DoNotReadThisWidget
null_logger = logging.getLogger('widgetastic_null')
null_logger.addHandler(logging.NullHandler())
def call_sig(args, kwargs):
"""Generates a function-like signature of function called with certain parameters.
Args:
args: *args
kwargs: **kwargs
Returns:
        A string with the parameters in parentheses, formatted as they would appear in the call.
"""
arglist = [repr(x) for x in args]
arglist.extend("{0}={1!r}".format(k, v) for k, v in kwargs.items())
return "({args})".format(
args=', '.join(arglist),
)
class PrependParentsAdapter(logging.LoggerAdapter):
"""This class ensures the path to the widget is represented in the log records."""
def process(self, msg, kwargs):
# Sanitizing %->%% for formatter working properly
return '[{}]: {}'.format(self.extra['widget_path'].replace('%', '%%'), msg), kwargs
def __repr__(self):
return '{}({!r}, {!r})'.format(type(self).__name__, self.logger, self.extra['widget_path'])
def create_widget_logger(widget_path, logger=None):
"""Create a logger that prepends the ``widget_path`` to the log records.
Args:
widget_path: A string indicating the path to the widget
logger: Specify a logger if you want some output, otherwise a null logger will be used.
Returns:
A logger instance.
"""
return PrependParentsAdapter(
logger or null_logger,
{'widget_path': widget_path})
def _create_logger_appender(parent_logger, suffix):
"""Generic name-append logger creator."""
if isinstance(parent_logger, PrependParentsAdapter):
widget_path = '{}{}'.format(parent_logger.extra['widget_path'], suffix)
logger = parent_logger.logger
else:
widget_path = suffix
logger = parent_logger
return PrependParentsAdapter(logger, {'widget_path': widget_path.lstrip('/')})
def create_child_logger(parent_logger, child_name):
"""Creates a logger for a standard child widget.
Args:
        parent_logger: Logger of the parent widget (or a plain logger, in which case this
            is the top-level widget).
child_name: Name under which this child widgets is represented.
Returns:
A :py:class:`PrependParentsAdapter` logger instance.
"""
return _create_logger_appender(parent_logger, '/{}'.format(child_name))
def create_item_logger(parent_logger, item):
"""Creates a logger for a widget that is inside iteration - referred to by index or key.
Args:
        parent_logger: Logger of the parent widget (or a plain logger, in which case this
            is the top-level widget).
item: Index or key name under which this widget is represented.
Returns:
A :py:class:`PrependParentsAdapter` logger instance.
"""
return _create_logger_appender(parent_logger, '[{!r}]'.format(item))
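# Illustrative sketch of how the adapters compose (names and values are made up):
#   form_log = create_widget_logger('LoginForm')
#   row_log = create_item_logger(create_child_logger(form_log, 'table'), 0)
#   row_log.info('clicked')  # processed as "[LoginForm/table[0]]: clicked"
#   (silent here, because no real logger was passed to create_widget_logger)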
def logged(log_args=False, log_result=False):
"""Decorator that logs entry and exit to a method and also times the execution.
It assumes that the object where you decorate the methods on has a ``.logger`` attribute.
:py:meth:`widgetastic.widget.Widget.fill` and :py:meth:`widgetastic.widget.Widget.read` are
automatically wrapped with this call due to usage of
:py:class:`widgetastic.widget.WidgetMetaclass` which finds all ``fill`` and ``read`` methods and
wraps them automatically.
Args:
log_args: Whether to log args passed to the method
log_result: Whether to log the result value returned from the method.
"""
def g(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
start_time = time.time()
signature = f.__name__ + (call_sig(args, kwargs) if log_args else '')
self.logger.debug('%s started', signature)
try:
result = f(self, *args, **kwargs)
except DoNotReadThisWidget:
elapsed_time = (time.time() - start_time) * 1000.0
self.logger.info(
'%s not read on widget\'s request (elapsed %.0f ms)',
signature, elapsed_time)
raise
except Exception as e:
elapsed_time = (time.time() - start_time) * 1000.0
self.logger.error(
'An exception happened during %s call (elapsed %.0f ms)',
signature, elapsed_time)
self.logger.exception(e)
raise
else:
elapsed_time = (time.time() - start_time) * 1000.0
if log_result:
self.logger.info('%s -> %r (elapsed %.0f ms)', signature, result, elapsed_time)
else:
self.logger.info('%s (elapsed %.0f ms)', signature, elapsed_time)
return result
wrapped.original_function = f
return wrapped
return g
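# Minimal usage sketch (assumes an object exposing a ``.logger`` attribute, e.g. one built
# with create_widget_logger); the class and method names are illustrative only:
#
#   class SomeWidget(object):
#       def __init__(self, logger):
#           self.logger = logger
#
#       @logged(log_args=True, log_result=True)
#       def read(self):
#           return 'value'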
def call_unlogged(method, *args, **kwargs):
"""Calls the original method without logging when ``logged`` is applied.
In case you pass in an ordinary method that was not decorated, it will work as usual.
Args:
method: The method object from the object.
*args: Args to pass to the method.
**kwargs: Keyword arguments to pass to the method.
Returns:
Whatever that method returns.
"""
try:
f = method.original_function
except AttributeError:
f = get_method_function(method)
return f(get_method_self(method), *args, **kwargs)
|
py | b413536eead765ce81ee3b1b10671cac483f5a64 | #!/usr/bin/env python3
# https://pythonspot.com/pyqt5-drag-and-drop/
import sys
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLineEdit, QLabel
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import pyqtSlot
class App(QWidget):
def __init__(self):
super().__init__()
self.title = 'PyQt5 drag and drop - pythonspot.com'
self.left = 10
self.top = 10
self.width = 320
self.height = 60
self.initUI()
def initUI(self):
self.setWindowTitle(self.title)
self.setGeometry(self.left, self.top, self.width, self.height)
editBox = QLineEdit('Drag this', self)
editBox.setDragEnabled(True)
editBox.move(10, 10)
editBox.resize(100,32)
button = CustomLabel('Drop here.', self)
button.move(130,15)
self.show()
@pyqtSlot()
def on_click(self):
print('PyQt5 button click')
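# Label that accepts plain-text drags; dropping text onto it replaces its caption.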
class CustomLabel(QLabel):
def __init__(self, title, parent):
super().__init__(title, parent)
self.setAcceptDrops(True)
def dragEnterEvent(self, e):
if e.mimeData().hasFormat('text/plain'):
e.accept()
else:
e.ignore()
def dropEvent(self, e):
self.setText(e.mimeData().text())
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = App()
sys.exit(app.exec_()) |
py | b413537c55ae79caf2eaeff943859be27d712472 | from keras.models import Model
from keras.layers import Input, Lambda, BatchNormalization, Convolution2D, MaxPooling2D, Dropout, merge, Flatten, Dense
import keras.backend as K
input_shape = (3, 32, 32)
pr_axis = 1
if K.image_dim_ordering() == 'tf':
input_shape = (32, 32, 3)
pr_axis = 3
def get_slice(axis, axis_id, input_shape):
return Lambda(
lambda x: x[[slice(None) if i != axis else slice(axis_id, axis_id + 1) for i in range(len(input_shape) + 1)]],
output_shape=[p if i+1 != axis else 1 for i, p in enumerate(input_shape)])
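# Illustrative example: with the default ordering above (input_shape == (3, 32, 32), pr_axis == 1),
# get_slice(1, 0, input_shape) builds a Lambda equivalent to x[:, 0:1, :, :] with
# output_shape (1, 32, 32), i.e. it extracts the Y channel of a YUV batch.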
def get_eqsplit_eq_xcon(nb_classes):
inputYUV = Input(shape=input_shape)
# inputNorm = BatchNormalization(axis=1)(inputYUV)
inputY = get_slice(pr_axis, 0, input_shape)(inputYUV)
inputU = get_slice(pr_axis, 1, input_shape)(inputYUV)
inputV = get_slice(pr_axis, 2, input_shape)(inputYUV)
inputYnorm = BatchNormalization(axis=pr_axis)(inputY)
inputUnorm = BatchNormalization(axis=pr_axis)(inputU)
inputVnorm = BatchNormalization(axis=pr_axis)(inputV)
convY = Convolution2D(21, 3, 3, border_mode='same', activation='relu')(inputYnorm)
convU = Convolution2D(21, 3, 3, border_mode='same', activation='relu')(inputUnorm)
convV = Convolution2D(21, 3, 3, border_mode='same', activation='relu')(inputVnorm)
convY = Convolution2D(21, 3, 3, border_mode='same', activation='relu')(convY)
convU = Convolution2D(21, 3, 3, border_mode='same', activation='relu')(convU)
convV = Convolution2D(21, 3, 3, border_mode='same', activation='relu')(convV)
poolY = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same')(convY)
poolU = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same')(convU)
poolV = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same')(convV)
poolY = Dropout(0.25)(poolY)
poolU = Dropout(0.25)(poolU)
poolV = Dropout(0.25)(poolV)
# ------------------
Y_to_U = Convolution2D(21, 1, 1, border_mode='same', activation='relu')(poolY)
Y_to_V = Convolution2D(21, 1, 1, border_mode='same', activation='relu')(poolY)
U_to_Y = Convolution2D(21, 1, 1, border_mode='same', activation='relu')(poolU)
U_to_V = Convolution2D(21, 1, 1, border_mode='same', activation='relu')(poolU)
V_to_Y = Convolution2D(21, 1, 1, border_mode='same', activation='relu')(poolV)
V_to_U = Convolution2D(21, 1, 1, border_mode='same', activation='relu')(poolV)
Ymap = merge([poolY, U_to_Y, V_to_Y], mode='concat', concat_axis=pr_axis)
Umap = merge([poolU, V_to_U, Y_to_U], mode='concat', concat_axis=pr_axis)
Vmap = merge([poolV, Y_to_V, U_to_V], mode='concat', concat_axis=pr_axis)
Ycon = Ymap
Ucon = Umap
Vcon = Vmap
# ------------------
convY = Convolution2D(42, 3, 3, border_mode='same', activation='relu')(Ycon)
convU = Convolution2D(42, 3, 3, border_mode='same', activation='relu')(Ucon)
convV = Convolution2D(42, 3, 3, border_mode='same', activation='relu')(Vcon)
convY = Convolution2D(42, 3, 3, border_mode='same', activation='relu')(convY)
convU = Convolution2D(42, 3, 3, border_mode='same', activation='relu')(convU)
convV = Convolution2D(42, 3, 3, border_mode='same', activation='relu')(convV)
poolY = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same')(convY)
poolU = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same')(convU)
poolV = MaxPooling2D((2, 2), strides=(2, 2), border_mode='same')(convV)
poolY = Dropout(0.25)(poolY)
poolU = Dropout(0.25)(poolU)
poolV = Dropout(0.25)(poolV)
concatenate_map = merge([poolY, poolU, poolV], mode='concat', concat_axis=pr_axis)
reshape = Flatten()(concatenate_map)
fc = Dense(512, activation='relu')(reshape)
fc = Dropout(0.5)(fc)
out = Dense(nb_classes, activation='softmax')(fc)
model = Model(input=inputYUV, output=out)
return model, 32
|
py | b41353e36cdddb8baf389d879dacc40818c52abc | from direct.distributed import DistributedObject
from toontown.catalog import CatalogItem
from toontown.catalog import CatalogItemList
from direct.directnotify.DirectNotifyGlobal import *
class DistributedFurnitureManager(DistributedObject.DistributedObject):
notify = directNotify.newCategory('DistributedFurnitureManager')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.director = 0
self.dfitems = []
def generate(self):
DistributedObject.DistributedObject.generate(self)
self.accept('releaseDirector', self.releaseDirector)
def disable(self):
self.ignoreAll()
if self.cr.furnitureManager == self:
self.cr.furnitureManager = None
base.localAvatar.setFurnitureDirector(0, self)
self.director = 0
self.notify.debug('disable')
DistributedObject.DistributedObject.disable(self)
return
def delete(self):
self.notify.debug('delete')
DistributedObject.DistributedObject.delete(self)
def setOwnerId(self, ownerId):
self.ownerId = ownerId
if self.ownerId == base.localAvatar.doId:
self.cr.furnitureManager = self
if self.cr.objectManager == None:
import houseDesign
self.cr.objectManager = houseDesign.ObjectManager()
return
def setOwnerName(self, name):
self.ownerName = name
def setInteriorId(self, interiorId):
self.interiorId = interiorId
def getInteriorObject(self):
return self.cr.doId2do.get(self.interiorId)
def setAtticItems(self, items):
self.atticItems = CatalogItemList.CatalogItemList(items, store=CatalogItem.Customization)
def setAtticWallpaper(self, items):
self.atticWallpaper = CatalogItemList.CatalogItemList(items, store=CatalogItem.Customization)
def setAtticWindows(self, items):
self.atticWindows = CatalogItemList.CatalogItemList(items, store=CatalogItem.Customization)
def setDeletedItems(self, items):
self.deletedItems = CatalogItemList.CatalogItemList(items, store=CatalogItem.Customization)
def releaseDirector(self):
if self.director == base.localAvatar.doId:
self.d_suggestDirector(0)
self.setDirector(0)
def d_suggestDirector(self, avId):
self.sendUpdate('suggestDirector', [avId])
def setDirector(self, avId):
self.notify.info('Furniture director is now %s' % avId)
base.localAvatar.setFurnitureDirector(avId, self)
self.director = avId
def d_avatarEnter(self):
self.sendUpdate('avatarEnter', [])
def d_avatarExit(self):
self.sendUpdate('avatarExit', [])
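    # The item/wallpaper/window methods below follow a request/response pattern: each
    # request wraps its callback in a context id via getCallbackContext, sends it to the
    # server with sendUpdate, and the matching *Response handler later fires the callback
    # through doCallbackContext.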
def moveItemToAttic(self, dfitem, callback):
context = self.getCallbackContext(callback, [dfitem.item])
self.sendUpdate('moveItemToAtticMessage', [dfitem.doId, context])
def moveItemFromAttic(self, index, posHpr, callback):
context = self.getCallbackContext(callback, [index])
self.sendUpdate('moveItemFromAtticMessage', [index,
posHpr[0],
posHpr[1],
posHpr[2],
posHpr[3],
posHpr[4],
posHpr[5],
context])
def deleteItemFromAttic(self, item, index, callback):
context = self.getCallbackContext(callback, [item, index])
blob = item.getBlob(store=CatalogItem.Customization)
self.sendUpdate('deleteItemFromAtticMessage', [blob, index, context])
def deleteItemFromRoom(self, dfitem, callback):
context = self.getCallbackContext(callback, [dfitem.item])
blob = dfitem.item.getBlob(store=CatalogItem.Customization)
self.sendUpdate('deleteItemFromRoomMessage', [blob, dfitem.doId, context])
def moveWallpaperFromAttic(self, index, room, callback):
context = self.getCallbackContext(callback, [index, room])
self.sendUpdate('moveWallpaperFromAtticMessage', [index, room, context])
def deleteWallpaperFromAttic(self, item, index, callback):
context = self.getCallbackContext(callback, [item, index])
blob = item.getBlob(store=CatalogItem.Customization)
self.sendUpdate('deleteWallpaperFromAtticMessage', [blob, index, context])
def moveWindowToAttic(self, slot, callback):
context = self.getCallbackContext(callback, [slot])
self.sendUpdate('moveWindowToAtticMessage', [slot, context])
def moveWindowFromAttic(self, index, slot, callback):
context = self.getCallbackContext(callback, [index, slot])
self.sendUpdate('moveWindowFromAtticMessage', [index, slot, context])
def moveWindow(self, fromSlot, toSlot, callback):
context = self.getCallbackContext(callback, [fromSlot, toSlot])
self.sendUpdate('moveWindowMessage', [fromSlot, toSlot, context])
def deleteWindowFromAttic(self, item, index, callback):
context = self.getCallbackContext(callback, [item, index])
blob = item.getBlob(store=CatalogItem.Customization)
self.sendUpdate('deleteWindowFromAtticMessage', [blob, index, context])
def recoverDeletedItem(self, item, index, callback):
context = self.getCallbackContext(callback, [item, index])
blob = item.getBlob(store=CatalogItem.Customization)
self.sendUpdate('recoverDeletedItemMessage', [blob, index, context])
def moveItemToAtticResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def moveItemFromAtticResponse(self, retcode, objectId, context):
if retcode >= 0:
dfitem = base.cr.doId2do[objectId]
else:
dfitem = None
self.doCallbackContext(context, [retcode, dfitem])
return
def deleteItemFromAtticResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def deleteItemFromRoomResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def moveWallpaperFromAtticResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def deleteWallpaperFromAtticResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def moveWindowToAtticResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def moveWindowFromAtticResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def moveWindowResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def deleteWindowFromAtticResponse(self, retcode, context):
self.doCallbackContext(context, [retcode])
def recoverDeletedItemResponse(self, retcode, context):
self.doCallbackContext(context, [retcode]) |
py | b413543aaf5b93ba81fe71deedec6511f07e08fa | #!/usr/bin/python
# Author: Brendan Le Foll <[email protected]>
# Contributions: Zion Orent <[email protected]>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_lsm303dlh as lsm303dlh
def main():
# Instantiate LSM303DLH compass on I2C
myAccelrCompass = lsm303dlh.LSM303DLH(0)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This lets you run code on exit,
# including functions from myAccelrCompass
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
while(1):
# Load coordinates into LSM303DLH object
successFail = myAccelrCompass.getCoordinates()
        # Read the raw coordinate data, reported in XYZ order. The sensor returns XZY,
        # but the driver compensates and makes it XYZ
coords = myAccelrCompass.getRawCoorData()
# Print out the X, Y, and Z coordinate data
# using two different methods
outputStr = "coor: rX {0} - rY {1} - rZ {2}".format(
coords.__getitem__(0), coords.__getitem__(1),
coords.__getitem__(2))
print(outputStr)
outputStr = "coor: gX {0} - gY {1} - gZ {2}".format(
myAccelrCompass.getCoorX(), myAccelrCompass.getCoorY(),
myAccelrCompass.getCoorZ())
print(outputStr)
# Get and print out the heading
print("heading:", myAccelrCompass.getHeading())
# Get the acceleration
myAccelrCompass.getAcceleration();
accel = myAccelrCompass.getRawAccelData();
# Print out the X, Y, and Z acceleration data
# using two different methods
outputStr = "acc: rX {0} - rY {1} - Z {2}".format(
accel.__getitem__(0), accel.__getitem__(1), accel.__getitem__(2))
print(outputStr)
outputStr = "acc: gX {0} - gY {1} - gZ {2}".format(
myAccelrCompass.getAccelX(), myAccelrCompass.getAccelY(),
myAccelrCompass.getAccelZ())
print(outputStr)
print(" ")
time.sleep(1)
if __name__ == '__main__':
main()
|
py | b413544e7f164d4fa131446a6dc28b606400d06c | # coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:[email protected]>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import memsource_cli
from memsource_cli.models.moravia_warning_dto import MoraviaWarningDto # noqa: E501
from memsource_cli.rest import ApiException
class TestMoraviaWarningDto(unittest.TestCase):
"""MoraviaWarningDto unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMoraviaWarningDto(self):
"""Test MoraviaWarningDto"""
# FIXME: construct object with mandatory attributes with example values
# model = memsource_cli.models.moravia_warning_dto.MoraviaWarningDto() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b413554103241bd20463225ff046b4ace9db44fd | # Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import torch
import torch.distributed as dist
from concurrent.futures import ThreadPoolExecutor
from galileo.platform.log import log
from galileo.platform.export import export
from galileo.framework.pytorch.python.hooks.base import BaseHook
DEFAULT_CHECKPOINT_FILENAME = 'checkpoint.pth'
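# Restores the model state (and, when given, the optimizer state) from checkpoint_file,
# updates the trainer's start epoch, and returns True on success or False if the file is missing.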
def load_checkpoint(trainer, checkpoint_file, optimizer=None):
if not os.path.isfile(checkpoint_file):
return False
local_rank = trainer.config['local_rank']
if local_rank is None:
checkpoint = torch.load(checkpoint_file)
else:
if trainer.config['use_cuda']:
# Map model to be loaded to specified single gpu.
loc = f'cuda:{local_rank}'
            checkpoint = torch.load(checkpoint_file, map_location=loc)
        else:
            # No CUDA device to map onto; load on the CPU so `checkpoint` is always assigned.
            checkpoint = torch.load(checkpoint_file, map_location='cpu')
try:
trainer.model.load_state_dict(checkpoint['state_dict'])
except RuntimeError:
from torch.nn.parallel import DistributedDataParallel
dist.init_process_group(backend=trainer.config['dist_backend'],
init_method=trainer.config['dist_url'],
world_size=trainer.config['world_size'],
rank=trainer.config['global_rank'])
trainer.model = DistributedDataParallel(trainer.model)
trainer.model.load_state_dict(checkpoint['state_dict'])
start_epoch = checkpoint['epoch']
trainer.run_config['start_epoch'] = start_epoch
if optimizer is not None:
optimizer.load_state_dict(checkpoint['optimizer'])
log.info(f'loaded checkpoint "{checkpoint_file}" (epoch {start_epoch})')
return True
@export('galileo.pytorch')
class CheckpointHook(BaseHook):
r'''
load and save checkpoint
'''
def __init__(self, trainer, optimizer):
super().__init__()
self.trainer = trainer
self.optimizer = optimizer
self.resume = trainer.run_config.get('resume')
self.model_dir = trainer.run_config.get('model_dir')
self.checkpoint_file = os.path.join(self.model_dir,
DEFAULT_CHECKPOINT_FILENAME)
self.save_checkpoint_epochs = trainer.run_config.get(
'save_checkpoint_epochs')
self.save_best_model = trainer.run_config.get('save_best_model')
self.save_model = None
self.save_loss = None
self.epoch = 0
self._checkpoint_executor = ThreadPoolExecutor(
max_workers=2, thread_name_prefix='checkpoint-threadpool')
def on_train_begin(self):
if self.resume:
if not load_checkpoint(
self.trainer, self.resume, optimizer=self.optimizer):
log.warning(f'no checkpoint found at "{self.resume}"')
def on_evaluate_begin(self):
resume = self.resume
if not resume:
resume = self.checkpoint_file
if not load_checkpoint(self.trainer, resume, optimizer=None):
log.warning(f'no checkpoint found at "{resume}", '
'this may not what you want')
def on_evaluate_end(self):
if dist.is_initialized():
dist.barrier()
def on_predict_begin(self):
self.on_evaluate_begin()
def on_epoch_begin(self, epoch, steps):
self.epoch = epoch
def on_epoch_end(self, outputs):
loss = outputs.pop('loss')
if self.save_best_model and self.optimizer is not None:
if self.save_model is None or self.save_loss > loss.item():
self.save_loss = loss.item()
self.save_model = (self.trainer.model.state_dict(),
self.optimizer.state_dict())
if ((self.epoch + 1) % self.save_checkpoint_epochs
== 0) and self.trainer.config['is_master']:
if not self.save_best_model and self.optimizer is not None:
self.save_loss = loss.item()
self.save_model = (self.trainer.model.state_dict(),
self.optimizer.state_dict())
if self.save_loss is not None and self.save_model is not None:
self._checkpoint_executor.submit(
torch.save, {
'epoch': self.epoch + 1,
'loss': self.save_loss,
'state_dict': self.save_model[0],
'optimizer': self.save_model[1],
}, self.checkpoint_file)
def on_train_end(self):
self._checkpoint_executor.shutdown()
|
py | b41355e90c867da7c8ce100489bef500521834d6 | import os
import sys
import shutil
try:
from dialog import Dialog
except:
from menu import Dialog
from shutil import copyfile
from shutil import rmtree
from util import copytree
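# Walk src_path and return the shortest path whose name ends with search_name
# (a directory when directory=True, otherwise a file), or None when nothing matches.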
def find_installed_dir(src_path,search_name,directory=True):
found=[]
for root, dirs, files in os.walk(src_path):
for name in list(set(files + dirs)) :
if directory==True:
dir_name=os.path.dirname(os.path.join(root, name))
if dir_name.endswith(search_name):
if dir_name not in found:
found.append(dir_name)
if directory==False:
file_name=os.path.join(root, name)
if os.path.isfile(file_name)==True:
if file_name.endswith(search_name):
if file_name not in found:
found.append(file_name)
print(found)
if len(found)==0:
return None
return min(found, key=len)
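# Locate `target` under search_path with find_installed_dir and move it into dest_root;
# a matching directory has its contents moved file by file, a matching file is moved as-is,
# and nothing happens when no match is found.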
def intelligent_move(dest_root,search_path,target,directory=True):
src=find_installed_dir(search_path,target,directory=directory)
if src==None:
print("not found",target)
return
if os.path.isdir(src)==True:
for f in os.listdir(src):
dest=os.path.join(dest_root,f)
src_file=os.path.join(src,f)
if os.path.isdir(os.path.dirname(dest))==False:
os.makedirs(os.path.dirname(dest))
print("move",src_file)
shutil.move(src_file,dest)
else:
shutil.move(src,dest_root)
def flat_install(d):
pub_path=os.path.join(os.getcwd(),"pub")
output_path=os.path.join(pub_path,"build")
flat_path=os.path.join(pub_path,"flat")
flat_path_plugins=os.path.join(flat_path,"plugins")
flat_path_inp_template=os.path.join(flat_path,"inp_template")
flat_path_docs=os.path.join(flat_path,"docs")
flat_path_device_lib=os.path.join(flat_path,"device_lib")
if os.path.isdir(pub_path)==True:
rmtree(pub_path)
os.makedirs(output_path)
interactive=">../log.txt 2>../log.txt &"
interactive=""
os.system("cd gpvdm_core; make DESTDIR="+output_path+" install "+interactive)
#ret=d.tailbox("log.txt", height=None, width=150)
os.system("cd gpvdm_gui; make DESTDIR="+output_path+" install "+interactive)
#ret=d.tailbox("log.txt", height=None, width=150)
os.system("cd gpvdm_data; make DESTDIR="+output_path+" install "+interactive)
#ret=d.tailbox("log.txt", height=None, width=150)
#This needs fixing
intelligent_move(flat_path,output_path,"gpvdm_core")
intelligent_move(flat_path,output_path,"gpvdm_data")
intelligent_move(flat_path_device_lib,output_path,"device_lib")
intelligent_move(flat_path,output_path,"gpvdm_gui")
intelligent_move(flat_path_plugins,output_path,"plugins")
intelligent_move(flat_path,output_path,"icons")
intelligent_move(flat_path_docs,output_path,"doc")
intelligent_move(flat_path,output_path,"man")
intelligent_move(flat_path_inp_template,output_path,"inp_template")
intelligent_move(flat_path,output_path,"applications")
intelligent_move(flat_path,output_path,"mime")
intelligent_move(flat_path,output_path,"video")
#The bin file
intelligent_move(flat_path,output_path,"gpvdm_core",directory=False)
intelligent_move(flat_path,output_path,"gpvdm_core.exe",directory=False)
intelligent_move(flat_path,output_path,"libgpvdm_core.dll",directory=False)
intelligent_move(flat_path,output_path,"libgpvdm_core.so",directory=False)
intelligent_move(flat_path,output_path,"base.gpvdm",directory=False)
os.symlink("gui/gpvdm.py",os.path.join(flat_path,"gpvdm"))
#os.symlink("gui/gpvdm_tool.py",os.path.join(flat_path,"gpvdm_tool"))
#shutil.copytree("./gpvdm_data/materials", os.path.join(output_path,"materials"), symlinks=False)
#shutil.copytree("./gpvdm_data/spectra", os.path.join(output_path,"spectra"), symlinks=False)
return flat_path
|