the-stack_0_2359
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import json
import multiprocessing as mp
from utils import iou_with_anchors
def load_json(file):
with open(file) as json_file:
data = json.load(json_file)
return data
# Gather video info for the validation (test) subset
def getDatasetDict(opt):
df = pd.read_csv(opt["video_info"])
json_data = load_json(opt["video_anno"])
database = json_data
video_dict = {}
for i in range(len(df)):
video_name = df.video.values[i]
video_info = database[video_name]
video_new_info = {}
video_new_info['duration_frame'] = video_info['duration_frame']
video_new_info['duration_second'] = video_info['duration_second']
video_new_info["feature_frame"] = video_info['feature_frame']
video_subset = df.subset.values[i]
video_new_info['annotations'] = video_info['annotations']
if video_subset == 'validation':
video_dict[video_name] = video_new_info
return video_dict # 4728 videos in the validation subset
# soft_nms is applied to each video's proposals independently
def soft_nms(df, alpha, t1, t2):
'''
df: proposals generated by network;
alpha: alpha value of Gaussian decaying function;
t1, t2: low and high thresholds for soft-NMS.
'''
df = df.sort_values(by="score", ascending=False) # sort by score in descending order
tstart = list(df.xmin.values[:])
tend = list(df.xmax.values[:])
tscore = list(df.score.values[:])
rstart = []
rend = []
rscore = []
# keep roughly the top 100 proposals per video
while len(tscore) > 1 and len(rscore) < 101:
max_index = tscore.index(max(tscore))
tmp_iou_list = iou_with_anchors(
np.array(tstart),
np.array(tend), tstart[max_index], tend[max_index])
for idx in range(0, len(tscore)):
if idx != max_index:
tmp_iou = tmp_iou_list[idx]
tmp_width = tend[max_index] - tstart[max_index]
if tmp_iou > t1 + (t2 - t1) * tmp_width:
tscore[idx] = tscore[idx] * np.exp(-np.square(tmp_iou) /
alpha)
rstart.append(tstart[max_index])
rend.append(tend[max_index])
rscore.append(tscore[max_index])
tstart.pop(max_index)
tend.pop(max_index)
tscore.pop(max_index)
newDf = pd.DataFrame()
newDf['score'] = rscore
newDf['xmin'] = rstart
newDf['xmax'] = rend
return newDf
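# A worked example of the decay rule above (illustrative numbers, not values
# from any paper or config): with t1=0.5, t2=0.9 and a top-scoring proposal of
# normalized width 0.2, the threshold is 0.5 + (0.9 - 0.5) * 0.2 = 0.58. Any
# other proposal whose IoU with it exceeds 0.58 has its score multiplied by
# exp(-IoU^2 / alpha); for IoU = 0.8 and alpha = 0.4 that factor is
# exp(-0.64 / 0.4) ≈ 0.20, so heavily overlapping proposals are suppressed
# rather than discarded outright.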
def video_post_process(opt, video_list, video_dict):
for video_name in video_list:
df = pd.read_csv("./output/BMN_results/" + video_name + ".csv")
if len(df) > 1:
snms_alpha = opt["soft_nms_alpha"]
snms_t1 = opt["soft_nms_low_thres"]
snms_t2 = opt["soft_nms_high_thres"]
df = soft_nms(df, snms_alpha, snms_t1, snms_t2)
df = df.sort_values(by="score", ascending=False)
video_info = video_dict[video_name]
video_duration = float(video_info["duration_frame"] // 16 * 16) / video_info["duration_frame"] * video_info[
"duration_second"]
proposal_list = []
for j in range(min(100, len(df))):
tmp_proposal = {}
tmp_proposal["score"] = df.score.values[j]
tmp_proposal["segment"] = [max(0, df.xmin.values[j]) * video_duration,
min(1, df.xmax.values[j]) * video_duration]
proposal_list.append(tmp_proposal)
result_dict[video_name[2:]] = proposal_list
def BMN_post_processing(opt):
video_dict = getDatasetDict(opt)
video_list = list(video_dict.keys()) # [:100]
global result_dict
result_dict = mp.Manager().dict()
num_videos = len(video_list)
num_videos_per_thread = num_videos // opt["post_process_thread"] # e.g. 8 worker processes, 591 videos each
# spawn worker processes
processes = []
for tid in range(opt["post_process_thread"] - 1):
tmp_video_list = video_list[tid * num_videos_per_thread:(tid + 1) * num_videos_per_thread]
p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))
p.start()
processes.append(p)
# handle the remainder when the video count is not evenly divisible, i.e. process the leftover videos
tmp_video_list = video_list[(opt["post_process_thread"] - 1) * num_videos_per_thread:]
p = mp.Process(target=video_post_process, args=(opt, tmp_video_list, video_dict))
p.start()
processes.append(p)
for p in processes:
p.join()
result_dict = dict(result_dict)
output_dict = {"version": "VERSION 1.3", "results": result_dict, "external_data": {}}
outfile = open(opt["result_file"], "w")
json.dump(output_dict, outfile)
outfile.close()
# opt = opts.parse_opt()
# opt = vars(opt)
# BMN_post_processing(opt)
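# A minimal sketch of the option dict this module expects, inferred from the
# keys accessed above (all paths and values below are placeholders, not the
# real configuration):
# opt = {
#     "video_info": "./data/video_info.csv",
#     "video_anno": "./data/video_anno.json",
#     "soft_nms_alpha": 0.4,
#     "soft_nms_low_thres": 0.5,
#     "soft_nms_high_thres": 0.9,
#     "post_process_thread": 8,
#     "result_file": "./output/result_proposal.json",
# }
# BMN_post_processing(opt)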
the-stack_0_2360
# Copyright 2012 by Wibowo Arindrarto. All rights reserved.
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Bio.SearchIO objects to model high scoring regions between query and hit."""
import warnings
from operator import ge, le
from Bio import BiopythonWarning
from Bio.Align import MultipleSeqAlignment
from Bio.Alphabet import single_letter_alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio._utils import getattr_str, trim_str
from Bio.SearchIO._utils import singleitem, allitems, fullcascade, fragcascade
from ._base import _BaseHSP
class HSP(_BaseHSP):
"""Class representing high-scoring region(s) between query and hit.
HSP (high-scoring pair) objects are contained by Hit objects (see Hit).
In most cases, HSP objects store the bulk of the statistics and results
(e.g. e-value, bitscores, query sequence, etc.) produced by a search
program.
Depending on the search output file format, a given HSP will contain one
or more HSPFragment object(s). Examples of search programs that produce HSPs
with a single HSPFragment are BLAST, HMMER, and FASTA. Other programs such as
BLAT or Exonerate may produce HSPs containing more than one HSPFragment.
However, their native terminologies may differ: in BLAT these fragments
are called 'blocks' while in Exonerate they are called exons or NER.
Here are examples from each type of HSP. The first one comes from a BLAST
search::
>>> from Bio import SearchIO
>>> blast_qresult = next(SearchIO.parse('Blast/mirna.xml', 'blast-xml'))
>>> blast_hsp = blast_qresult[1][0] # the first HSP from the second hit
>>> blast_hsp
HSP(hit_id='gi|301171311|ref|NR_035856.1|', query_id='33211', 1 fragments)
>>> print(blast_hsp)
Query: 33211 mir_1
Hit: gi|301171311|ref|NR_035856.1| Pan troglodytes microRNA mir-520b ...
Query range: [1:61] (1)
Hit range: [0:60] (1)
Quick stats: evalue 1.7e-22; bitscore 109.49
Fragments: 1 (60 columns)
Query - CCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Hit - CCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
For HSPs with a single HSPFragment, you can invoke ``print`` on it and see the
underlying sequence alignment, if it exists. This is not the case for HSPs
with more than one HSPFragment. Below is an example, using an HSP from a
BLAT search. Invoking ``print`` on these HSPs will instead show a table of the
HSPFragment objects it contains::
>>> blat_qresult = SearchIO.read('Blat/mirna.pslx', 'blat-psl', pslx=True)
>>> blat_hsp = blat_qresult[1][0] # the first HSP from the second hit
>>> blat_hsp
HSP(hit_id='chr11', query_id='blat_1', 2 fragments)
>>> print(blat_hsp)
Query: blat_1 <unknown description>
Hit: chr11 <unknown description>
Query range: [42:67] (-1)
Hit range: [59018929:59018955] (1)
Quick stats: evalue ?; bitscore ?
Fragments: --- -------------- ---------------------- ----------------------
# Span Query range Hit range
--- -------------- ---------------------- ----------------------
0 6 [61:67] [59018929:59018935]
1 16 [42:58] [59018939:59018955]
Notice that in HSPs with more than one HSPFragment, the HSP's ``query_range``
and ``hit_range`` properties encompass all the fragments they contain.
You can check whether an HSP has more than one HSPFragment using the
``is_fragmented`` property::
>>> blast_hsp.is_fragmented
False
>>> blat_hsp.is_fragmented
True
Since HSP objects are also containers similar to Python lists, you can
access a single fragment in an HSP using its integer index::
>>> blat_fragment = blat_hsp[0]
>>> print(blat_fragment)
Query: blat_1 <unknown description>
Hit: chr11 <unknown description>
Query range: [61:67] (-1)
Hit range: [59018929:59018935] (1)
Fragments: 1 (6 columns)
Query - tatagt
Hit - tatagt
This applies to HSP objects with a single fragment as well::
>>> blast_fragment = blast_hsp[0]
>>> print(blast_fragment)
Query: 33211 mir_1
Hit: gi|301171311|ref|NR_035856.1| Pan troglodytes microRNA mir-520b ...
Query range: [1:61] (1)
Hit range: [0:60] (1)
Fragments: 1 (60 columns)
Query - CCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Hit - CCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
Regardless of the search output file format, HSP objects provide the
properties listed below. These properties always return values in a list,
due to the HSP object itself being a list-like container. However, for
HSP objects with a single HSPFragment, shortcut properties that fetch
the item from the list are also provided.
+----------------------+---------------------+-----------------------------+
| Property | Shortcut | Value |
+======================+=====================+=============================+
| aln_all | aln | HSP alignments as |
| | | MultipleSeqAlignment object |
+----------------------+---------------------+-----------------------------+
| aln_annotation_all | aln_annotation | dictionary of annotation(s) |
| | | of all fragments' alignments|
+----------------------+---------------------+-----------------------------+
| fragments | fragment | HSPFragment objects |
+----------------------+---------------------+-----------------------------+
| hit_all | hit | hit sequence as SeqRecord |
| | | objects |
+----------------------+---------------------+-----------------------------+
| hit_features_all | hit_features | SeqFeatures of all hit |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| hit_start_all | hit_start* | start coordinates of the |
| | | hit fragments |
+----------------------+---------------------+-----------------------------+
| hit_end_all | hit_end* | end coordinates of the hit |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| hit_span_all | hit_span* | sizes of each hit fragments |
+----------------------+---------------------+-----------------------------+
| hit_strand_all | hit_strand | strand orientations of the |
| | | hit fragments |
+----------------------+---------------------+-----------------------------+
| hit_frame_all | hit_frame | reading frames of the hit |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| hit_range_all | hit_range | tuples of start and end |
| | | coordinates of each hit |
| | | fragment |
+----------------------+---------------------+-----------------------------+
| query_all | query | query sequence as SeqRecord |
| | | object |
+----------------------+---------------------+-----------------------------+
| query_features_all | query_features | SeqFeatures of all query |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| query_start_all | query_start* | start coordinates of the |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| query_end_all | query_end* | end coordinates of the |
| | | query fragments |
+----------------------+---------------------+-----------------------------+
| query_span_all | query_span* | sizes of each query |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| query_strand_all | query_strand | strand orientations of the |
| | | query fragments |
+----------------------+---------------------+-----------------------------+
| query_frame_all | query_frame | reading frames of the query |
| | | fragments |
+----------------------+---------------------+-----------------------------+
| query_range_all | query_range | tuples of start and end |
| | | coordinates of each query |
| | | fragment |
+----------------------+---------------------+-----------------------------+
For all types of HSP objects, the property will return the values in a list.
Shortcuts are only applicable for HSPs with one fragment. Except for the ones
noted, if they are used on an HSP with more than one fragment, an exception
will be raised.
For properties that may be used in HSPs with multiple or single fragments
(``*_start``, ``*_end``, and ``*_span`` properties), their interpretation depends
on how many fragments the HSP has:
+------------+---------------------------------------------------+
| Property | Value |
+============+===================================================+
| hit_start | smallest coordinate value of all hit fragments |
+------------+---------------------------------------------------+
| hit_end | largest coordinate value of all hit fragments |
+------------+---------------------------------------------------+
| hit_span | difference between ``hit_start`` and ``hit_end`` |
+------------+---------------------------------------------------+
| query_start| smallest coordinate value of all query fragments |
+------------+---------------------------------------------------+
| query_end | largest coordinate value of all query fragments |
+------------+---------------------------------------------------+
| query_span | difference between ``query_start`` and |
| | ``query_end`` |
+------------+---------------------------------------------------+
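For example, for the BLAT HSP shown earlier, whose two fragments cover the
query ranges [61:67] and [42:58], ``query_start`` is 42, ``query_end`` is 67,
and ``query_span`` is 25 (the difference between the two).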
In addition to the objects listed above, HSP objects also provide the
following properties and/or attributes:
+--------------------+------------------------------------------------------+
| Property | Value |
+====================+======================================================+
| aln_span | total number of residues in all HSPFragment objects |
+--------------------+------------------------------------------------------+
| alphabet | alphabet used in hit and query SeqRecord objects |
+--------------------+------------------------------------------------------+
| is_fragmented | boolean, whether there are multiple fragments or not |
+--------------------+------------------------------------------------------+
| hit_id | ID of the hit sequence |
+--------------------+------------------------------------------------------+
| hit_description | description of the hit sequence |
+--------------------+------------------------------------------------------+
| hit_inter_ranges | list of hit sequence coordinates of the regions |
| | between fragments |
+--------------------+------------------------------------------------------+
| hit_inter_spans | list of lengths of the regions between hit fragments |
+--------------------+------------------------------------------------------+
| output_index | 0-based index for storing the order by which the HSP |
| | appears in the output file (default: -1). |
+--------------------+------------------------------------------------------+
| query_id | ID of the query sequence |
+--------------------+------------------------------------------------------+
| query_description | description of the query sequence |
+--------------------+------------------------------------------------------+
| query_inter_ranges | list of query sequence coordinates of the regions |
| | between fragments |
+--------------------+------------------------------------------------------+
| query_inter_spans | list of lengths of the regions between query |
| | fragments |
+--------------------+------------------------------------------------------+
.. [1] may be used in HSPs with multiple fragments
"""
# attributes we don't want to transfer when creating a new Hit class
# from this one
_NON_STICKY_ATTRS = ("_items",)
def __init__(self, fragments=(), output_index=-1):
"""Initialize an HSP object.
:param fragments: fragments contained in the HSP object
:type fragments: iterable yielding HSPFragment
:param output_index: optional index / ordering of the HSP fragment in
the original input file.
:type output_index: integer
HSP objects must be initialized with a list containing at least one
HSPFragment object. If multiple HSPFragment objects are used for
initialization, they must all have the same ``query_id``,
``query_description``, ``hit_id``, ``hit_description``, and alphabet
properties.
"""
if not fragments:
raise ValueError("HSP objects must have at least one HSPFragment object.")
# TODO - Move this into the for loop in case fragments is a single-use
# iterable?
# check that all fragments contain the same IDs, descriptions, alphabet
for attr in (
"query_id",
"query_description",
"hit_id",
"hit_description",
"alphabet",
):
if len({getattr(frag, attr) for frag in fragments}) != 1:
raise ValueError(
"HSP object can not contain fragments with more than one %s." % attr
)
self.output_index = output_index
self._items = []
for fragment in fragments:
self._validate_fragment(fragment)
self._items.append(fragment)
def __repr__(self):
"""Return string representation of HSP object."""
return "%s(hit_id=%r, query_id=%r, %r fragments)" % (
self.__class__.__name__,
self.hit_id,
self.query_id,
len(self),
)
def __iter__(self):
"""Iterate over HSP items."""
return iter(self._items)
def __contains__(self, fragment):
"""Return True if HSPFragment is on HSP items."""
return fragment in self._items
def __len__(self):
"""Return number of HSPs items."""
return len(self._items)
def __bool__(self):
"""Return True if it has HSPs."""
return bool(self._items)
def __str__(self):
"""Return a human readable summary of the HSP object."""
lines = []
# set hsp info line
statline = []
# evalue
evalue = getattr_str(self, "evalue", fmt="%.2g")
statline.append("evalue " + evalue)
# bitscore
bitscore = getattr_str(self, "bitscore", fmt="%.2f")
statline.append("bitscore " + bitscore)
lines.append("Quick stats: " + "; ".join(statline))
if len(self.fragments) == 1:
return "\n".join(
[self._str_hsp_header(), "\n".join(lines), self.fragments[0]._str_aln()]
)
else:
lines.append(
" Fragments: %s %s %s %s" % ("-" * 3, "-" * 14, "-" * 22, "-" * 22)
)
pattern = "%16s %14s %22s %22s"
lines.append(pattern % ("#", "Span", "Query range", "Hit range"))
lines.append(pattern % ("-" * 3, "-" * 14, "-" * 22, "-" * 22))
for idx, block in enumerate(self.fragments):
# set hsp line and table
# alignment span
aln_span = getattr_str(block, "aln_span")
# query region
query_start = getattr_str(block, "query_start")
query_end = getattr_str(block, "query_end")
query_range = "[%s:%s]" % (query_start, query_end)
# max column length is 22
query_range = trim_str(query_range, 22, "~]")
# hit region
hit_start = getattr_str(block, "hit_start")
hit_end = getattr_str(block, "hit_end")
hit_range = "[%s:%s]" % (hit_start, hit_end)
hit_range = trim_str(hit_range, 22, "~]")
# append the hsp row
lines.append(pattern % (str(idx), aln_span, query_range, hit_range))
return self._str_hsp_header() + "\n" + "\n".join(lines)
def __getitem__(self, idx):
"""Return object of index idx."""
# if key is slice, return a new HSP instance
if isinstance(idx, slice):
obj = self.__class__(self._items[idx])
self._transfer_attrs(obj)
return obj
return self._items[idx]
def __setitem__(self, idx, fragments):
"""Set an item of index idx with the given fragments."""
# handle case if hsps is a list of hsp
if isinstance(fragments, (list, tuple)):
for fragment in fragments:
self._validate_fragment(fragment)
else:
self._validate_fragment(fragments)
self._items[idx] = fragments
def __delitem__(self, idx):
"""Delete item of index idx."""
# note that this may result in an empty HSP object, which should be
# invalid
del self._items[idx]
def _validate_fragment(self, fragment):
if not isinstance(fragment, HSPFragment):
raise TypeError("HSP objects can only contain HSPFragment objects.")
# HACK: to make validation during __init__ work
if self._items:
if fragment.hit_id != self.hit_id:
raise ValueError(
"Expected HSPFragment with hit ID %r, found %r instead."
% (self.hit_id, fragment.hit_id)
)
if fragment.hit_description != self.hit_description:
raise ValueError(
"Expected HSPFragment with hit description %r, found %r instead."
% (self.hit_description, fragment.hit_description)
)
if fragment.query_id != self.query_id:
raise ValueError(
"Expected HSPFragment with query ID %r, found %r instead."
% (self.query_id, fragment.query_id)
)
if fragment.query_description != self.query_description:
raise ValueError(
"Expected HSP with query description %r, found %r instead."
% (self.query_description, fragment.query_description)
)
def _aln_span_get(self):
# length of all alignments
# alignment span can be its own attribute, or computed from
# query / hit length
return sum(frg.aln_span for frg in self.fragments)
aln_span = property(
fget=_aln_span_get, doc="Total number of columns in all HSPFragment objects."
)
# coordinate properties #
def _get_coords(self, seq_type, coord_type):
assert seq_type in ("hit", "query")
assert coord_type in ("start", "end")
coord_name = "%s_%s" % (seq_type, coord_type)
coords = [getattr(frag, coord_name) for frag in self.fragments]
if None in coords:
warnings.warn(
"'None' exist in %s coordinates; ignored" % (coord_name),
BiopythonWarning,
)
return coords
def _hit_start_get(self):
return min(self._get_coords("hit", "start"))
hit_start = property(
fget=_hit_start_get, doc="Smallest coordinate value of all hit fragments."
)
def _query_start_get(self):
return min(self._get_coords("query", "start"))
query_start = property(
fget=_query_start_get, doc="Smallest coordinate value of all query fragments."
)
def _hit_end_get(self):
return max(self._get_coords("hit", "end"))
hit_end = property(
fget=_hit_end_get, doc="Largest coordinate value of all hit fragments."
)
def _query_end_get(self):
return max(self._get_coords("query", "end"))
query_end = property(
fget=_query_end_get, doc="Largest coordinate value of all hit fragments."
)
# coordinate-dependent properties #
def _hit_span_get(self):
try:
return self.hit_end - self.hit_start
except TypeError: # triggered if any of the coordinates are None
return None
hit_span = property(
fget=_hit_span_get, doc="The number of hit residues covered by the HSP."
)
def _query_span_get(self):
try:
return self.query_end - self.query_start
except TypeError: # triggered if any of the coordinates are None
return None
query_span = property(
fget=_query_span_get, doc="The number of query residues covered by the HSP."
)
def _hit_range_get(self):
return (self.hit_start, self.hit_end)
hit_range = property(
fget=_hit_range_get, doc="Tuple of HSP hit start and end coordinates."
)
def _query_range_get(self):
return (self.query_start, self.query_end)
query_range = property(
fget=_query_range_get, doc="Tuple of HSP query start and end coordinates."
)
def _inter_ranges_get(self, seq_type):
# this property assumes that there are no mixed strands in a hit/query
assert seq_type in ("query", "hit")
strand = getattr(self, "%s_strand_all" % seq_type)[0]
coords = getattr(self, "%s_range_all" % seq_type)
# determine function used to set inter range
# start and end coordinates, given two pairs
# of fragment start and end coordinates
if strand == -1:
startfunc, endfunc = min, max
else:
startfunc, endfunc = max, min
inter_coords = []
for idx, coord in enumerate(coords[:-1]):
start = startfunc(coords[idx])
end = endfunc(coords[idx + 1])
inter_coords.append((min(start, end), max(start, end)))
return inter_coords
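# Worked example (values from the BLAT HSP in the class docstring): for hit
# fragments spanning [59018929:59018935] and [59018939:59018955] on the plus
# strand, start = max(coords[0]) = 59018935 and end = min(coords[1]) =
# 59018939, so hit_inter_ranges is [(59018935, 59018939)] -- the gap between
# the two fragments.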
def _hit_inter_ranges_get(self):
return self._inter_ranges_get("hit")
hit_inter_ranges = property(
fget=_hit_inter_ranges_get,
doc="Hit sequence coordinates of the regions between fragments.",
)
def _query_inter_ranges_get(self):
return self._inter_ranges_get("query")
query_inter_ranges = property(
fget=_query_inter_ranges_get,
doc="Query sequence coordinates of the regions between fragments.",
)
def _inter_spans_get(self, seq_type):
assert seq_type in ("query", "hit")
attr_name = "%s_inter_ranges" % seq_type
return [coord[1] - coord[0] for coord in getattr(self, attr_name)]
def _hit_inter_spans_get(self):
return self._inter_spans_get("hit")
hit_inter_spans = property(
fget=_hit_inter_spans_get, doc="Lengths of regions between hit fragments."
)
def _query_inter_spans_get(self):
return self._inter_spans_get("query")
query_inter_spans = property(
fget=_query_inter_spans_get, doc="Lengths of regions between query fragments."
)
# shortcuts for fragments' properties #
# bool check if there's more than one fragment
is_fragmented = property(
lambda self: len(self) > 1,
doc="Whether the HSP has more than one HSPFragment objects.",
)
# first item properties with setters
hit_description = fullcascade(
"hit_description", doc="Description of the hit sequence."
)
query_description = fullcascade(
"query_description", doc="Description of the query sequence."
)
hit_id = fullcascade("hit_id", doc="ID of the hit sequence.")
query_id = fullcascade("query_id", doc="ID of the query sequence.")
alphabet = fullcascade(
"alphabet", doc="Alphabet used in hit and query SeqRecord objects."
)
# properties for single-fragment HSPs
fragment = singleitem(doc="HSPFragment object, first fragment.")
hit = singleitem("hit", doc="Hit sequence as a SeqRecord object, first fragment.")
query = singleitem(
"query", doc="Query sequence as a SeqRecord object, first fragment."
)
aln = singleitem(
"aln", doc="Alignment of the first fragment as a MultipleSeqAlignment object."
)
aln_annotation = singleitem(
"aln_annotation",
doc="Dictionary of annotation(s) of the first fragment's alignment.",
)
hit_features = singleitem(
"hit_features", doc="Hit sequence features, first fragment."
)
query_features = singleitem(
"query_features", doc="Query sequence features, first fragment."
)
hit_strand = singleitem("hit_strand", doc="Hit strand orientation, first fragment.")
query_strand = singleitem(
"query_strand", doc="Query strand orientation, first fragment."
)
hit_frame = singleitem(
"hit_frame", doc="Hit sequence reading frame, first fragment."
)
query_frame = singleitem(
"query_frame", doc="Query sequence reading frame, first fragment."
)
# properties for multi-fragment HSPs
fragments = allitems(doc="List of all HSPFragment objects.")
hit_all = allitems(
"hit", doc="List of all fragments' hit sequences as SeqRecord objects."
)
query_all = allitems(
"query", doc="List of all fragments' query sequences as SeqRecord objects."
)
aln_all = allitems(
"aln", doc="List of all fragments' alignments as MultipleSeqAlignment objects."
)
aln_annotation_all = allitems(
"aln_annotation",
doc="Dictionary of annotation(s) of all fragments' alignments.",
)
hit_features_all = allitems(
"hit_features", doc="List of all hit sequence features."
)
query_features_all = allitems(
"query_features", doc="List of all query sequence features."
)
hit_strand_all = allitems(
"hit_strand", doc="List of all fragments' hit sequence strands."
)
query_strand_all = allitems(
"query_strand", doc="List of all fragments' query sequence strands"
)
hit_frame_all = allitems(
"hit_frame", doc="List of all fragments' hit sequence reading frames."
)
query_frame_all = allitems(
"query_frame", doc="List of all fragments' query sequence reading frames."
)
hit_start_all = allitems(
"hit_start", doc="List of all fragments' hit start coordinates."
)
query_start_all = allitems(
"query_start", doc="List of all fragments' query start coordinates."
)
hit_end_all = allitems("hit_end", doc="List of all fragments' hit end coordinates.")
query_end_all = allitems(
"query_end", doc="List of all fragments' query end coordinates."
)
hit_span_all = allitems("hit_span", doc="List of all fragments' hit sequence size.")
query_span_all = allitems(
"query_span", doc="List of all fragments' query sequence size."
)
hit_range_all = allitems(
"hit_range", doc="List of all fragments' hit start and end coordinates."
)
query_range_all = allitems(
"query_range", doc="List of all fragments' query start and end coordinates."
)
class HSPFragment(_BaseHSP):
"""Class representing a contiguous alignment of hit-query sequence.
HSPFragment forms the core of any parsed search output file. Depending on
the search output file format, it may contain the actual query and/or hit
sequences that produces the search hits. These sequences are stored as
SeqRecord objects (see SeqRecord):
>>> from Bio import SearchIO
>>> qresult = next(SearchIO.parse('Blast/mirna.xml', 'blast-xml'))
>>> fragment = qresult[0][0][0] # first hit, first hsp, first fragment
>>> print(fragment)
Query: 33211 mir_1
Hit: gi|262205317|ref|NR_030195.1| Homo sapiens microRNA 520b (MIR520...
Query range: [0:61] (1)
Hit range: [0:61] (1)
Fragments: 1 (61 columns)
Query - CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
Hit - CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTTTAGAGGG
# the query sequence is a SeqRecord object
>>> fragment.query.__class__
<class 'Bio.SeqRecord.SeqRecord'>
>>> print(fragment.query)
ID: 33211
Name: aligned query sequence
Description: mir_1
Number of features: 0
Seq('CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTT...GGG', DNAAlphabet())
# the hit sequence is a SeqRecord object as well
>>> fragment.hit.__class__
<class 'Bio.SeqRecord.SeqRecord'>
>>> print(fragment.hit)
ID: gi|262205317|ref|NR_030195.1|
Name: aligned hit sequence
Description: Homo sapiens microRNA 520b (MIR520B), microRNA
Number of features: 0
Seq('CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAGTGCTTCCTTT...GGG', DNAAlphabet())
# when both query and hit are present, we get a MultipleSeqAlignment object
>>> fragment.aln.__class__
<class 'Bio.Align.MultipleSeqAlignment'>
>>> print(fragment.aln)
DNAAlphabet() alignment with 2 rows and 61 columns
CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAG...GGG 33211
CCCTCTACAGGGAAGCGCTTTCTGTTGTCTGAAAGAAAAGAAAG...GGG gi|262205317|ref|NR_030195.1|
"""
def __init__(
self,
hit_id="<unknown id>",
query_id="<unknown id>",
hit=None,
query=None,
alphabet=single_letter_alphabet,
):
"""Initialize the class."""
self._alphabet = alphabet
self.aln_annotation = {}
self._hit_id = hit_id
self._query_id = query_id
for seq_type in ("query", "hit"):
# query or hit attributes with non-None default values
setattr(self, "_%s_description" % seq_type, "<unknown description>")
setattr(self, "_%s_features" % seq_type, [])
# query or hit attributes whose default attribute is None
for attr in ("strand", "frame", "start", "end"):
setattr(self, "%s_%s" % (seq_type, attr), None)
# self.query or self.hit
if eval(seq_type):
setattr(self, seq_type, eval(seq_type))
else:
setattr(self, seq_type, None)
def __repr__(self):
"""Return HSPFragment info; hit id, query id, number of columns."""
info = "hit_id=%r, query_id=%r" % (self.hit_id, self.query_id)
try:
info += ", %i columns" % len(self)
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, info)
def __len__(self):
"""Return alignment span."""
return self.aln_span
def __str__(self):
"""Return string of HSP header and alignments."""
return self._str_hsp_header() + "\n" + self._str_aln()
def __getitem__(self, idx):
"""Return object of index idx."""
if self.aln is not None:
obj = self.__class__(
hit_id=self.hit_id, query_id=self.query_id, alphabet=self.alphabet
)
# transfer query and hit attributes
# let SeqRecord handle feature slicing, then retrieve the sliced
# features into the sliced HSPFragment
if self.query is not None:
obj.query = self.query[idx]
obj.query_features = obj.query.features
if self.hit is not None:
obj.hit = self.hit[idx]
obj.hit_features = obj.hit.features
# description, strand, frame
for attr in ("description", "strand", "frame"):
for seq_type in ("hit", "query"):
attr_name = "%s_%s" % (seq_type, attr)
self_val = getattr(self, attr_name)
setattr(obj, attr_name, self_val)
# alignment annotation should be transferred, since we can compute
# the resulting annotation
obj.aln_annotation = {}
for key, value in self.aln_annotation.items():
assert len(value[idx]) == len(obj)
obj.aln_annotation[key] = value[idx]
return obj
else:
raise TypeError(
"Slicing for HSP objects without alignment is not supported."
)
def _str_aln(self):
lines = []
# alignment length
aln_span = getattr_str(self, "aln_span")
lines.append(" Fragments: 1 (%s columns)" % aln_span)
# sequences
if self.query is not None and self.hit is not None:
try:
qseq = str(self.query.seq)
except AttributeError: # query is None
qseq = "?"
try:
hseq = str(self.hit.seq)
except AttributeError: # hit is None
hseq = "?"
# similarity line
simil = ""
if "similarity" in self.aln_annotation and isinstance(
self.aln_annotation.get("similarity"), str
):
simil = self.aln_annotation["similarity"]
if self.aln_span <= 67:
lines.append("%10s - %s" % ("Query", qseq))
if simil:
lines.append(" %s" % simil)
lines.append("%10s - %s" % ("Hit", hseq))
else:
# adjust continuation character length, so we don't display
# the same residues twice
if self.aln_span - 66 > 3:
cont = "~" * 3
else:
cont = "~" * (self.aln_span - 66)
lines.append("%10s - %s%s%s" % ("Query", qseq[:59], cont, qseq[-5:]))
if simil:
lines.append(" %s%s%s" % (simil[:59], cont, simil[-5:]))
lines.append("%10s - %s%s%s" % ("Hit", hseq[:59], cont, hseq[-5:]))
return "\n".join(lines)
# sequence properties #
def _set_seq(self, seq, seq_type):
"""Check the given sequence for attribute setting (PRIVATE).
:param seq: sequence to check
:type seq: string or SeqRecord
:param seq_type: sequence type
:type seq_type: string, choice of 'hit' or 'query'
"""
assert seq_type in ("hit", "query")
if seq is None:
return seq # return immediately if seq is None
else:
if not isinstance(seq, (str, SeqRecord)):
raise TypeError(
"%s sequence must be a string or a SeqRecord object." % seq_type
)
# check length if the opposite sequence is not None
opp_type = "hit" if seq_type == "query" else "query"
opp_seq = getattr(self, "_%s" % opp_type, None)
if opp_seq is not None:
if len(seq) != len(opp_seq):
raise ValueError(
"Sequence lengths do not match. Expected: %r (%s); found: %r (%s)."
% (len(opp_seq), opp_type, len(seq), seq_type)
)
seq_id = getattr(self, "%s_id" % seq_type)
seq_desc = getattr(self, "%s_description" % seq_type)
seq_feats = getattr(self, "%s_features" % seq_type)
seq_name = "aligned %s sequence" % seq_type
if isinstance(seq, SeqRecord):
seq.id = seq_id
seq.description = seq_desc
seq.name = seq_name
seq.features = seq_feats
seq.seq.alphabet = self.alphabet
elif isinstance(seq, str):
seq = SeqRecord(
Seq(seq, self.alphabet),
id=seq_id,
name=seq_name,
description=seq_desc,
features=seq_feats,
)
return seq
def _hit_get(self):
return self._hit
def _hit_set(self, value):
self._hit = self._set_seq(value, "hit")
hit = property(
fget=_hit_get,
fset=_hit_set,
doc="Hit sequence as a SeqRecord object, defaults to None.",
)
def _query_get(self):
return self._query
def _query_set(self, value):
self._query = self._set_seq(value, "query")
query = property(
fget=_query_get,
fset=_query_set,
doc="Query sequence as a SeqRecord object, defaults to None.",
)
def _aln_get(self):
if self.query is None and self.hit is None:
return None
elif self.hit is None:
return MultipleSeqAlignment([self.query], self.alphabet)
elif self.query is None:
return MultipleSeqAlignment([self.hit], self.alphabet)
else:
return MultipleSeqAlignment([self.query, self.hit], self.alphabet)
aln = property(
fget=_aln_get,
doc="Query-hit alignment as a MultipleSeqAlignment object, defaults to None.",
)
def _alphabet_get(self):
return self._alphabet
def _alphabet_set(self, value):
self._alphabet = value
try:
self.query.seq.alphabet = value
except AttributeError:
pass
try:
self.hit.seq.alphabet = value
except AttributeError:
pass
alphabet = property(
fget=_alphabet_get,
fset=_alphabet_set,
doc="Alphabet object used in the fragment's "
"sequences and alignment, defaults to single_letter_alphabet.",
)
def _aln_span_get(self):
# length of alignment (gaps included)
# alignment span can be its own attribute, or computed from
# query / hit length
if not hasattr(self, "_aln_span"):
if self.query is not None:
self._aln_span = len(self.query)
elif self.hit is not None:
self._aln_span = len(self.hit)
return self._aln_span
def _aln_span_set(self, value):
self._aln_span = value
aln_span = property(
fget=_aln_span_get,
fset=_aln_span_set,
doc="The number of alignment columns covered by the fragment.",
)
# id, description, and features properties #
hit_description = fragcascade("description", "hit", doc="Hit sequence description.")
query_description = fragcascade(
"description", "query", doc="Query sequence description."
)
hit_id = fragcascade("id", "hit", doc="Hit sequence ID.")
query_id = fragcascade("id", "query", doc="Query sequence ID.")
hit_features = fragcascade("features", "hit", doc="Hit sequence features.")
query_features = fragcascade("features", "query", doc="Query sequence features.")
# strand properties #
def _prep_strand(self, strand):
# follow SeqFeature's convention
if strand not in (-1, 0, 1, None):
raise ValueError("Strand should be -1, 0, 1, or None; not %r" % strand)
return strand
def _get_strand(self, seq_type):
assert seq_type in ("hit", "query")
strand = getattr(self, "_%s_strand" % seq_type)
if strand is None:
# try to compute strand from frame
frame = getattr(self, "%s_frame" % seq_type)
if frame is not None:
try:
strand = frame // abs(frame)
except ZeroDivisionError:
strand = 0
setattr(self, "%s_strand" % seq_type, strand)
return strand
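# Example: a fragment whose strand is unset but whose frame is -2 gets a
# computed strand of -2 // abs(-2) == -1, while a frame of 0 maps to strand 0
# through the ZeroDivisionError branch above.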
def _hit_strand_get(self):
return self._get_strand("hit")
def _hit_strand_set(self, value):
self._hit_strand = self._prep_strand(value)
hit_strand = property(
fget=_hit_strand_get,
fset=_hit_strand_set,
doc="Hit sequence strand, defaults to None.",
)
def _query_strand_get(self):
return self._get_strand("query")
def _query_strand_set(self, value):
self._query_strand = self._prep_strand(value)
query_strand = property(
fget=_query_strand_get,
fset=_query_strand_set,
doc="Query sequence strand, defaults to None.",
)
# frame properties #
def _prep_frame(self, frame):
if frame not in (-3, -2, -1, 0, 1, 2, 3, None):
raise ValueError(
"Strand should be an integer between -3 and 3, or None; not %r" % frame
)
return frame
def _hit_frame_get(self):
return self._hit_frame
def _hit_frame_set(self, value):
self._hit_frame = self._prep_frame(value)
hit_frame = property(
fget=_hit_frame_get,
fset=_hit_frame_set,
doc="Hit sequence reading frame, defaults to None.",
)
def _query_frame_get(self):
"""Get query sequence reading frame (PRIVATE)."""
return self._query_frame
def _query_frame_set(self, value):
"""Set query sequence reading frame (PRIVATE)."""
self._query_frame = self._prep_frame(value)
query_frame = property(
fget=_query_frame_get,
fset=_query_frame_set,
doc="Query sequence reading frame, defaults to None.",
)
# coordinate properties #
def _prep_coord(self, coord, opp_coord_name, op):
# coord must either be None or int
if coord is None:
return coord
assert isinstance(coord, int)
# try to get opposite coordinate, if it's not present, return
try:
opp_coord = getattr(self, opp_coord_name)
except AttributeError:
return coord
# if opposite coordinate is None, return
if opp_coord is None:
return coord
# otherwise compare it to coord ('>=' or '<=')
else:
assert op(coord, opp_coord)
return coord
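# Example: with hit_end already set to 60, assigning hit_start = 70 trips the
# ``assert op(coord, opp_coord)`` check above (le(70, 60) is False), while
# hit_start = 10 passes; the ``ge``/``le`` operators supplied by the setters
# below enforce start <= end on both sequences.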
def _hit_start_get(self):
"""Get the sequence hit start coordinate (PRIVATE)."""
return self._hit_start
def _hit_start_set(self, value):
"""Set the sequence hit start coordinate (PRIVATE)."""
self._hit_start = self._prep_coord(value, "hit_end", le)
hit_start = property(
fget=_hit_start_get,
fset=_hit_start_set,
doc="Hit sequence start coordinate, defaults to None.",
)
def _query_start_get(self):
"""Get the query sequence start coordinate (PRIVATE)."""
return self._query_start
def _query_start_set(self, value):
"""Set the query sequence start coordinate (PRIVATE)."""
self._query_start = self._prep_coord(value, "query_end", le)
query_start = property(
fget=_query_start_get,
fset=_query_start_set,
doc="Query sequence start coordinate, defaults to None.",
)
def _hit_end_get(self):
"""Get the hit sequence end coordinate (PRIVATE)."""
return self._hit_end
def _hit_end_set(self, value):
"""Set the hit sequence end coordinate (PRIVATE)."""
self._hit_end = self._prep_coord(value, "hit_start", ge)
hit_end = property(
fget=_hit_end_get,
fset=_hit_end_set,
doc="Hit sequence end coordinate, defaults to None.",
)
def _query_end_get(self):
"""Get the query sequence end coordinate (PRIVATE)."""
return self._query_end
def _query_end_set(self, value):
"""Set the query sequence end coordinate (PRIVATE)."""
self._query_end = self._prep_coord(value, "query_start", ge)
query_end = property(
fget=_query_end_get,
fset=_query_end_set,
doc="Query sequence end coordinate, defaults to None.",
)
# coordinate-dependent properties #
def _hit_span_get(self):
"""Return the number of residues covered by the hit sequence (PRIVATE)."""
try:
return self.hit_end - self.hit_start
except TypeError: # triggered if any of the coordinates are None
return None
hit_span = property(
fget=_hit_span_get, doc="The number of residues covered by the hit sequence."
)
def _query_span_get(self):
"""Return the number or residues covered by the query (PRIVATE)."""
try:
return self.query_end - self.query_start
except TypeError: # triggered if any of the coordinates are None
return None
query_span = property(
fget=_query_span_get,
doc="The number of residues covered by the query sequence.",
)
def _hit_range_get(self):
"""Return the start and end of a hit (PRIVATE)."""
return (self.hit_start, self.hit_end)
hit_range = property(
fget=_hit_range_get, doc="Tuple of hit start and end coordinates."
)
def _query_range_get(self):
"""Return the start and end of a query (PRIVATE)."""
return (self.query_start, self.query_end)
query_range = property(
fget=_query_range_get, doc="Tuple of query start and end coordinates."
)
# if not used as a module, run the doctest
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest()
the-stack_0_2363
"""Editing JSON and JavaScript files in Sublime views"""
import json
from live.shared.js_cursor import StructuredCursor
def json_root_in(view):
return Entity(view, [])
class Entity:
def __init__(self, view, path):
self.view = view
self.path = path
def __getitem__(self, key):
return Entity(self.view, self.path + [key])
def append(self, item):
cur = StructuredCursor(0, self.view, inside_what='array')
for key in self.path:
cur.enter()
cur.goto_entry_keyed_by(json.dumps(key))
cur.prepare_for_insertion_at_end()
dump_py_as_json(cur, item)
def dump_py_as_json(cur, obj):
def insert(obj):
if isinstance(obj, dict):
with cur.laying_out('object') as separate:
for k, v in obj.items():
separate()
cur.insert(json.dumps(k))
cur.insert_keyval_sep()
insert(v)
elif isinstance(obj, list):
with cur.laying_out('array') as separate:
for v in obj:
separate()
insert(v)
elif isinstance(obj, (str, int, bool)):
cur.insert(json.dumps(obj))
else:
raise RuntimeError("Unsupported object for insertion in JSON: {}".format(obj))
insert(obj)
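# A hypothetical usage sketch (the view and the "config"/"modules" keys are
# assumptions, not part of this module): to append an entry to the JSON array
# stored under config["modules"] in the edited view, one could write
#
#     json_root_in(view)["config"]["modules"].append({"name": "foo", "enabled": True})
#
# which walks the cursor down the "config" -> "modules" path and serializes
# the dict via dump_py_as_json.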
the-stack_0_2364
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.dialogflow.v2beta1 KnowledgeBases API."""
import functools
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.page_iterator
import google.api_core.path_template
import grpc
from dialogflow_v2beta1.gapic import enums
from dialogflow_v2beta1.gapic import knowledge_bases_client_config
from dialogflow_v2beta1.gapic.transports import knowledge_bases_grpc_transport
from dialogflow_v2beta1.proto import agent_pb2
from dialogflow_v2beta1.proto import agent_pb2_grpc
from dialogflow_v2beta1.proto import context_pb2
from dialogflow_v2beta1.proto import context_pb2_grpc
from dialogflow_v2beta1.proto import document_pb2
from dialogflow_v2beta1.proto import document_pb2_grpc
from dialogflow_v2beta1.proto import entity_type_pb2
from dialogflow_v2beta1.proto import entity_type_pb2_grpc
from dialogflow_v2beta1.proto import gcs_pb2
from dialogflow_v2beta1.proto import intent_pb2
from dialogflow_v2beta1.proto import intent_pb2_grpc
from dialogflow_v2beta1.proto import knowledge_base_pb2
from dialogflow_v2beta1.proto import knowledge_base_pb2_grpc
from dialogflow_v2beta1.proto import validation_result_pb2
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
from google.protobuf import struct_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("dialogflow").version
class KnowledgeBasesClient(object):
"""
Manages knowledge bases.
Allows users to set up and maintain knowledge bases with their knowledge data.
"""
SERVICE_ADDRESS = "dialogflow.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.dialogflow.v2beta1.KnowledgeBases"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
dialogflow_v2beta1.KnowledgeBasesClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
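# A minimal construction sketch (the key file path is a placeholder):
#
#     client = KnowledgeBasesClient.from_service_account_file("service-account.json")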
@classmethod
def knowledge_base_path(cls, project, knowledge_base):
"""Return a fully-qualified knowledge_base string."""
return google.api_core.path_template.expand(
"projects/{project}/knowledgeBases/{knowledge_base}",
project=project,
knowledge_base=knowledge_base,
)
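# For example, knowledge_base_path('my-project', 'my-kb') returns
# 'projects/my-project/knowledgeBases/my-kb' (the IDs here are illustrative).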
@classmethod
def project_path(cls, project):
"""Return a fully-qualified project string."""
return google.api_core.path_template.expand(
"projects/{project}", project=project
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.KnowledgeBasesGrpcTransport,
Callable[[~.Credentials, type], ~.KnowledgeBasesGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = knowledge_bases_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=knowledge_bases_grpc_transport.KnowledgeBasesGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = knowledge_bases_grpc_transport.KnowledgeBasesGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
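# A minimal client construction sketch (the endpoint override below is an
# assumption for illustration; omitting client_options uses the default
# dialogflow.googleapis.com:443 address):
#
#     client = KnowledgeBasesClient(
#         client_options={"api_endpoint": "dialogflow.googleapis.com:443"}
#     )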
# Service calls
def list_knowledge_bases(
self,
parent,
page_size=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns the list of all knowledge bases of the specified agent.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # Iterate over all results
>>> for element in client.list_knowledge_bases(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_knowledge_bases(parent).pages:
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The project to list knowledge bases for. Format:
``projects/<Project ID>``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.api_core.page_iterator.PageIterator` instance.
An iterable of :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instances.
You can also iterate over the pages of the response
using its `pages` property.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "list_knowledge_bases" not in self._inner_api_calls:
self._inner_api_calls[
"list_knowledge_bases"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.list_knowledge_bases,
default_retry=self._method_configs["ListKnowledgeBases"].retry,
default_timeout=self._method_configs["ListKnowledgeBases"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.ListKnowledgeBasesRequest(
parent=parent, page_size=page_size
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
iterator = google.api_core.page_iterator.GRPCIterator(
client=None,
method=functools.partial(
self._inner_api_calls["list_knowledge_bases"],
retry=retry,
timeout=timeout,
metadata=metadata,
),
request=request,
items_field="knowledge_bases",
request_token_field="page_token",
response_token_field="next_page_token",
)
return iterator
def get_knowledge_base(
self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Retrieves the specified knowledge base.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> name = client.knowledge_base_path('[PROJECT]', '[KNOWLEDGE_BASE]')
>>>
>>> response = client.get_knowledge_base(name)
Args:
name (str): Required. The name of the knowledge base to retrieve. Format
``projects/<Project ID>/knowledgeBases/<Knowledge Base ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_knowledge_base" not in self._inner_api_calls:
self._inner_api_calls[
"get_knowledge_base"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_knowledge_base,
default_retry=self._method_configs["GetKnowledgeBase"].retry,
default_timeout=self._method_configs["GetKnowledgeBase"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.GetKnowledgeBaseRequest(name=name)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_knowledge_base"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def create_knowledge_base(
self,
parent,
knowledge_base,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a knowledge base.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `knowledge_base`:
>>> knowledge_base = {}
>>>
>>> response = client.create_knowledge_base(parent, knowledge_base)
Args:
parent (str): Required. The project to create a knowledge base for. Format:
``projects/<Project ID>``.
knowledge_base (Union[dict, ~google.cloud.dialogflow_v2beta1.types.KnowledgeBase]): Required. The knowledge base to create.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_knowledge_base" not in self._inner_api_calls:
self._inner_api_calls[
"create_knowledge_base"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_knowledge_base,
default_retry=self._method_configs["CreateKnowledgeBase"].retry,
default_timeout=self._method_configs["CreateKnowledgeBase"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.CreateKnowledgeBaseRequest(
parent=parent, knowledge_base=knowledge_base
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_knowledge_base"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def delete_knowledge_base(
self,
name,
force=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Deletes the specified knowledge base.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> name = client.knowledge_base_path('[PROJECT]', '[KNOWLEDGE_BASE]')
>>>
>>> client.delete_knowledge_base(name)
Args:
name (str): Required. The name of the knowledge base to delete. Format:
``projects/<Project ID>/knowledgeBases/<Knowledge Base ID>``.
force (bool): Optional. Force deletes the knowledge base. When set to true, any documents
in the knowledge base are also deleted.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "delete_knowledge_base" not in self._inner_api_calls:
self._inner_api_calls[
"delete_knowledge_base"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.delete_knowledge_base,
default_retry=self._method_configs["DeleteKnowledgeBase"].retry,
default_timeout=self._method_configs["DeleteKnowledgeBase"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.DeleteKnowledgeBaseRequest(name=name, force=force)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("name", name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["delete_knowledge_base"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def update_knowledge_base(
self,
knowledge_base,
update_mask=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Updates the specified knowledge base.
Note: The ``projects.agent.knowledgeBases`` resource is deprecated; only
use ``projects.knowledgeBases``.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.KnowledgeBasesClient()
>>>
>>> # TODO: Initialize `knowledge_base`:
>>> knowledge_base = {}
>>>
>>> response = client.update_knowledge_base(knowledge_base)
Args:
knowledge_base (Union[dict, ~google.cloud.dialogflow_v2beta1.types.KnowledgeBase]): Required. The knowledge base to update.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase`
update_mask (Union[dict, ~google.cloud.dialogflow_v2beta1.types.FieldMask]): Optional. Not specified means ``update all``. Currently, only
``display_name`` can be updated, an InvalidArgument will be returned for
attempting to update other fields.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.dialogflow_v2beta1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.KnowledgeBase` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "update_knowledge_base" not in self._inner_api_calls:
self._inner_api_calls[
"update_knowledge_base"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.update_knowledge_base,
default_retry=self._method_configs["UpdateKnowledgeBase"].retry,
default_timeout=self._method_configs["UpdateKnowledgeBase"].timeout,
client_info=self._client_info,
)
request = knowledge_base_pb2.UpdateKnowledgeBaseRequest(
knowledge_base=knowledge_base, update_mask=update_mask
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("knowledge_base.name", knowledge_base.name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["update_knowledge_base"](
request, retry=retry, timeout=timeout, metadata=metadata
)
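    # Hedged usage sketch (not part of the generated surface; resource names and the
    # display name are placeholders): the docstring above notes that only
    # ``display_name`` can currently be updated, so a minimal masked update could be:
    #
    #   client = dialogflow_v2beta1.KnowledgeBasesClient()
    #   knowledge_base = {
    #       "name": client.knowledge_base_path('[PROJECT]', '[KNOWLEDGE_BASE]'),
    #       "display_name": "My updated knowledge base",
    #   }
    #   update_mask = {"paths": ["display_name"]}
    #   response = client.update_knowledge_base(knowledge_base, update_mask=update_mask)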
|
the-stack_0_2365 | from django.contrib import admin
import nested_admin
from django.utils.html import format_html
from django.utils.translation import gettext as _
from mptt.admin import DraggableMPTTAdmin
from .models import Tax, Category, Notification, Currency, Carrier
# Register your models here.
class CategoriesAdmin(DraggableMPTTAdmin):
fields = ('name','slug')
list_display = ('tree_actions', 'getname','slug')
list_display_links = ('getname',)
prepopulated_fields = {'slug': ('name',)}
def getname(self, instance):
return format_html(
'<div style="text-indent:{}px">{}</div>',
instance._mpttfield('level') * self.mptt_level_indent,
instance.name, # Or whatever you want to put here
)
getname.short_description = _('Name')
class CarriersAdmin(admin.ModelAdmin):
list_display = ['name', 'price','delivery_text']
# list_filter = ('name', 'price')
admin.site.register(Carrier, CarriersAdmin)
admin.site.register(Category, CategoriesAdmin)
admin.site.register([Tax, Notification, Currency]) |
the-stack_0_2366 | # quick-write python script to calculate, plot and write out the n(z) for the BOSS and 2dFLenS samples
# we can compare the BOSS n(z) for the full NGP with the samples within the KiDS footprint
# CH: 12th Dec 2019
from astropy.io import fits
import numpy as np
from matplotlib import rcParams
import matplotlib.pyplot as plt
# Some font setting
rcParams['ps.useafm'] = True
rcParams['pdf.use14corefonts'] = True
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 19}
plt.rc('font', **font)
plt.figure(figsize=(8,7))
#This is where the catalogues live on cuillin
DD='/disk09/KIDS/K1000_TWO_PT_STATS/GGLCATS'
#To apply the 9-band photo-zs mask use
#bitmask=0x6FFC
#You might however just want to know where the gri information is
#for this you want
bitmask=0x681C
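#Illustrative example of the bit test applied further down (mask values are made up):
#an object with KIDSMASK=0x0004 shares a bit with 0x681C (0x0004 & 0x681C != 0) and is
#rejected, while KIDSMASK=0x0001 gives 0x0001 & 0x681C == 0 and survives the
#np.logical_not(np.array(KIDSMASK.astype(int) & bitmask, dtype=bool)) filter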
#what resolution do you want the n(z) binned with?
dz=0.01
#Now lets see the n(z) when the KiDS mask has been applied
for ilens in range(2):
#input data
bossfile=DD+'/BOSS_data_z'+str(ilens+1)+'.fits'
twodffile=DD+'/2dFLenS_data_z'+str(ilens+1)+'.fits'
#output ascii n(z)
bossnzfile=DD+'/N_of_Z/BOSS_n_of_z'+str(ilens+1)+'_res_'+str(dz)+'.txt'
twodfnzfile=DD+'/N_of_Z/2dFLenS_n_of_z'+str(ilens+1)+'_res_'+str(dz)+'.txt'
twodf_w_nzfile=DD+'/N_of_Z/2dFLenS_weighted_n_of_z'+str(ilens+1)+'_res_'+str(dz)+'.txt'
allnzfile=DD+'/N_of_Z/BOSS_and_2dFLenS_n_of_z'+str(ilens+1)+'_res_'+str(dz)+'.txt'
#set up the z-range to bin over and the number of bins
if ilens==0:
zmin=0.2
zmax=0.5
else:
zmin=0.5
zmax=0.75
    nbins=int(round((zmax-zmin)/dz))
#Read in the BOSS catalogue weights and the MASK
hdulist = fits.open(bossfile)
bosscat = hdulist[1].data
BKIDSMASK=bosscat.field('KIDSMASK')
bossz=bosscat.field('Z')
bossweight = bosscat.field('WEICOMP')
#filter based on the 9-band or gri mask
ibfilter=np.logical_not(np.array(BKIDSMASK.astype(int) & bitmask, dtype=bool))
#histogram the redshifts within the KiDS footprint
if ilens==0:
mylabel='BOSS in KiDS'
else:
mylabel=None
n, bins, patches = plt.hist(bossz[ibfilter], nbins, normed=True, weights=bossweight[ibfilter], color='red',histtype=u'step',label=mylabel,linewidth=3)
#write out the mean redshift
print ('BOSS %d %s'%(ilens,np.average(bossz[ibfilter],weights=bossweight[ibfilter])))
#and write out to file (note reporting the left corner of the bin here)
np.savetxt(bossnzfile,np.c_[bins[0:nbins],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
#Read in the 2dFLenS catalogue and the MASK
hdulist = fits.open(twodffile)
twodfcat = hdulist[1].data
TKIDSMASK=twodfcat.field('KIDSMASK')
twodfz=twodfcat.field('Z')
twodfweight = twodfcat.field('WEIMAG')
twodfweightcomp = twodfcat.field('WEICOMP')
#filter based on the 9-band or gri mask
itfilter=np.logical_not(np.array(TKIDSMASK.astype(int) & bitmask, dtype=bool))
#this with no weights
if ilens==0:
mylabel='2dFLenS in KiDS'
else:
mylabel=None
n, bins, patches = plt.hist(twodfz[itfilter], nbins, normed=True, color='green', histtype=u'step',label=mylabel,linewidth=2.5)
np.savetxt(twodfnzfile,np.c_[bins[0:nbins],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
#write out the mean redshift
print ('2dFLenS %d %s'%(ilens,np.average(twodfz[itfilter])))
#here we apply the gri weights
if ilens==0:
mylabel='2dFLenS weighted'
else:
mylabel=None
n, bins, patches = plt.hist(twodfz[itfilter], nbins, normed=True, weights=twodfweight[itfilter], color='blue', histtype=u'step',label=mylabel,linewidth=2.5)
np.savetxt(twodf_w_nzfile,np.c_[bins[0:nbins],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
#what does a combined unweighted 2dFLenS and BOSS in KiDS n(z) look like?
allinkids = np.append(twodfz[itfilter],bossz[ibfilter])
allweight = np.append(twodfweightcomp[itfilter],bossweight[ibfilter])
if ilens==0:
mylabel='BOSS and 2dFLenS'
else:
mylabel=None
n, bins, patches = plt.hist(allinkids, nbins, normed=True, weights=allweight, color='orange', histtype=u'step',label=mylabel,linewidth=2.5)
np.savetxt(allnzfile,np.c_[bins[0:nbins],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
#write out the mean redshift
print ('All %d %s'%(ilens,np.average(allinkids,weights=allweight)))
#Lets overplot the n(z) in the original NGP data files
original_datafile=DD+'/BOSS_original/galaxy_DR12v5_CMASSLOWZTOT_North.fits'
hdulist = fits.open(original_datafile)
datacat = hdulist[1].data
zspec=datacat.field('Z')
weicp = datacat.field('WEIGHT_CP')
weinoz = datacat.field('WEIGHT_NOZ')
weisys = datacat.field('WEIGHT_SYSTOT')
weicompboss = weisys*(weinoz+weicp-1.)
zbin1filt=((zspec <=0.5) & (zspec>0.2))
nbins1=int(round((0.5-0.2)/dz))
bossallnzfile1=DD+'/N_of_Z/BOSS_NGP_n_of_z1.txt'
zbin2filt=((zspec <=0.75) & (zspec>0.5))
nbins2=int(round((0.75-0.5)/dz))
bossallnzfile2=DD+'/N_of_Z/BOSS_NGP_n_of_z2.txt'
n, bins, patches = plt.hist(zspec[zbin1filt], nbins1, normed=True, weights=weicompboss[zbin1filt], color='black', alpha=0.75,label='BOSS all',histtype=u'step',linewidth=4)
np.savetxt(bossallnzfile1,np.c_[bins[0:nbins1],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
n, bins, patches = plt.hist(zspec[zbin2filt], nbins2, normed=True, weights=weicompboss[zbin2filt], color='black', alpha=0.75, histtype=u'step',linewidth=4)
np.savetxt(bossallnzfile2,np.c_[bins[0:nbins2],n],fmt=['%.3f','%.3f'],header='z_bin_left n_of_z')
plt.xlim(0.15,0.8)
plt.xlabel('z')
plt.ylabel('n(z)')
plt.legend(loc = 'upper left', fontsize=14)
plt.savefig('BOSS-2dFLenS-nofz.png')
plt.show()
|
the-stack_0_2367 | from os import getenv
from fastapi import Request
from fastapi.params import Depends
from fastapi.templating import Jinja2Templates
from pyngrok import conf, ngrok
from lnbits.core.models import User
from lnbits.decorators import check_user_exists
from . import ngrok_ext, ngrok_renderer
templates = Jinja2Templates(directory="templates")
def log_event_callback(log):
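    # Parse the public https tunnel URL out of ngrok's log output and cache it in the
    # module-level `string5` that the index view below passes to the template.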
string = str(log)
    idx = string.find('url="https')
    if idx != -1:
        string2 = string[idx : idx + 80]
        string4 = string2[4:]
        global string5
        string5 = string4.replace('"', "")
conf.get_default().log_event_callback = log_event_callback
ngrok_authtoken = getenv("NGROK_AUTHTOKEN")
if ngrok_authtoken is not None:
ngrok.set_auth_token(ngrok_authtoken)
port = getenv("PORT")
ngrok_tunnel = ngrok.connect(port)
@ngrok_ext.get("/")
async def index(request: Request, user: User = Depends(check_user_exists)):
return ngrok_renderer().TemplateResponse(
"ngrok/index.html", {"request": request, "ngrok": string5, "user": user.dict()}
)
|
the-stack_0_2369 | from enum import Enum
import numpy as np
from actions import Direction
class Car():
def __init__(self, car):
self.x = int(car['x'])
self.y = int(car['y'])
self.health = int(car['health'])
self.resources = int(car['resources'])
self.collided = bool(car['collided'])
self.killed = bool(car['killed'])
self.state = State(car['state'])
class StateType(Enum):
STOPPED = 1
MOVING = 2
class State():
def __init__(self, state):
if isinstance(state, dict):
self.state_type = StateType[list(state.keys())[0]]
self.direction = Direction[state[list(state.keys())[0]]]
else:
self.state_type = StateType[state]
self.direction = None
class ItemType(Enum):
PRODUCER = 1
RESOURCE = 2
BASE = 3
class CellType(Enum):
OPEN = 1
WALL = 2
class Cell():
def __init__(self, cell_type, x, y):
self.cell_type = cell_type
self.x = x
self.y = y
self.items = []
def set_items(self, items):
self.items = items
def is_wall(self):
return self.cell_type == CellType.WALL
def has_base(self):
for i in self.items:
if i.is_base():
return True
return False
def has_producer(self):
for i in self.items:
if i.is_producer():
return True
return False
def has_resource(self):
for i in self.items:
if i.is_resource():
return True
return False
class Item():
def __init__(self, item_type):
self.item_type = item_type
def is_resource(self):
return self.item_type == ItemType.RESOURCE
def is_producer(self):
return self.item_type == ItemType.PRODUCER
def is_base(self):
return self.item_type == ItemType.BASE
class Observation():
def __init__(self, previous):
self.previous_observation = previous
def parse(self, game_state, team_id):
self.map = game_state['map']
self.cars = {str(k): Car(v) for k, v in game_state['cars'].items()}
self.team = game_state['teams'][str(team_id)]
self.score = self.team['score']
self.car = self.cars[str(team_id)]
self.cells = []
y = 0
for r in self.map['cells']:
x = 0
# row = []
# self.cells.append(row)
for c in r:
cell = Cell(CellType[c['block']], x, y)
items = []
for i in c['items']:
if isinstance(i, dict):
item_type = ItemType[list(i.keys())[0]]
else:
item_type = ItemType[i]
items.append(Item(item_type))
# row.append(cell)
self.cells.append(cell)
x += 1
y += 1
self.size_x = x
self.size_y = y
def get_car_at(self, cell):
for car in self.cars.values():
if car.x == cell.x and car.y == cell.y:
return car
return None
def to_rl(self):
o = []
o.append(self.car.health)
o.append(self.car.resources)
o.append(self.car.collided)
o.append(self.car.killed)
c = []
for cell in self.cells:
c.append(int(cell.is_wall()))
c.append(int(cell.has_base()))
c.append(int(cell.has_producer()))
c.append(int(cell.has_resource()))
car = self.get_car_at(cell)
if car is not None:
c.append(int(car.state.state_type == StateType.MOVING and car.state.direction == Direction.NORTH))
c.append(int(car.state.state_type == StateType.MOVING and car.state.direction == Direction.SOUTH))
c.append(int(car.state.state_type == StateType.MOVING and car.state.direction == Direction.EAST))
c.append(int(car.state.state_type == StateType.MOVING and car.state.direction == Direction.WEST))
c.append(car.resources)
c.append(car.health)
c.append(car.collided)
else:
for _ in range(7):
c.append(0)
cell_index = self.car.x + self.car.y*self.size_y
# Center observations on the car
o += c[cell_index:] + c[:cell_index]
o = np.array(o, dtype=np.int8)
return o
# not used
    def sample(self):
        raise NotImplementedError('Observation.sample called! Not really tested/implemented...')
o = []
o.append(np.random.randint(0, 4))
o.append(np.random.randint(0, 100))
nb_cells = 20 * 24 # XXX
car_cell_index = np.random.randint(0, nb_cells) # XXX car can be on a wall
        for i in range(nb_cells):
if i == car_cell_index:
o.append(1)
else:
o.append(0)
has_wall = np.random.randint(0, 2)
o.append(has_wall)
if has_wall == 0:
has_base = np.random.randint(0, 2)
has_producer = np.random.randint(0, 2)
has_resource = np.random.randint(0, 2)
else:
has_base = 0
has_producer = 0
has_resource = 0
o.append(has_base)
o.append(has_producer)
o.append(has_resource)
return o
def to_reward(self):
reward = 0
if self.car.state.state_type != StateType.MOVING:
reward -= 1
if self.previous_observation is not None:
reward += (self.score - self.previous_observation.score) * 100
# resources_bonus = self.car.resources - self.previous_observation.car.resources
# if resources_bonus > 0:
# reward += resources_bonus * 10
if self.car.collided: # XXX tune penalty depending on health?
reward -= 2
if self.car.killed:
reward += -10 - self.car.resources*100
return reward
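# Hedged illustration (toy values only, not part of the game interface): the
# "center observations on the car" step in Observation.to_rl() uses Python's
# list-rotation idiom, which restarts the flattened feature list at an offset
# derived from the car's position, e.g.
#
#   c = ["f0", "f1", "f2", "f3"]
#   c[2:] + c[:2]   # -> ["f2", "f3", "f0", "f1"]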
|
the-stack_0_2370 | #!/usr/bin/env python3
import copy
import sys
class Board:
def __init__(self, rows, cols):
self.rows = rows
self.cols = cols
self.board = []
self.initial_fill()
def initial_fill(self):
for i in range(self.rows):
self.board.append(self.cols * [-1])
def is_safe(self, x, y):
"""
        Check if (x, y) lies on the board and the square has not been visited yet
        :param x: row index
        :param y: column index
:return: boolean
"""
return (0 <= x < self.rows) and (0 <= y < self.cols) and (self.board[x][y] == -1)
def __str__(self):
return "\n".join([", ".join(["%2i" % i for i in row]) for row in self.board])
class BoardKT(Board):
# move_x, move_y
moves = ((-1, 2), (-2, 1), (1, 2), (2, 1), (2, -1), (1, -2), (-1, -2), (-2, -1))
def __init__(self, rows, cols):
Board.__init__(self, rows, cols)
self.bs_board = copy.deepcopy(self.board)
self.bs_counter = 0
def solve_kt(self, start_x=0, start_y=0):
self.board[start_x][start_y] = 0
return self.fill_kt(start_x, start_y, 1)
def fill_kt(self, x, y, counter):
if counter > self.bs_counter: # find the closest solution
self.bs_counter = counter
self.bs_board = copy.deepcopy(self.board)
if counter == self.rows * self.cols:
return True
for move in self.moves: # iterate over all possible positions
move_x, move_y = x + move[0], y + move[1]
if self.is_safe(move_x, move_y):
self.board[move_x][move_y] = counter # set current position
# check next move
if self.fill_kt(move_x, move_y, counter + 1):
return True
else:
self.board[move_x][move_y] = -1 # backtrack
return False
def __str__(self):
return "\n".join([", ".join(["%2i" % i for i in row]) for row in self.bs_board])
def exit_error(message):
exit("ERROR: " + message)
if __name__ == "__main__":
if len(sys.argv) < 3:
exit_error(" usage is %s N M" % (sys.argv[0]))
start_x, start_y = 0, 0
if len(sys.argv) == 5:
start_x, start_y = int(sys.argv[3]), int(sys.argv[4])
N = int(sys.argv[1])
M = int(sys.argv[2])
print("Board size: %d X %d" % (N, M))
# validate http://faculty.olin.edu/~sadams/DM/ktpaper.pdf
if (N < 3 and M < 2) or (N < 2 and M < 3):
exit_error("Board is not wide enough to construct a tour")
board = BoardKT(N, M)
if board.solve_kt(start_x, start_y) is False:
print("Could not walk all the board")
print("Walked: %d" % board.bs_counter)
print(board)
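# Hedged usage sketch (board size chosen only for illustration): the solver keeps
# the longest partial tour in `bs_board`, so it can also be used programmatically
# without the CLI above:
#
#   board = BoardKT(5, 6)
#   completed = board.solve_kt(0, 0)
#   print(completed, board.bs_counter)  # bs_counter reaches 30 only for a full tour
#   print(board)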
|
the-stack_0_2371 | from copy import deepcopy
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scanpy
import scvi
from anndata import AnnData
from pyro import clear_param_store
from pyro.nn import PyroModule
from scvi import _CONSTANTS
from scvi.data._anndata import _setup_anndata, get_from_registry
from scvi.model.base import BaseModelClass, PyroSampleMixin, PyroSviTrainMixin
from scvi.utils import setup_anndata_dsp
from cell2location.models._cell2location_module import (
LocationModelLinearDependentWMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel,
)
from cell2location.models.base._pyro_base_loc_module import Cell2locationBaseModule
from cell2location.models.base._pyro_mixin import PltExportMixin, QuantileMixin
from cell2location.utils import select_slide
class Cell2location(QuantileMixin, PyroSampleMixin, PyroSviTrainMixin, PltExportMixin, BaseModelClass):
"""
Cell2location model. User-end model class. See Module class for description of the model (incl. math).
Parameters
----------
adata
spatial AnnData object that has been registered via :func:`~scvi.data.setup_anndata`.
cell_state_df
pd.DataFrame with reference expression signatures for each gene (rows) in each cell type/population (columns).
use_gpu
Use the GPU?
**model_kwargs
Keyword args for :class:`~cell2location.models.LocationModelLinearDependentWMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel`
Examples
--------
TODO add example
>>>
"""
def __init__(
self,
adata: AnnData,
cell_state_df: pd.DataFrame,
model_class: Optional[PyroModule] = None,
detection_mean_per_sample: bool = False,
detection_mean_correction: float = 1.0,
**model_kwargs,
):
# in case any other model was created before that shares the same parameter names.
clear_param_store()
if not np.all(adata.var_names == cell_state_df.index):
            raise ValueError("adata.var_names should match cell_state_df.index, find intersecting variables/genes first")
# add index for each cell (provided to pyro plate for correct minibatching)
adata.obs["_indices"] = np.arange(adata.n_obs).astype("int64")
scvi.data.register_tensor_from_anndata(
adata,
registry_key="ind_x",
adata_attr_name="obs",
adata_key_name="_indices",
)
super().__init__(adata)
if model_class is None:
model_class = LocationModelLinearDependentWMultiExperimentLocationBackgroundNormLevelGeneAlphaPyroModel
self.cell_state_df_ = cell_state_df
self.n_factors_ = cell_state_df.shape[1]
self.factor_names_ = cell_state_df.columns.values
if not detection_mean_per_sample:
# compute expected change in sensitivity (m_g in V1 or y_s in V2)
sc_total = cell_state_df.sum(0).mean()
sp_total = get_from_registry(self.adata, _CONSTANTS.X_KEY).sum(1).mean()
get_from_registry(adata, _CONSTANTS.BATCH_KEY)
self.detection_mean_ = (sp_total / model_kwargs.get("N_cells_per_location", 1)) / sc_total
self.detection_mean_ = self.detection_mean_ * detection_mean_correction
model_kwargs["detection_mean"] = self.detection_mean_
else:
# compute expected change in sensitivity (m_g in V1 and y_s in V2)
sc_total = cell_state_df.sum(0).mean()
sp_total = get_from_registry(self.adata, _CONSTANTS.X_KEY).sum(1)
batch = get_from_registry(self.adata, _CONSTANTS.BATCH_KEY).flatten()
sp_total = np.array([sp_total[batch == b].mean() for b in range(self.summary_stats["n_batch"])])
self.detection_mean_ = (sp_total / model_kwargs.get("N_cells_per_location", 1)) / sc_total
self.detection_mean_ = self.detection_mean_ * detection_mean_correction
model_kwargs["detection_mean"] = self.detection_mean_.reshape((self.summary_stats["n_batch"], 1)).astype(
"float32"
)
detection_alpha = model_kwargs.get("detection_alpha", None)
if detection_alpha is not None:
if type(detection_alpha) is dict:
batch_mapping = self.adata.uns["_scvi"]["categorical_mappings"]["_scvi_batch"]["mapping"]
self.detection_alpha_ = pd.Series(detection_alpha)[batch_mapping]
model_kwargs["detection_alpha"] = self.detection_alpha_.values.reshape(
(self.summary_stats["n_batch"], 1)
).astype("float32")
self.module = Cell2locationBaseModule(
model=model_class,
n_obs=self.summary_stats["n_cells"],
n_vars=self.summary_stats["n_vars"],
n_factors=self.n_factors_,
n_batch=self.summary_stats["n_batch"],
cell_state_mat=self.cell_state_df_.values.astype("float32"),
**model_kwargs,
)
self._model_summary_string = f'cell2location model with the following params: \nn_factors: {self.n_factors_} \nn_batch: {self.summary_stats["n_batch"]} '
self.init_params_ = self._get_init_params(deepcopy(locals()))
@staticmethod
@setup_anndata_dsp.dedent
def setup_anndata(
adata: AnnData,
batch_key: Optional[str] = None,
labels_key: Optional[str] = None,
layer: Optional[str] = None,
categorical_covariate_keys: Optional[List[str]] = None,
continuous_covariate_keys: Optional[List[str]] = None,
copy: bool = False,
) -> Optional[AnnData]:
"""
%(summary)s.
Parameters
----------
%(param_adata)s
%(param_batch_key)s
%(param_labels_key)s
%(param_layer)s
%(param_cat_cov_keys)s
%(param_cont_cov_keys)s
%(param_copy)s
Returns
-------
%(returns)s
"""
return _setup_anndata(
adata,
batch_key=batch_key,
labels_key=labels_key,
layer=layer,
categorical_covariate_keys=categorical_covariate_keys,
continuous_covariate_keys=continuous_covariate_keys,
copy=copy,
)
def train(
self, max_epochs: int = 30000, batch_size: int = None, train_size: float = 1, lr: float = 0.002, **kwargs
):
"""Train the model with useful defaults
Parameters
----------
max_epochs
Number of passes through the dataset. If `None`, defaults to
`np.min([round((20000 / n_cells) * 400), 400])`
train_size
Size of training set in the range [0.0, 1.0]. Use all data points in training because
we need to estimate cell abundance at all locations.
batch_size
Minibatch size to use during training. If `None`, no minibatching occurs and all
data is copied to device (e.g., GPU).
lr
Optimiser learning rate (default optimiser is :class:`~pyro.optim.ClippedAdam`).
Specifying optimiser via plan_kwargs overrides this choice of lr.
kwargs
Other arguments to scvi.model.base.PyroSviTrainMixin().train() method
"""
kwargs["max_epochs"] = max_epochs
kwargs["batch_size"] = batch_size
kwargs["train_size"] = train_size
kwargs["lr"] = lr
super().train(**kwargs)
def export_posterior(
self,
adata,
sample_kwargs: Optional[dict] = None,
export_slot: str = "mod",
add_to_obsm: list = ["means", "stds", "q05", "q95"],
):
"""
Summarise posterior distribution and export results (cell abundance) to anndata object:
1. adata.obsm: Estimated cell abundance as pd.DataFrames for each posterior distribution summary `add_to_obsm`,
posterior mean, sd, 5% and 95% quantiles (['means', 'stds', 'q05', 'q95']).
If export to adata.obsm fails with error, results are saved to adata.obs instead.
2. adata.uns: Posterior of all parameters, model name, date,
cell type names ('factor_names'), obs and var names.
Parameters
----------
adata
anndata object where results should be saved
sample_kwargs
arguments for self.sample_posterior (generating and summarising posterior samples), namely:
num_samples - number of samples to use (Default = 1000).
batch_size - data batch size (keep low enough to fit on GPU, default 2048).
use_gpu - use gpu for generating samples?
export_slot
adata.uns slot where to export results
add_to_obsm
posterior distribution summary to export in adata.obsm (['means', 'stds', 'q05', 'q95']).
Returns
-------
"""
sample_kwargs = sample_kwargs if isinstance(sample_kwargs, dict) else dict()
# generate samples from posterior distributions for all parameters
# and compute mean, 5%/95% quantiles and standard deviation
self.samples = self.sample_posterior(**sample_kwargs)
# TODO use add_to_obsm to determine which quantiles need to be computed,
# and if means and stds are not in the list - use quantile methods rather than sampling posterior
# export posterior distribution summary for all parameters and
# annotation (model, date, var, obs and cell type names) to anndata object
adata.uns[export_slot] = self._export2adata(self.samples)
# add estimated cell abundance as dataframe to obsm in anndata
# first convert np.arrays to pd.DataFrames with cell type and observation names
# data frames contain mean, 5%/95% quantiles and standard deviation, denoted by a prefix
for k in add_to_obsm:
sample_df = self.sample2df_obs(
self.samples,
site_name="w_sf",
summary_name=k,
name_prefix="cell_abundance",
)
try:
adata.obsm[f"{k}_cell_abundance_w_sf"] = sample_df.loc[adata.obs.index, :]
except ValueError:
# Catching weird error with obsm: `ValueError: value.index does not match parent’s axis 1 names`
adata.obs[sample_df.columns] = sample_df.loc[adata.obs.index, :]
return adata
def plot_spatial_QC_across_batches(self):
"""QC plot: compare total RNA count with estimated total cell abundance and detection sensitivity."""
adata = self.adata
# get batch key and the list of samples
batch_key = self.adata.uns["_scvi"]["categorical_mappings"]["_scvi_batch"]["original_key"]
samples = adata.obs[batch_key].unique()
# figure out plot shape
ncol = len(samples)
nrow = 3
fig, axs = plt.subplots(nrow, ncol, figsize=(1 + 4 * ncol, 1 + 4 * nrow))
if ncol == 1:
axs = axs.reshape((nrow, 1))
# compute total counts
# find data slot
x_dict = self.adata.uns["_scvi"]["data_registry"]["X"]
if x_dict["attr_name"] == "X":
use_raw = False
else:
use_raw = True
if x_dict["attr_name"] == "layers":
layer = x_dict["attr_key"]
else:
layer = None
# get data
if layer is not None:
x = adata.layers[layer]
else:
if not use_raw:
x = adata.X
else:
x = adata.raw.X
# compute total counts per location
cell_type = "total RNA counts"
adata.obs[cell_type] = np.array(x.sum(1)).flatten()
# figure out colour map scaling
vmax = np.quantile(adata.obs[cell_type].values, 0.992)
# plot, iterating across samples
for i, s in enumerate(samples):
sp_data_s = select_slide(adata, s, batch_key=batch_key)
scanpy.pl.spatial(
sp_data_s,
cmap="magma",
color=cell_type,
size=1.3,
img_key="hires",
alpha_img=1,
vmin=0,
vmax=vmax,
ax=axs[0, i],
show=False,
)
axs[0, i].title.set_text(cell_type + "\n" + s)
cell_type = "Total cell abundance (sum_f w_sf)"
adata.obs[cell_type] = adata.uns["mod"]["post_sample_means"]["w_sf"].sum(1).flatten()
# figure out colour map scaling
vmax = np.quantile(adata.obs[cell_type].values, 0.992)
# plot, iterating across samples
for i, s in enumerate(samples):
sp_data_s = select_slide(adata, s, batch_key=batch_key)
scanpy.pl.spatial(
sp_data_s,
cmap="magma",
color=cell_type,
size=1.3,
img_key="hires",
alpha_img=1,
vmin=0,
vmax=vmax,
ax=axs[1, i],
show=False,
)
axs[1, i].title.set_text(cell_type + "\n" + s)
cell_type = "RNA detection sensitivity (y_s)"
adata.obs[cell_type] = adata.uns["mod"]["post_sample_q05"]["detection_y_s"]
# figure out colour map scaling
vmax = np.quantile(adata.obs[cell_type].values, 0.992)
# plot, iterating across samples
for i, s in enumerate(samples):
sp_data_s = select_slide(adata, s, batch_key=batch_key)
scanpy.pl.spatial(
sp_data_s,
cmap="magma",
color=cell_type,
size=1.3,
img_key="hires",
alpha_img=1,
vmin=0,
vmax=vmax,
ax=axs[2, i],
show=False,
)
axs[2, i].title.set_text(cell_type + "\n" + s)
fig.tight_layout(pad=0.5)
return fig
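# Hedged end-to-end sketch (dataset, column names and hyper-parameter values are
# placeholders, not defaults of this module): the intended workflow is
# setup_anndata -> fit -> export_posterior.
#
#   Cell2location.setup_anndata(adata_vis, batch_key="sample")
#   mod = Cell2location(
#       adata_vis, cell_state_df=inf_aver,
#       N_cells_per_location=30, detection_alpha=20,
#   )
#   mod.train(max_epochs=30000, batch_size=None, train_size=1)
#   adata_vis = mod.export_posterior(
#       adata_vis, sample_kwargs={"num_samples": 1000, "batch_size": 2048}
#   )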
|
the-stack_0_2372 | import json
from packlib.base import ProxmoxAction
class NodesNodeCertificatesInfoAction(ProxmoxAction):
"""
Get information about node's certificates.
"""
def run(self, node, profile_name=None):
super().run(profile_name)
# Only include non None arguments to pass through to proxmox api.
proxmox_kwargs = {}
for api_arg in [
["node", node, "string"],
]:
if api_arg[1] is None:
continue
if "[n]" in api_arg[0]:
unit_list = json.loads(api_arg[1])
for i, v in enumerate(unit_list):
proxmox_kwargs[api_arg[0].replace("[n]", str(i))] = v
else:
if api_arg[2] == "boolean":
api_arg[1] = int(api_arg[1])
proxmox_kwargs[api_arg[0]] = api_arg[1]
return self.proxmox.get(f"nodes/{node}/certificates/info", **proxmox_kwargs)
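# Illustrative note (hypothetical argument name and values): for an api_arg such as
# ["link[n]", '["eth0", "eth1"]', "string"], the "[n]" branch in run() expands the
# JSON list element-wise,
#
#   api_arg = ["link[n]", '["eth0", "eth1"]', "string"]
#   # after the loop: proxmox_kwargs == {"link0": "eth0", "link1": "eth1"}
#
# while a plain argument like ["node", "pve01", "string"] is passed through as
# proxmox_kwargs["node"] = "pve01".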
|
the-stack_0_2374 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 14 21:24:17 2018
@author: Zhaoyi.Shen
"""
import sys
sys.path.append('/home/z1s/py/lib/')
from lanczos_filter import lanczos_filter
import numpy as np
import scipy as sp
from scipy.signal import butter, lfilter, filtfilt
def lfca(x, cutoff, truncation, scale, **kwargs):
if x.ndim!=2:
return
if 'covtot' in kwargs.keys():
covtot = kwargs['covtot']
else:
covtot = np.cov(x,rowvar=False)
(n,p) = x.shape
if covtot.shape!=(p,p):
return
# center data
x = x - np.nanmean(x,0)[np.newaxis,...]
xs = x * np.transpose(scale)
# eigendecomposition of covariance matrix
#scale.shape = (1,p)
covtot = np.transpose(scale)*covtot*scale
pcvec, evl, rest = peigs(covtot, min(n-1,p))
trcovtot = np.trace(covtot)
#scale.shape = (p)
# percent of total sample variation accounted for by ead EOF
pvar = evl/trcovtot*100
# principal component time series
pcs = np.dot(xs, pcvec)
# return EOFs in original scaling as patterns (row vectors)
eof = np.transpose(pcvec)/np.transpose(scale)
# truncation of EOFs
ntr = truncation
# whitening transformation
f = np.sqrt(np.squeeze(evl)[0:ntr])
# get transformation matrices
s = np.dot(pcvec[:,0:ntr], np.diag(1./f))
sadj = np.dot(np.diag(f), np.transpose(pcvec[:,0:ntr]))
# filter data matrix
b,a = butter(5,1./cutoff,btype='low')
t = np.arange(1,n+1)
#t.shape = (1,n)
#t = np.transpose(t)
x_f = xs.copy()
for i in range(xs.shape[1]):
p = np.polyfit(t,xs[:,i],1)
tmp = xs[t-1,i]-p[0]*t-p[1]
tmp1 = np.concatenate((np.flipud(tmp),tmp,np.flipud(tmp)))
#tmp_filt = filtfilt(b,a,tmp)
tmp_filt = lanczos_filter(tmp1,1,1./cutoff)[0]
x_f[:,i] = tmp_filt[int(np.size(tmp_filt)/3):int(2*np.size(tmp_filt)/3)]+p[0]*t+p[1]
#x_f[:,i] = tmp_filt+p[0]*t+p[1]
# whiten variables
y = np.dot(x_f, s)
# slow covariance matrix of whitened variables
gamma = np.cov(y,rowvar=False)
# SVD of slow variance matrix
dummy, r, v = csvd(gamma)
# weight vectors and patterns
weights = scale * np.dot(s, v)
lfps = np.dot(np.transpose(v), sadj)/np.transpose(scale)
# choose signs of patterns, weights, eofs, and pcs
#scale.shape = (1,p)
for j in range(lfps.shape[0]):
if np.dot(lfps[j,:][np.newaxis,...], scale)<0:
lfps[j,:] = -lfps[j,:]
weights[:,j] = -weights[:,j]
for j in range(eof.shape[0]):
if np.dot(eof[j,:][np.newaxis,...], scale)<0:
eof[j,:] = -eof[j,:]
pcs[:,j] = -pcs[:,j]
#scale.shape = (p)
# low-frequency components
xs = xs/np.transpose(scale)
lfcs = np.dot(xs, weights)
# slow covariance of untruncated state space
cov_slow = np.cov(x_f,rowvar=False)
trcovslow = np.trace(cov_slow)
w = weights/scale
p = lfps*np.transpose(scale)
pw_diag = np.diag(np.dot(p,w))
slow_var = np.diag(np.dot(np.dot(p,cov_slow),w))/pw_diag
tot_var = np.diag(np.dot(np.dot(p,covtot),w))/pw_diag
pcvec_diag = np.diag(np.dot(np.transpose(pcvec),pcvec))
slow_var_eofs = np.diag(np.dot(np.dot(np.transpose(pcvec),cov_slow),pcvec))/pcvec_diag
tot_var_eofs = np.diag(np.dot(np.dot(np.transpose(pcvec),covtot),pcvec))/pcvec_diag
# slow variance and total variance in each LFC
pvar_slow = slow_var/trcovslow*100
pvar_lfc = tot_var/trcovtot*100
r_eofs = slow_var_eofs/tot_var_eofs
pvar_slow_eofs = slow_var_eofs/trcovslow*100
return lfcs, lfps, weights, r, pvar, pcs, eof, ntr, pvar_slow, pvar_lfc, r_eofs, pvar_slow_eofs
def peigs(a, rmax):
(m,n) = a.shape
if rmax>min(m,n):
rmax = min(m,n)
if rmax<min(m,n)/10.:
(d,v) = sp.sparse.linalg.eigs(a, rmax)
else:
(d,v) = np.linalg.eig(a)
if d.size>max(d.shape):
d = np.diag(d)
# ensure that eigenvalues are monotonically decreasing
i = np.argsort(-d)
d = -np.sort(-d)
v = v[:,i]
# estimate number of positive eigenvalues of a
d_min = max(d)*max(m,n)*np.spacing(1)
r = np.sum(d>d_min)
# discard eigenpairs with eigenvalues that are close to or less than zero
d = d[:r]
v = v[:,:r]
d = d[:]
return v, d, r
def csvd(a):
(m,n) = a.shape
if m>=n:
(u,s,v) = np.linalg.svd(a,0)
v = np.transpose(v)
else:
(v,s,u) = np.linalg.svd(a.transpose(),0)
u = np.transpose(u)
return u, s, v
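# Hedged usage sketch (synthetic data; sizes, cutoff and truncation are placeholders):
# x holds n time samples of p spatial points and scale carries the per-point area
# weights as a (p, 1) column vector.
#
#   n, p = 200, 50
#   x = np.random.randn(n, p)
#   scale = np.ones((p, 1))
#   (lfcs, lfps, weights, r, pvar, pcs, eof, ntr,
#    pvar_slow, pvar_lfc, r_eofs, pvar_slow_eofs) = lfca(x, cutoff=10,
#                                                        truncation=20, scale=scale)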
|
the-stack_0_2377 | """
Author: Sijin Chen, Fudan University
Finished Date: 2021/06/04
"""
from .nn import NetworkWrapper
from collections import OrderedDict
from copy import deepcopy
from typing import Callable
import numpy as np
class Optimizer:
""" Meta class for optimizers """
def __init__(self, *args, **kwargs):
self.state_dict = OrderedDict()
def load_state_dict(self, state_dict: OrderedDict):
self.state_dict = deepcopy(state_dict)
    def step(self,): raise NotImplementedError("Override this!")
class lr_scheduler:
""" Meta class for adjusting optimizer learning rate """
def __init__(self, optimizer: Optimizer, *args, **kwargs):
self.optimizer = optimizer
self.state_dict = OrderedDict()
def load_state_dict(self, state_dict: OrderedDict):
self.state_dict = deepcopy(state_dict)
class SGD(Optimizer):
""" The optimizer class to update the parameters from the network """
def __init__(self,
model: NetworkWrapper,
lr: float,
momentum: float=None,
weight_decay: float=None
):
super(SGD, self).__init__()
self.model = model
self.lr = lr
self.momentum = momentum
self.weight_decay = weight_decay if weight_decay is not None else 0.
if self.momentum is not None:
for ModuleName, Layer in self.model.ModuleDict.items():
if "weight" in Layer.state_dict:
self.state_dict["{}-weight".format(ModuleName)] = 0.
if "bias" in Layer.state_dict:
self.state_dict["{}-bias".format(ModuleName)] = 0.
self.decay_rate = None
self.batch_size = None
def load_state_dict(self, state_dict: OrderedDict):
self.state_dict = deepcopy(state_dict)
def _step_without_momentum(self,):
""" Update the layers without momentum
Note:
            Since the update without momentum is a special version of the momentum update
            (momentum=0), we separate these two updating methods to accelerate training.
"""
for ModuleName, Layer in self.model.ModuleDict.items():
self.batch_size = Layer["batch_size"]
self.decay_rate = 1 - (self.weight_decay/self.batch_size)
if "weight" in Layer.state_dict:
Layer.state_dict["weight"] = Layer["weight"]*self.decay_rate - self.lr*Layer.grad["weight"]
if "bias" in Layer.state_dict:
Layer.state_dict["bias"] = Layer["bias"]*self.decay_rate - self.lr*Layer.grad["bias"]
def _step_with_momentum(self,):
""" Update the layers with momentum update:
W(t+1) = W(t) - lr*dW + momentum*(W(t) - W(t-1))
"""
for ModuleName, Layer in self.model.ModuleDict.items():
self.batch_size = Layer["batch_size"]
self.decay_rate = 1-(self.weight_decay/self.batch_size)
if "weight" in Layer.state_dict:
cache = deepcopy(Layer["weight"])
momentum = cache - self.state_dict["{}-weight".format(ModuleName)]
Layer.state_dict["weight"] = cache*self.decay_rate - self.lr*Layer.grad["weight"] + self.momentum*momentum
self.state_dict["{}-weight".format(ModuleName)] = cache
if "bias" in Layer.state_dict:
cache = deepcopy(Layer["bias"])
momentum = cache - self.state_dict["{}-bias".format(ModuleName)]
Layer.state_dict["bias"] = cache*self.decay_rate - self.lr*Layer.grad["bias"] + self.momentum*momentum
self.state_dict["{}-bias".format(ModuleName)] = cache
def step(self,):
""" We implemented two different ways of updating parameters """
if self.momentum is not None:
self._step_with_momentum()
else:
self._step_without_momentum()
class LambdaLR(lr_scheduler):
""" Using lambda function to adjust learning rate """
# always update learning rate after running the entire epoch!
def __init__(self, optimizer: Optimizer, lr_lambda: Callable, verbose: bool=False):
super(lr_scheduler, self).__init__()
self.optimizer = optimizer
self.lr_lambda = lr_lambda
self.verbose = verbose
self.epoch = -1
def step(self,):
# adjusting learning rate using the lr-lambda function
lr = self.optimizer.lr
# Update learning rate
self.optimizer.lr = self.lr_lambda(lr)
if self.verbose is True:
print("Adjusting learning rate with lr_lambda from {:.4f} to {:.4f}".format(
lr, self.optimizer.lr))
self.epoch += 1
class CosineAnnealingLR(lr_scheduler):
""" Using lambda function to adjust learning rate """
# always update learning rate after running the entire epoch!
def __init__(self, optimizer: Optimizer, T_max: int, eta_min: float=0.,
verbose: bool=False):
super(lr_scheduler, self).__init__()
self.optimizer = optimizer
self.T_max = T_max
self.T_cur = -1
self.eta_min = eta_min
self.eta_max = None
self.verbose = verbose
def step(self,):
""" Update the learning rate using
\eta_t = \eta_{min} + \frac{1}{2}(\eta_{max} - \eta_{min})\left(1 +
\cos\left(\frac{T_{cur}}{T_{max}}\pi\right)\right)
"""
lr = self.optimizer.lr
if self.T_cur == -1: self.eta_max = lr
# Cosine annealing function
new_lr = self.eta_min + 1/2*(self.eta_max-self.eta_min) * \
(1 + np.cos(self.T_cur/self.T_max*np.pi))
# Update learning rate
self.optimizer.lr = new_lr
if self.verbose is True:
print("Adjusting learning rate with cosine annealing from {:.4f} to {:.4f}".format(
lr, self.optimizer.lr))
self.T_cur += 1
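# Hedged usage sketch (the model construction and the training-loop body are
# assumptions, not defined in this module): the optimizer pairs with a scheduler
# that is stepped once per epoch, as noted in the scheduler comments above.
#
#   model = NetworkWrapper(...)  # wrapper exposing ModuleDict with grads/state_dicts
#   optimizer = SGD(model, lr=0.1, momentum=0.9, weight_decay=1e-4)
#   scheduler = CosineAnnealingLR(optimizer, T_max=50, eta_min=1e-4, verbose=True)
#   for epoch in range(50):
#       for x, y in batches:      # forward/backward fill Layer.grad for each layer
#           ...
#           optimizer.step()
#       scheduler.step()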
|
the-stack_0_2379 | # USAGE
# python cluster_faces.py --encodings encodings.pickle
# import the necessary packages
from sklearn.cluster import DBSCAN
from imutils import build_montages
import numpy as np
import argparse
import pickle
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--encodings", required=True,
help="path to serialized db of facial encodings")
ap.add_argument("-j", "--jobs", type=int, default=-1,
help="# of parallel jobs to run (-1 will use all CPUs)")
args = vars(ap.parse_args())
# load the serialized face encodings + bounding box locations from
# disk, then extract the set of encodings to so we can cluster on
# them
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())
data = np.array(data)
encodings = [d["encoding"] for d in data]
# cluster the embeddings
print("[INFO] clustering...")
clt = DBSCAN(metric="euclidean", n_jobs=args["jobs"])
clt.fit(encodings)
# determine the total number of unique faces found in the dataset
labelIDs = np.unique(clt.labels_)
numUniqueFaces = len(np.where(labelIDs > -1)[0])
print("[INFO] # unique faces: {}".format(numUniqueFaces))
# loop over the unique face integers
for labelID in labelIDs:
# find all indexes into the `data` array that belong to the
# current label ID, then randomly sample a maximum of 25 indexes
# from the set
print("[INFO] faces for face ID: {}".format(labelID))
idxs = np.where(clt.labels_ == labelID)[0]
idxs = np.random.choice(idxs, size=min(25, len(idxs)),
replace=False)
# initialize the list of faces to include in the montage
faces = []
# loop over the sampled indexes
for i in idxs:
# load the input image and extract the face ROI
image = cv2.imread(data[i]["imagePath"])
(top, right, bottom, left) = data[i]["loc"]
face = image[top:bottom, left:right]
# force resize the face ROI to 96x96 and then add it to the
# faces montage list
face = cv2.resize(face, (96, 96))
faces.append(face)
# create a montage using 96x96 "tiles" with 5 rows and 5 columns
montage = build_montages(faces, (96, 96), (5, 5))[0]
# show the output montage
title = "Face ID #{}".format(labelID)
title = "Unknown Faces" if labelID == -1 else title
cv2.imshow(title, montage)
cv2.waitKey(0) |
the-stack_0_2381 | from django import forms
from .models import Questionnaire
class NewLandingForm(forms.Form):
label = forms.CharField(max_length=64, required=True)
questionnaire = forms.ModelChoiceField(
Questionnaire.objects.all(),
widget=forms.widgets.RadioSelect(),
empty_label=None,
required=True,
)
|
the-stack_0_2383 | """
Copyright (c) 2019-2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging as log
import numpy as np
from openvino.runtime import Core, get_version, PartialShape
class InferenceEngine:
def __init__(self, net_model_xml_path, device, stride):
self.device = device
self.stride = stride
log.info('OpenVINO Inference Engine')
log.info('\tbuild: {}'.format(get_version()))
self.core = Core()
log.info('Reading model {}'.format(net_model_xml_path))
self.model = self.core.read_model(net_model_xml_path)
required_output_keys = {'features', 'heatmaps', 'pafs'}
for output_tensor_name in required_output_keys:
try:
self.model.output(output_tensor_name)
except RuntimeError:
raise RuntimeError("The demo supports only topologies with the following output keys: {}".format(
', '.join(required_output_keys)))
self.input_tensor_name = self.model.inputs[0].get_any_name()
compiled_model = self.core.compile_model(self.model, self.device)
self.infer_request = compiled_model.create_infer_request()
log.info('The model {} is loaded to {}'.format(net_model_xml_path, self.device))
def infer(self, img):
img = img[0:img.shape[0] - (img.shape[0] % self.stride),
0:img.shape[1] - (img.shape[1] % self.stride)]
n, c, h, w = self.model.inputs[0].shape
if h != img.shape[0] or w != img.shape[1]:
self.model.reshape({self.input_tensor_name: PartialShape([n, c, img.shape[0], img.shape[1]])})
compiled_model = self.core.compile_model(self.model, self.device)
self.infer_request = compiled_model.create_infer_request()
img = np.transpose(img, (2, 0, 1))[None, ]
self.infer_request.infer({self.input_tensor_name: img})
inference_result = {name: self.infer_request.get_tensor(name).data[:] for name in {'features', 'heatmaps', 'pafs'}}
inference_result = (inference_result['features'][0],
inference_result['heatmaps'][0], inference_result['pafs'][0])
return inference_result
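# Hedged usage sketch (model path, device and stride are placeholders; the model must
# expose the 'features', 'heatmaps' and 'pafs' outputs checked in __init__):
#
#   engine = InferenceEngine("human-pose-estimation-3d.xml", "CPU", stride=8)
#   frame = np.zeros((256, 448, 3), dtype=np.float32)  # stand-in for a BGR frame
#   features, heatmaps, pafs = engine.infer(frame)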
|
the-stack_0_2385 | import pytest
from page_objects.home_page import HomePage
from page_objects.item_details import ItemDetails
class TestProductDetails:
@pytest.mark.skip("Skip for now")
def test_product_details(self, driver):
s = ItemDetails(driver)
h = HomePage(driver)
h.navigate_to_homepage()
s.item_search()
s.product_details()
s.product_review_form()
s.add_item_to_cart()
|
the-stack_0_2386 | LITHO_ROOT = "//"
LITHO_VISIBILITY = [
"PUBLIC",
]
LITHO_STUBS_VISIBILITY = [
"//litho-core/...",
]
LITHO_TESTING_UTIL_VISIBILITY = [
"PUBLIC",
]
LITHO_IS_OSS_BUILD = True
def make_dep_path(pth):
return LITHO_ROOT + pth
LITHO_ROOT_TARGET = make_dep_path(":components")
# Java source
LITHO_JAVA_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho:litho")
LITHO_ANNOTATIONS_TARGET = make_dep_path("litho-annotations/src/main/java/com/facebook/litho/annotations:annotations")
LITHO_CONFIG_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/config:config")
LITHO_BOOST_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/boost:boost")
LITHO_DISPLAYLISTSTUBS_TARGET = make_dep_path("litho-stubs:stubs")
LITHO_VIEWCOMPAT_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/viewcompat:viewcompat")
LITHO_UTILS_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/utils:utils")
LITHO_WIDGET_ACCESSIBILITIES_TARGET = make_dep_path("litho-widget/src/main/java/com/facebook/litho/widget/accessibility:accessibility")
LITHO_WIDGET_TARGET = make_dep_path("litho-widget/src/main/java/com/facebook/litho/widget:widget")
LITHO_LITHO_FRESCO_TARGET = make_dep_path("litho-fresco/src/main/java/com/facebook/litho/fresco:fresco")
LITHO_STATS_TARGET = make_dep_path("litho-core/src/main/java/com/facebook/litho/stats:stats")
LITHO_TESTING_CORE_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho:litho")
LITHO_TESTING_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/testing:testing")
LITHO_TESTING_ASSERTJ_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/testing/assertj:assertj")
LITHO_TESTING_HELPER_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/testing/helper:helper")
LITHO_TESTING_SUBCOMPONENTS_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/testing/subcomponents:subcomponents")
LITHO_TESTING_WIDGET_TARGET = make_dep_path("litho-testing/src/main/java/com/facebook/litho/widget:widget")
LITHO_TESTING_ESPRESSO_TARGET = make_dep_path("litho-espresso/src/main/java/com/facebook/litho/testing/espresso:espresso")
LITHO_TEST_RES = make_dep_path("litho-it/src/main:res")
LITHO_SECTIONS_TARGET = make_dep_path("litho-sections-core/src/main/java/com/facebook/litho/sections:sections")
LITHO_SECTIONS_COMMON_TARGET = make_dep_path("litho-sections-core/src/main/java/com/facebook/litho/sections/common:common")
LITHO_SECTIONS_WIDGET_TARGET = make_dep_path("litho-sections-widget/src/main/java/com/facebook/litho/sections/widget:widget")
LITHO_SECTIONS_ANNOTATIONS_TARGET = make_dep_path("litho-sections-annotations/src/main/java/com/facebook/litho/sections/annotations:annotations")
LITHO_SECTIONS_PROCESSOR_TARGET = make_dep_path("litho-sections-processor/src/main/java/com/facebook/litho/sections/specmodels/processor:processor")
LITHO_SECTIONS_CONFIG_TARGET = make_dep_path("litho-sections-core/src/main/java/com/facebook/litho/sections/config:config")
LITHO_FBJNI_JAVA_TARGET = make_dep_path("lib/fbjni/src/main/java/com/facebook/jni:jni")
# Test source
LITHO_TEST_TARGET = make_dep_path("litho-it/src/test/java/com/facebook/litho:litho")
# Java source with local upstream
LITHO_PROGUARDANNOTATIONS_TARGET = make_dep_path("litho-annotations/src/main/java/com/facebook/proguard/annotations:annotations")
# Resources
LITHO_RES_TARGET = make_dep_path("litho-core:res")
# Libraries
LITHO_INFERANNOTATIONS_TARGET = make_dep_path("lib/infer-annotations:infer-annotations")
LITHO_JSR_TARGET = make_dep_path("lib/jsr-305:jsr-305")
LITHO_ANDROIDSUPPORT_TARGET = make_dep_path("lib/android-support:android-support")
LITHO_ANDROIDSUPPORT_RECYCLERVIEW_TARGET = make_dep_path("lib/android-support:android-support-recyclerview")
LITHO_ANDROIDSUPPORT_APPCOMPAT_TARGET = make_dep_path("lib/appcompat:appcompat")
LITHO_ANDROIDSUPPORT_TESTING_TARGET = make_dep_path("lib/android-support:android-support-testing")
LITHO_YOGA_TARGET = make_dep_path("lib/yoga:yoga")
LITHO_YOGAJNI_TARGET = make_dep_path("lib/yogajni:jni")
LITHO_BUILD_CONFIG_TARGET = make_dep_path(":build_config")
LITHO_COMMONS_CLI_TARGET = make_dep_path("lib/commons-cli:commons-cli")
LITHO_TEXTLAYOUTBUILDER_TARGET = make_dep_path("lib/textlayoutbuilder:textlayoutbuilder")
LITHO_JAVAPOET_TARGET = make_dep_path("lib/javapoet:javapoet")
LITHO_FBCORE_TARGET = make_dep_path("lib/fbcore:fbcore")
LITHO_SOLOADER_TARGET = make_dep_path("lib/soloader:soloader")
LITHO_ASSERTJ_TARGET = make_dep_path("lib/assertj:assertj")
LITHO_COMPILE_TESTING_TARGET = make_dep_path("lib/compile-testing:compile-testing")
LITHO_TRUTH_TARGET = make_dep_path("lib/truth:truth")
LITHO_MOCKITO_TARGET = make_dep_path("lib/mockito:mockito")
LITHO_POWERMOCK_REFLECT_TARGET = make_dep_path("lib/powermock:powermock-reflect")
LITHO_POWERMOCK_MOCKITO_TARGET = make_dep_path("lib/powermock:powermock-mockito")
LITHO_JNI_TARGET = make_dep_path("lib/jni-hack:jni-hack")
LITHO_FBJNI_TARGET = make_dep_path("lib/fbjni:jni")
LITHO_GUAVA_TARGET = make_dep_path("lib/guava:guava")
LITHO_DIFFUTILS_TARGET = make_dep_path("lib/diff-utils:diff-utils")
LITHO_ESPRESSO_TARGET = make_dep_path("lib/espresso:espresso")
LITHO_SCREENSHOT_TARGET = make_dep_path("lib/screenshot:screenshot")
LITHO_JAVAC_TOOLS_TARGET = make_dep_path("lib/javac-tools:javac-tools")
# Fresco
LITHO_FRESCO_TARGET = make_dep_path("lib/fresco:fresco")
LITHO_ROBOLECTRIC_TARGET = make_dep_path("lib/robolectric3:robolectric3")
LITHO_JUNIT_TARGET = make_dep_path("lib/junit:junit")
LITHO_HAMCREST_LIBRARY_TARGET = make_dep_path("lib/hamcrest:hamcrest")
LITHO_HAMCREST_CORE_TARGET = make_dep_path("lib/hamcrest:hamcrest")
# Annotation processors
LITHO_PROCESSOR_TARGET = make_dep_path("litho-processor/src/main/java/com/facebook/litho/specmodels/processor:processor")
LITHO_PROCESSOR_LIB_TARGET = make_dep_path("litho-processor/src/main/java/com/facebook/litho/specmodels/processor:processor-lib")
LITHO_SECTIONS_PROCESSOR_LIB_TARGET = make_dep_path("litho-sections-processor/src/main/java/com/facebook/litho/sections/specmodels/processor:processor-lib")
# Sample app
LITHO_SAMPLE_JAVA = make_dep_path("sample/src/main/java/com/facebook/samples/litho:litho")
LITHO_SAMPLE_BAREBONES_JAVA = make_dep_path("sample-barebones/src/main/java/com/facebook/samples/lithobarebones:lithobarebones")
LITHO_SAMPLE_BAREBONES_RES = make_dep_path("sample-barebones:res")
LITHO_SAMPLE_CODELAB_JAVA = make_dep_path("sample-codelab/src/main/java/com/facebook/samples/lithocodelab:lithocodelab")
LITHO_SAMPLE_CODELAB_RES = make_dep_path("sample-codelab:res")
LITHO_SAMPLE_RES = make_dep_path("sample:res")
# Other targets
LITHO_OSS_TARGET = make_dep_path(":components")
# Targets that sometimes exist and sometimes don't
LITHO_TEXTLAYOUTBUILDER_UTILS_TARGET = []
LITHO_FRESCO_TARGETS = [
make_dep_path("lib/fbcore:fbcore"),
make_dep_path("lib/fresco:fresco-drawee"),
make_dep_path("lib/fresco:fresco"),
]
LITHO_FLIPPER_TARGETS = [
make_dep_path("lib/flipper:flipper"),
]
LITHO_FRESCO_PIPELINE_TARGET = [make_dep_path("lib/fresco:imagepipeline")]
LITHO_FRESCO_CONTROLLER_TARGET = []
LITHO_FRESCO_INTERFACES_TARGET = []
def components_robolectric_test(
name,
*args,
**kwargs):
"""Tests that can successfully run from the library root folder."""
extra_vm_args = [
"-Drobolectric.dependency.dir=lib/android-all",
"-Dcom.facebook.litho.is_oss=true",
]
kwargs["vm_args"] = extra_vm_args
kwargs["use_cxx_libraries"] = True
kwargs["cxx_library_whitelist"] = [
"//lib/yogajni:jni",
"//lib/fbjni:jni",
]
native.robolectric_test(
name = name,
*args,
**kwargs
)
def fb_java_test(*args, **kwargs):
"""Uses native java_test for OSS project."""
java_test(*args, **kwargs)
def litho_android_library(name, srcs = None, *args, **kwargs):
srcs = srcs or []
# This has no meaning in OSS.
kwargs.pop("fblite", None)
native.android_library(name, srcs = srcs, *args, **kwargs)
components_robolectric_powermock_test = components_robolectric_test
def fb_xplat_cxx_library(*args, **kwargs):
"""Delegates to cxx_library for OSS project."""
native.cxx_library(*args, **kwargs)
def fb_android_resource(**kwargs):
"""Delegates to native android_resource rule."""
android_resource(**kwargs)
def fb_java_library(**kwargs):
"""Delegates to native java_library rule."""
native.java_library(**kwargs)
def fb_android_library(**kwargs):
"""Delegates to native android_library rule."""
native.android_library(**kwargs)
def fb_prebuilt_cxx_library(**kwargs):
"""Delegates to native prebuilt_cxx_library."""
native.prebuilt_cxx_library(**kwargs)
def fb_instrumentation_test(**kwargs):
"""
We don't support this in the OSS build for now.
Please use Gradle instead.
"""
_ignore = kwargs
pass
def fb_core_android_library(**kwargs):
native.android_library(**kwargs)
def define_fbjni_targets():
# This target is only used in open source
fb_prebuilt_cxx_library(
name = "ndklog",
exported_platform_linker_flags = [
(
"^android.*",
["-llog"],
),
],
header_only = True,
visibility = LITHO_VISIBILITY,
)
fb_xplat_cxx_library(
name = "jni",
srcs = native.glob(
[
"src/main/cpp/fb/**/*.cpp",
],
),
header_namespace = "",
exported_headers = subdir_glob(
[
("src/main/cpp", "fb/**/*.h"),
],
),
compiler_flags = [
"-fno-omit-frame-pointer",
"-fexceptions",
"-frtti",
"-Wall",
"-std=c++11",
"-DDISABLE_CPUCAP",
"-DDISABLE_XPLAT",
],
exported_platform_headers = [
(
"^(?!android-arm$).*$",
subdir_glob([
("src/main/cpp", "lyra/*.h"),
]),
),
],
platform_srcs = [
(
"^(?!android-arm$).*$",
glob([
"src/main/cpp/lyra/*.cpp",
]),
),
],
soname = "libfb.$(ext)",
visibility = LITHO_VISIBILITY,
deps = [
LITHO_JNI_TARGET,
":ndklog",
],
)
# This target is only used in open source and will break the monobuild
# because we cannot define `soname` multiple times.
def define_yogajni_targets():
fb_prebuilt_cxx_library(
name = "ndklog",
exported_platform_linker_flags = [
(
"^android.*",
["-llog"],
),
],
header_only = True,
visibility = LITHO_VISIBILITY,
)
fb_xplat_cxx_library(
name = "jni",
srcs = native.glob(["src/main/cpp/jni/*.cpp"]),
header_namespace = "",
compiler_flags = [
"-fno-omit-frame-pointer",
"-fexceptions",
"-Wall",
"-O3",
"-std=c++11",
],
soname = "libyoga.$(ext)",
visibility = LITHO_VISIBILITY,
deps = [
make_dep_path("lib/yoga/src/main/cpp:yoga"),
LITHO_FBJNI_TARGET,
":ndklog",
],
)
# This target is only used in open source and will break the monobuild
# because we cannot define `soname` multiple times.
def define_cpp_yoga_targets():
fb_prebuilt_cxx_library(
name = "ndklog",
exported_platform_linker_flags = [
(
"^android.*",
["-llog"],
),
],
header_only = True,
visibility = LITHO_VISIBILITY,
)
fb_xplat_cxx_library(
name = "yoga",
srcs = native.glob(["yoga/*.cpp"]),
header_namespace = "",
exported_headers = native.glob(["yoga/*.h"]),
compiler_flags = [
"-fno-omit-frame-pointer",
"-fexceptions",
"-Wall",
"-std=c++11",
"-O3",
],
force_static = True,
visibility = LITHO_VISIBILITY,
deps = [
":ndklog",
],
)
the-stack_0_2390
# -*- coding: utf-8 -*-
"""Unit tests of everything related to retrieving the version
There are four tree states we want to check:
A: sitting on the 1.0 tag
B: dirtying the tree after 1.0
C: a commit after a tag, clean tree
D: a commit after a tag, dirty tree
"""
from __future__ import absolute_import, division, print_function
import inspect
import os
import re
import shutil
import sys
from contextlib import contextmanager
from os.path import join as path_join
from os.path import exists
from shutil import copyfile, rmtree
import pytest
from pyscaffold import shell
from pyscaffold.cli import main as putup
from pyscaffold.repo import add_tag
from pyscaffold.shell import command_exists, git
from pyscaffold.utils import chdir
pytestmark = pytest.mark.slow
__location__ = os.path.join(os.getcwd(), os.path.dirname(
inspect.getfile(inspect.currentframe())))
def cmd_path(cmd):
"""Try to get a fully specified command path.
Returns the full path when possible, otherwise just the command name.
Useful when running from virtualenv context.
"""
candidates = os.getenv('PATH', '').split(os.pathsep)
candidates.insert(0, path_join(sys.prefix, 'bin'))
if hasattr(sys, 'real_prefix'):
candidates.insert(1, path_join(getattr(sys, 'real_prefix'), 'bin'))
for candidate in candidates:
full_path = path_join(candidate, cmd)
if exists(full_path):
return full_path
return cmd
def venv_cmd(cmd, *args):
"""Create a callable from a command inside a virtualenv."""
return shell.ShellCommand(' '.join([cmd_path(cmd)] + list(args)))
pip = venv_cmd("pip")
setup_py = venv_cmd("python", "setup.py")
untar = shell.ShellCommand(
    ("gtar" if command_exists("gtar") else "tar") + " xvzkf")
# ^ BSD tar differs in options from GNU tar,
# so make sure to use the correct one...
# https://xkcd.com/1168/
type_ = shell.ShellCommand('file')
def is_inside_venv():
return hasattr(sys, 'real_prefix')
def check_clean_venv():
installed = [line.split()[0] for line in pip('list')]
dirty = ['demoapp', 'demoapp_data', 'UNKNOWN']
app_list = [x for x in dirty if x in installed]
if not app_list:
return
else:
raise RuntimeError("Dirty virtual environment:\n{} found".format(
', '.join(app_list)))
def create_demoapp(data=False):
if data:
demoapp = 'demoapp_data'
else:
demoapp = 'demoapp'
putup([demoapp])
with chdir(demoapp):
demoapp_src_dir = os.path.join(__location__, demoapp)
demoapp_dst_root = os.getcwd()
demoapp_dst_pkg = os.path.join(demoapp_dst_root, 'src', demoapp)
copyfile(os.path.join(demoapp_src_dir, 'runner.py'),
os.path.join(demoapp_dst_pkg, 'runner.py'))
git('add', os.path.join(demoapp_dst_pkg, 'runner.py'))
copyfile(os.path.join(demoapp_src_dir, 'setup.cfg'),
os.path.join(demoapp_dst_root, 'setup.cfg'))
copyfile(os.path.join(demoapp_src_dir, 'setup.py'),
os.path.join(demoapp_dst_root, 'setup.py'))
git('add', os.path.join(demoapp_dst_root, 'setup.cfg'))
git('add', os.path.join(demoapp_dst_root, 'setup.py'))
if data:
data_src_dir = os.path.join(demoapp_src_dir, 'data')
data_dst_dir = os.path.join(demoapp_dst_pkg, 'data')
os.mkdir(data_dst_dir)
copyfile(os.path.join(data_src_dir, 'hello_world.txt'),
os.path.join(data_dst_dir, 'hello_world.txt'))
git('add', os.path.join(data_dst_dir, 'hello_world.txt'))
git('commit', '-m', 'Added basic application logic')
def build_demoapp(dist, path=None, demoapp='demoapp'):
if path is None:
path = os.getcwd()
path = os.path.join(path, demoapp)
with chdir(path):
setup_py(dist)
@contextmanager
def installed_demoapp(dist=None, path=None, demoapp='demoapp'):
check_clean_venv()
if path is None:
path = os.getcwd()
path = os.path.join(path, demoapp, "dist", "{}*".format(demoapp))
if dist == 'bdist':
with chdir('/'):
output = untar(path)
install_dirs = list()
install_bin = None
for line in output:
if re.search(r".*/site-packages/{}.*?/$".format(demoapp), line):
install_dirs.append(line)
if re.search(r".*/bin/{}$".format(demoapp), line):
install_bin = line
elif dist == 'install':
with chdir(demoapp):
setup_py('install')
else:
pip("install", path)
try:
yield venv_cmd(demoapp)
finally:
if dist == 'bdist':
with chdir('/'):
os.remove(install_bin)
for path in install_dirs:
rmtree(path, ignore_errors=True)
else:
pip("uninstall", "-y", demoapp)
def check_version(output, exp_version, dirty=False):
version = output.split(' ')[-1]
    # some setuptools versions separate the local version part with '+', others with '_'
if dirty:
if '+' in version:
ver, local = version.split('+')
else:
ver, local = version.split('_')
assert local.endswith('dirty')
assert ver == exp_version
else:
if '+' in version:
ver = version.split('+')
else:
ver = version.split('_')
if len(ver) > 1:
assert not ver[1].endswith('dirty')
assert ver[0] == exp_version
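# Illustrative example of what check_version handles (the local part below is
# made up; the exact hash depends on the git state at build time):
#   "demoapp 0.1.post0.dev1+ng1234abc.dirty" splits into ver "0.1.post0.dev1"
#   and local "ng1234abc.dirty", so check_version(out, "0.1.post0.dev1",
#   dirty=True) passes.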
def make_dirty_tree(demoapp='demoapp'):
dirty_file = os.path.join('src', demoapp, 'runner.py')
with chdir(demoapp):
with open(dirty_file, 'a') as fh:
fh.write("\n\ndirty_variable = 69\n")
def make_commit(demoapp='demoapp'):
with chdir(demoapp):
git('commit', '-a', '-m', 'message')
def rm_git_tree(demoapp='demoapp'):
git_path = os.path.join(demoapp, '.git')
shutil.rmtree(git_path)
def test_sdist_install(tmpfolder):
create_demoapp()
build_demoapp('sdist')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "0.0.post0.dev2"
check_version(out, exp, dirty=False)
def test_sdist_install_dirty(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v0.1', 'first tag')
make_dirty_tree()
make_commit()
make_dirty_tree()
build_demoapp('sdist')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "0.1.post0.dev1"
check_version(out, exp, dirty=True)
def test_sdist_install_with_1_0_tag(tmpfolder):
create_demoapp()
make_dirty_tree()
make_commit()
add_tag('demoapp', 'v1.0', 'final release')
build_demoapp('sdist')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "1.0"
check_version(out, exp, dirty=False)
def test_sdist_install_with_1_0_tag_dirty(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v1.0', 'final release')
make_dirty_tree()
build_demoapp('sdist')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "1.0"
check_version(out, exp, dirty=True)
# bdist works like sdist so we only try one combination
def test_bdist_install(tmpfolder):
create_demoapp()
build_demoapp('bdist')
with installed_demoapp('bdist') as demoapp:
out = next(demoapp('--version'))
exp = "0.0.post0.dev2"
check_version(out, exp, dirty=False)
# bdist wheel works like sdist so we only try one combination
@pytest.mark.skipif(not is_inside_venv(),
reason='Needs to run in a virtualenv')
def test_bdist_wheel_install(tmpfolder):
create_demoapp()
build_demoapp('bdist_wheel')
with installed_demoapp() as demoapp:
out = next(demoapp('--version'))
exp = "0.0.post0.dev2"
check_version(out, exp, dirty=False)
def test_git_repo(tmpfolder):
create_demoapp()
with installed_demoapp('install'), chdir('demoapp'):
out = next(setup_py('--version'))
exp = '0.0.post0.dev2'
check_version(out, exp, dirty=False)
def test_git_repo_dirty(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v0.1', 'first tag')
make_dirty_tree()
make_commit()
make_dirty_tree()
with installed_demoapp('install'), chdir('demoapp'):
out = next(setup_py('--version'))
exp = '0.1.post0.dev1'
check_version(out, exp, dirty=True)
def test_git_repo_with_1_0_tag(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v1.0', 'final release')
with installed_demoapp('install'), chdir('demoapp'):
out = next(setup_py('--version'))
exp = '1.0'
check_version(out, exp, dirty=False)
def test_git_repo_with_1_0_tag_dirty(tmpfolder):
create_demoapp()
add_tag('demoapp', 'v1.0', 'final release')
make_dirty_tree()
with installed_demoapp('install'), chdir('demoapp'):
out = next(setup_py('--version'))
exp = '1.0'
check_version(out, exp, dirty=True)
def test_sdist_install_with_data(tmpfolder):
create_demoapp(data=True)
build_demoapp('sdist', demoapp='demoapp_data')
with installed_demoapp(demoapp='demoapp_data') as demoapp_data:
out = next(demoapp_data())
exp = "Hello World"
assert out.startswith(exp)
def test_bdist_install_with_data(tmpfolder):
create_demoapp(data=True)
build_demoapp('bdist', demoapp='demoapp_data')
with installed_demoapp('bdist', demoapp='demoapp_data') as demoapp_data:
out = next(demoapp_data())
exp = "Hello World"
assert out.startswith(exp)
@pytest.mark.skipif(not is_inside_venv(),
reason='Needs to run in a virtualenv')
def test_bdist_wheel_install_with_data(tmpfolder):
create_demoapp(data=True)
build_demoapp('bdist_wheel', demoapp='demoapp_data')
with installed_demoapp(demoapp='demoapp_data') as demoapp_data:
out = next(demoapp_data())
exp = "Hello World"
assert out.startswith(exp)
def test_setup_py_install(tmpfolder):
create_demoapp()
with installed_demoapp('install', demoapp='demoapp') as demoapp:
out = next(demoapp('--version'))
exp = "0.0.post0.dev2"
check_version(out, exp, dirty=False)
the-stack_0_2392
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import setuptools
name = "google-cloud-automl"
description = "Cloud AutoML API client library"
version = "0.2.0"
release_status = "Development Status :: 3 - Alpha"
dependencies = [
"google-api-core[grpc] >= 1.6.0, < 2.0.0dev",
'enum34; python_version < "3.4"',
]
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, "README.rst")
with io.open(readme_filename, encoding="utf-8") as readme_file:
readme = readme_file.read()
packages = [
package for package in setuptools.find_packages() if package.startswith("google")
]
namespaces = ["google"]
if "google.cloud" in packages:
namespaces.append("google.cloud")
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author="Google LLC",
author_email="[email protected]",
license="Apache 2.0",
url="https://github.com/GoogleCloudPlatform/google-cloud-python",
classifiers=[
release_status,
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
"Topic :: Internet",
],
platforms="Posix; MacOS X; Windows",
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*",
include_package_data=True,
zip_safe=False,
)
the-stack_0_2394
# coding: utf8
from __future__ import unicode_literals
from ...attrs import LIKE_NUM
_num_words = ['zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete',
'oito', 'nove', 'dez', 'onze', 'doze', 'treze', 'catorze',
'quinze', 'dezesseis', 'dezasseis', 'dezessete', 'dezassete', 'dezoito', 'dezenove', 'dezanove', 'vinte',
'trinta', 'quarenta', 'cinquenta', 'sessenta', 'setenta',
'oitenta', 'noventa', 'cem', 'mil', 'milhão', 'bilhão', 'bilião', 'trilhão', 'trilião',
'quatrilhão']
_ordinal_words = ['primeiro', 'segundo', 'terceiro', 'quarto', 'quinto', 'sexto',
'sétimo', 'oitavo', 'nono', 'décimo', 'vigésimo', 'trigésimo',
'quadragésimo', 'quinquagésimo', 'sexagésimo', 'septuagésimo',
'octogésimo', 'nonagésimo', 'centésimo', 'ducentésimo',
'trecentésimo', 'quadringentésimo', 'quingentésimo', 'sexcentésimo',
'septingentésimo', 'octingentésimo', 'nongentésimo', 'milésimo',
'milionésimo', 'bilionésimo']
def like_num(text):
text = text.replace(',', '').replace('.', '')
if text.isdigit():
return True
if text.count('/') == 1:
num, denom = text.split('/')
if num.isdigit() and denom.isdigit():
return True
if text.lower() in _num_words:
return True
if text.lower() in _ordinal_words:
return True
return False
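# A few illustrative cases of the heuristic above:
#   like_num("12.345,67") -> True (separators are stripped first)
#   like_num("3/4") -> True (numerator and denominator are both digits)
#   like_num("dezesseis") -> True, like_num("décimo") -> True
#   like_num("dezenas") -> False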
LEX_ATTRS = {
LIKE_NUM: like_num
}
the-stack_0_2397
#!/usr/bin/env python3
# coding=utf-8
"""
Parser that uses the ENTSOE API to return the following data types.
Consumption
Production
Exchanges
Exchange Forecast
Day-ahead Price
Generation Forecast
Consumption Forecast
"""
import itertools
import numpy as np
from bs4 import BeautifulSoup
from collections import defaultdict
import arrow
import logging, os, re
import requests
import pandas as pd
from .lib.validation import validate
from .lib.utils import sum_production_dicts, get_token
ENTSOE_ENDPOINT = 'https://transparency.entsoe.eu/api'
ENTSOE_PARAMETER_DESC = {
'B01': 'Biomass',
'B02': 'Fossil Brown coal/Lignite',
'B03': 'Fossil Coal-derived gas',
'B04': 'Fossil Gas',
'B05': 'Fossil Hard coal',
'B06': 'Fossil Oil',
'B07': 'Fossil Oil shale',
'B08': 'Fossil Peat',
'B09': 'Geothermal',
'B10': 'Hydro Pumped Storage',
'B11': 'Hydro Run-of-river and poundage',
'B12': 'Hydro Water Reservoir',
'B13': 'Marine',
'B14': 'Nuclear',
'B15': 'Other renewable',
'B16': 'Solar',
'B17': 'Waste',
'B18': 'Wind Offshore',
'B19': 'Wind Onshore',
'B20': 'Other',
}
ENTSOE_PARAMETER_BY_DESC = {v: k for k, v in ENTSOE_PARAMETER_DESC.items()}
ENTSOE_PARAMETER_GROUPS = {
'production': {
'biomass': ['B01', 'B17'],
'coal': ['B02', 'B05', 'B07', 'B08'],
'gas': ['B03', 'B04'],
'geothermal': ['B09'],
'hydro': ['B11', 'B12'],
'nuclear': ['B14'],
'oil': ['B06'],
'solar': ['B16'],
'wind': ['B18', 'B19'],
'unknown': ['B20', 'B13', 'B15']
},
'storage': {
'hydro storage': ['B10']
}
}
ENTSOE_PARAMETER_BY_GROUP = {code: fuel
                             for groups in ENTSOE_PARAMETER_GROUPS.values()
                             for fuel, codes in groups.items()
                             for code in codes}
# Get all the individual storage parameters in one list
ENTSOE_STORAGE_PARAMETERS = list(itertools.chain.from_iterable(
ENTSOE_PARAMETER_GROUPS['storage'].values()))
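# With the groups above, e.g. ENTSOE_PARAMETER_BY_GROUP['B14'] == 'nuclear',
# ENTSOE_PARAMETER_BY_GROUP['B10'] == 'hydro storage', and
# ENTSOE_STORAGE_PARAMETERS == ['B10'].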
# Define all ENTSOE zone_key <-> domain mapping
# see https://transparency.entsoe.eu/content/static_content/Static%20content/web%20api/Guide.html
ENTSOE_DOMAIN_MAPPINGS = {
'AL': '10YAL-KESH-----5',
'AT': '10YAT-APG------L',
'AX': '10Y1001A1001A46L', # for price only; Åland has SE-SE3 area price
'BA': '10YBA-JPCC-----D',
'BE': '10YBE----------2',
'BG': '10YCA-BULGARIA-R',
'BY': '10Y1001A1001A51S',
'CH': '10YCH-SWISSGRIDZ',
'CZ': '10YCZ-CEPS-----N',
'DE': '10Y1001A1001A83F',
'DE-LU': '10Y1001A1001A82H',
'DK': '10Y1001A1001A65H',
'DK-DK1': '10YDK-1--------W',
'DK-DK2': '10YDK-2--------M',
'EE': '10Y1001A1001A39I',
'ES': '10YES-REE------0',
'FI': '10YFI-1--------U',
'FR': '10YFR-RTE------C',
'GB': '10YGB----------A',
'GB-NIR': '10Y1001A1001A016',
'GR': '10YGR-HTSO-----Y',
'HR': '10YHR-HEP------M',
'HU': '10YHU-MAVIR----U',
'IE': '10YIE-1001A00010',
'IT': '10YIT-GRTN-----B',
'IT-BR': '10Y1001A1001A699',
'IT-CA': '10Y1001C--00096J',
'IT-CNO': '10Y1001A1001A70O',
'IT-CSO': '10Y1001A1001A71M',
'IT-FO': '10Y1001A1001A72K',
'IT-NO': '10Y1001A1001A73I',
'IT-PR': '10Y1001A1001A76C',
'IT-SAR': '10Y1001A1001A74G',
'IT-SIC': '10Y1001A1001A75E',
'IT-SO': '10Y1001A1001A788',
'LT': '10YLT-1001A0008Q',
'LU': '10YLU-CEGEDEL-NQ',
'LV': '10YLV-1001A00074',
# 'MD': 'MD',
'ME': '10YCS-CG-TSO---S',
'MK': '10YMK-MEPSO----8',
'MT': '10Y1001A1001A93C',
'NL': '10YNL----------L',
'NO': '10YNO-0--------C',
'NO-NO1': '10YNO-1--------2',
'NO-NO2': '10YNO-2--------T',
'NO-NO3': '10YNO-3--------J',
'NO-NO4': '10YNO-4--------9',
'NO-NO5': '10Y1001A1001A48H',
'PL': '10YPL-AREA-----S',
'PT': '10YPT-REN------W',
'RO': '10YRO-TEL------P',
'RS': '10YCS-SERBIATSOV',
'RU': '10Y1001A1001A49F',
'RU-KGD': '10Y1001A1001A50U',
'SE': '10YSE-1--------K',
'SE-SE1': '10Y1001A1001A44P',
'SE-SE2': '10Y1001A1001A45N',
'SE-SE3': '10Y1001A1001A46L',
'SE-SE4': '10Y1001A1001A47J',
'SI': '10YSI-ELES-----O',
'SK': '10YSK-SEPS-----K',
'TR': '10YTR-TEIAS----W',
'UA': '10YUA-WEPS-----0'
}
# Generation per unit can only be obtained at EIC (Control Area) level
ENTSOE_EIC_MAPPING = {
'DK-DK1': '10Y1001A1001A796',
'DK-DK2': '10Y1001A1001A796',
'FI': '10YFI-1--------U',
'PL': '10YPL-AREA-----S',
'SE': '10YSE-1--------K',
# TODO: ADD DE
}
# Some exchanges require specific domains
ENTSOE_EXCHANGE_DOMAIN_OVERRIDE = {
'AT->IT-NO': [ENTSOE_DOMAIN_MAPPINGS['AT'], ENTSOE_DOMAIN_MAPPINGS['IT']],
'BY->UA': [ENTSOE_DOMAIN_MAPPINGS['BY'], '10Y1001C--00003F'],
'DE->DK-DK1': [ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
ENTSOE_DOMAIN_MAPPINGS['DK-DK1']],
'DE->DK-DK2': [ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
ENTSOE_DOMAIN_MAPPINGS['DK-DK2']],
'DE->SE-SE4': [ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE4']],
'DK-DK2->SE': [ENTSOE_DOMAIN_MAPPINGS['DK-DK2'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE4']],
'DE->NO-NO2': [ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
ENTSOE_DOMAIN_MAPPINGS['NO-NO2']],
'FR-COR->IT-CNO': ['10Y1001A1001A893', ENTSOE_DOMAIN_MAPPINGS['IT-CNO']],
'GR->IT-SO': [ENTSOE_DOMAIN_MAPPINGS['GR'],
ENTSOE_DOMAIN_MAPPINGS['IT-SO']],
'IT-CSO->ME': [ENTSOE_DOMAIN_MAPPINGS['IT'],
ENTSOE_DOMAIN_MAPPINGS['ME']],
'NO-NO3->SE': [ENTSOE_DOMAIN_MAPPINGS['NO-NO3'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE2']],
'NO-NO4->SE': [ENTSOE_DOMAIN_MAPPINGS['NO-NO4'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE2']],
'NO-NO1->SE': [ENTSOE_DOMAIN_MAPPINGS['NO-NO1'],
ENTSOE_DOMAIN_MAPPINGS['SE-SE3']],
'PL->UA': [ENTSOE_DOMAIN_MAPPINGS['PL'], '10Y1001A1001A869'],
'IT-SIC->IT-SO': [ENTSOE_DOMAIN_MAPPINGS['IT-SIC'], ENTSOE_DOMAIN_MAPPINGS['IT-CA']],
}
# Some zone_keys are part of bidding zone domains for price data
ENTSOE_PRICE_DOMAIN_OVERRIDE = {
'DK-BHM': ENTSOE_DOMAIN_MAPPINGS['DK-DK2'],
'DE': ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
'IE': '10Y1001A1001A59C',
'LU': ENTSOE_DOMAIN_MAPPINGS['DE-LU'],
}
ENTSOE_UNITS_TO_ZONE = {
# DK-DK1
'Anholt': 'DK-DK1',
'Esbjergvaerket 3': 'DK-DK1',
'Fynsvaerket 7': 'DK-DK1',
'Horns Rev A': 'DK-DK1',
'Horns Rev B': 'DK-DK1',
'Nordjyllandsvaerket 3': 'DK-DK1',
'Silkeborgvaerket': 'DK-DK1',
'Skaerbaekvaerket 3': 'DK-DK1',
'Studstrupvaerket 3': 'DK-DK1',
'Studstrupvaerket 4': 'DK-DK1',
# DK-DK2
'Amagervaerket 3': 'DK-DK2',
'Asnaesvaerket 2': 'DK-DK2',
'Asnaesvaerket 5': 'DK-DK2',
'Avedoerevaerket 1': 'DK-DK2',
'Avedoerevaerket 2': 'DK-DK2',
'Kyndbyvaerket 21': 'DK-DK2',
'Kyndbyvaerket 22': 'DK-DK2',
'Roedsand 1': 'DK-DK2',
'Roedsand 2': 'DK-DK2',
# FI
'Alholmens B2': 'FI',
'Haapavesi B1': 'FI',
'Kaukaan Voima G10': 'FI',
'Keljonlahti B1': 'FI',
'Loviisa 1 G11': 'FI',
'Loviisa 1 G12': 'FI',
'Loviisa 2 G21': 'FI',
'Loviisa 2 G22': 'FI',
'Olkiluoto 1 B1': 'FI',
'Olkiluoto 2 B2': 'FI',
'Toppila B2': 'FI',
# SE
'Bastusel G1': 'SE',
'Forsmark block 1 G11': 'SE',
'Forsmark block 1 G12': 'SE',
'Forsmark block 2 G21': 'SE',
'Forsmark block 2 G22': 'SE',
'Forsmark block 3 G31': 'SE',
'Gallejaur G1': 'SE',
'Gallejaur G2': 'SE',
'Gasturbiner Halmstad G12': 'SE',
'Harsprånget G1': 'SE',
'Harsprånget G2': 'SE',
'Harsprånget G4': 'SE',
'Harsprånget G5': 'SE',
'KVV Västerås G3': 'SE',
'KVV1 Värtaverket': 'SE',
'KVV6 Värtaverket ': 'SE',
'KVV8 Värtaverket': 'SE',
'Karlshamn G1': 'SE',
'Karlshamn G2': 'SE',
'Karlshamn G3': 'SE',
'Letsi G1': 'SE',
'Letsi G2': 'SE',
'Letsi G3': 'SE',
'Ligga G3': 'SE',
'Messaure G1': 'SE',
'Messaure G2': 'SE',
'Messaure G3': 'SE',
'Oskarshamn G1Ö+G1V': 'SE',
'Oskarshamn G3': 'SE',
'Porjus G11': 'SE',
'Porjus G12': 'SE',
'Porsi G3': 'SE',
'Ringhals block 1 G11': 'SE',
'Ringhals block 1 G12': 'SE',
'Ringhals block 2 G21': 'SE',
'Ringhals block 2 G22': 'SE',
'Ringhals block 3 G31': 'SE',
'Ringhals block 3 G32': 'SE',
'Ringhals block 4 G41': 'SE',
'Ringhals block 4 G42': 'SE',
'Ritsem G1': 'SE',
'Rya KVV': 'SE',
'Seitevare G1': 'SE',
'Stalon G1': 'SE',
'Stenungsund B3': 'SE',
'Stenungsund B4': 'SE',
'Stornorrfors G1': 'SE',
'Stornorrfors G2': 'SE',
'Stornorrfors G3': 'SE',
'Stornorrfors G4': 'SE',
'Trängslet G1': 'SE',
'Trängslet G2': 'SE',
'Trängslet G3': 'SE',
'Uppsala KVV': 'SE',
'Vietas G1': 'SE',
'Vietas G2': 'SE',
    'Åbyverket Örebro': 'SE',
}
VALIDATIONS = {
# This is a list of criteria to ensure validity of data,
# used in validate_production()
# Note that "required" means data is present in ENTSOE.
# It will still work if data is present but 0.
# "expected_range" and "floor" only count production and storage
# - not exchanges!
'AT': {
'required': ['hydro'],
},
'BE': {
'required': ['gas', 'nuclear'],
'expected_range': (3000, 25000),
},
'BG': {
'required': ['coal', 'nuclear', 'hydro'],
'expected_range': (2000, 20000),
},
'CH': {
'required': ['hydro', 'nuclear'],
'expected_range': (2000, 25000),
},
'CZ': {
# usual load is in 7-12 GW range
'required': ['coal', 'nuclear'],
'expected_range': (3000, 25000),
},
'DE': {
# Germany sometimes has problems with categories of generation missing from ENTSOE.
# Normally there is constant production of a few GW from hydro and biomass
# and when those are missing this can indicate that others are missing as well.
# We have also never seen unknown being 0.
# Usual load is in 30 to 80 GW range.
'required': ['coal', 'gas', 'nuclear', 'wind',
'biomass', 'hydro', 'unknown', 'solar'],
'expected_range': (20000, 100000),
},
'EE': {
'required': ['coal'],
},
'ES': {
'required': ['coal', 'nuclear'],
'expected_range': (10000, 80000),
},
'FI': {
'required': ['coal', 'nuclear', 'hydro', 'biomass'],
'expected_range': (2000, 20000),
},
'GB': {
# usual load is in 15 to 50 GW range
'required': ['coal', 'gas', 'nuclear'],
'expected_range': (10000, 80000),
},
'GR': {
'required': ['coal', 'gas'],
'expected_range': (2000, 20000),
},
'HU': {
'required': ['coal', 'nuclear'],
},
'IE': {
'required': ['coal'],
'expected_range': (1000, 15000),
},
'IT': {
'required': ['coal'],
'expected_range': (5000, 50000),
},
'PL': {
# usual load is in 10-20 GW range and coal is always present
'required': ['coal'],
'expected_range': (5000, 35000),
},
'PT': {
'required': ['coal', 'gas'],
'expected_range': (1000, 20000),
},
'RO': {
'required': ['coal', 'nuclear', 'hydro'],
'expected_range': (2000, 25000),
},
'RS': {
'required': ['coal'],
},
'SI': {
# own total generation capacity is around 4 GW
'required': ['nuclear'],
'expected_range': (1000, 5000),
},
'SK': {
'required': ['nuclear']
},
}
class QueryError(Exception):
"""Raised when a query to ENTSOE returns no matching data."""
def closest_in_time_key(x, target_datetime, datetime_key='datetime'):
target_datetime = arrow.get(target_datetime)
return np.abs((x[datetime_key] - target_datetime).seconds)
def check_response(response, function_name):
"""
    Searches for an error message in the response if the query to ENTSOE fails.
    Raises a QueryError with the function name and reason for failure, unless
    the response merely reports that no matching data was found.
"""
soup = BeautifulSoup(response.text, 'html.parser')
text = soup.find_all('text')
if not response.ok:
if len(text):
error_text = soup.find_all('text')[0].prettify()
if 'No matching data found' in error_text:
return
raise QueryError('{0} failed in ENTSOE.py. Reason: {1}'.format(function_name, error_text))
else:
raise QueryError('{0} failed in ENTSOE.py. Reason: {1}'.format(function_name, response.text))
def query_ENTSOE(session, params, target_datetime=None, span=(-48, 24)):
"""
Makes a standard query to the ENTSOE API with a modifiable set of parameters.
Allows an existing session to be passed.
Raises an exception if no API token is found.
Returns a request object.
"""
if target_datetime is None:
target_datetime = arrow.utcnow()
else:
# make sure we have an arrow object
target_datetime = arrow.get(target_datetime)
params['periodStart'] = target_datetime.shift(hours=span[0]).format('YYYYMMDDHH00')
params['periodEnd'] = target_datetime.shift(hours=span[1]).format('YYYYMMDDHH00')
# Due to rate limiting, we need to spread our requests across different tokens
tokens = get_token('ENTSOE_TOKEN').split(',')
params['securityToken'] = np.random.choice(tokens)
return session.get(ENTSOE_ENDPOINT, params=params)
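# Worked example of the span arithmetic above (dates are illustrative): with the
# default span=(-48, 24) and target_datetime 2020-01-05T12:00Z, the query covers
# periodStart=202001031200 to periodEnd=202001061200.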
def query_consumption(domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A65',
'processType': 'A16',
'outBiddingZone_Domain': domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_consumption.__name__)
def query_production(in_domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A75',
'processType': 'A16', # Realised
'in_Domain': in_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime, span=(-48, 0))
if response.ok:
return response.text
else:
check_response(response, query_production.__name__)
def query_production_per_units(psr_type, domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A73',
'processType': 'A16',
'psrType': psr_type,
'in_Domain': domain,
}
# Note: ENTSOE only supports 1d queries for this type
response = query_ENTSOE(session, params, target_datetime, span=(-24, 0))
if response.ok:
return response.text
else:
check_response(response, query_production_per_units.__name__)
def query_exchange(in_domain, out_domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A11',
'in_Domain': in_domain,
'out_Domain': out_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_exchange.__name__)
def query_exchange_forecast(in_domain, out_domain, session, target_datetime=None):
"""
Gets exchange forecast for 48 hours ahead and previous 24 hours.
Returns a string object if the query succeeds.
"""
params = {
'documentType': 'A09', # Finalised schedule
'in_Domain': in_domain,
'out_Domain': out_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_exchange_forecast.__name__)
def query_price(domain, session, target_datetime=None):
"""Returns a string object if the query succeeds."""
params = {
'documentType': 'A44',
'in_Domain': domain,
'out_Domain': domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_price.__name__)
def query_generation_forecast(in_domain, session, target_datetime=None):
"""
Gets generation forecast for 48 hours ahead and previous 24 hours.
Returns a string object if the query succeeds.
"""
# Note: this does not give a breakdown of the production
params = {
'documentType': 'A71', # Generation Forecast
        'processType': 'A01',  # Day ahead
'in_Domain': in_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
check_response(response, query_generation_forecast.__name__)
def query_consumption_forecast(in_domain, session, target_datetime=None):
"""
Gets consumption forecast for 48 hours ahead and previous 24 hours.
Returns a string object if the query succeeds.
"""
params = {
'documentType': 'A65', # Load Forecast
'processType': 'A01',
'outBiddingZone_Domain': in_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
        check_response(response, query_consumption_forecast.__name__)
def query_wind_solar_production_forecast(in_domain, session, target_datetime=None):
"""
    Gets wind and solar production forecast for 48 hours ahead and previous 24 hours.
Returns a string object if the query succeeds.
"""
params = {
'documentType': 'A69', # Forecast
'processType': 'A01',
'in_Domain': in_domain,
}
response = query_ENTSOE(session, params, target_datetime=target_datetime)
if response.ok:
return response.text
else:
        check_response(response, query_wind_solar_production_forecast.__name__)
def datetime_from_position(start, position, resolution):
"""Finds time granularity of data."""
m = re.search(r'PT(\d+)([M])', resolution)
if m:
digits = int(m.group(1))
scale = m.group(2)
if scale == 'M':
return start.shift(minutes=(position - 1) * digits)
raise NotImplementedError('Could not recognise resolution %s' % resolution)
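# Illustrative example: for a timeseries starting at 2020-01-01T00:00Z with
# resolution 'PT15M', position 1 maps to 00:00 and position 3 to 00:30, i.e.
# start + (position - 1) * 15 minutes.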
def parse_scalar(xml_text, only_inBiddingZone_Domain=False, only_outBiddingZone_Domain=False):
"""Returns a tuple containing two lists."""
if not xml_text:
return None
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
values = []
datetimes = []
for timeseries in soup.find_all('timeseries'):
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
if only_inBiddingZone_Domain:
if not len(timeseries.find_all('inBiddingZone_Domain.mRID'.lower())):
continue
elif only_outBiddingZone_Domain:
if not len(timeseries.find_all('outBiddingZone_Domain.mRID'.lower())):
continue
for entry in timeseries.find_all('point'):
position = int(entry.find_all('position')[0].contents[0])
value = float(entry.find_all('quantity')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
values.append(value)
datetimes.append(datetime)
return values, datetimes
def parse_production(xml_text):
"""Returns a tuple containing two lists."""
if not xml_text:
return None
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
productions = []
datetimes = []
for timeseries in soup.find_all('timeseries'):
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
is_production = len(timeseries.find_all('inBiddingZone_Domain.mRID'.lower())) > 0
psr_type = timeseries.find_all('mktpsrtype')[0].find_all('psrtype')[0].contents[0]
for entry in timeseries.find_all('point'):
quantity = float(entry.find_all('quantity')[0].contents[0])
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
try:
i = datetimes.index(datetime)
if is_production:
productions[i][psr_type] += quantity
elif psr_type in ENTSOE_STORAGE_PARAMETERS:
# Only include consumption if it's for storage. In other cases
# it is power plant self-consumption which should be ignored.
productions[i][psr_type] -= quantity
except ValueError: # Not in list
datetimes.append(datetime)
productions.append(defaultdict(lambda: 0))
productions[-1][psr_type] = quantity if is_production else -1 * quantity
return productions, datetimes
def parse_self_consumption(xml_text):
"""
Parses the XML text and returns a dict of datetimes to the total self-consumption
value from all sources.
Self-consumption is the electricity used by a generation source.
This is defined as any consumption source (i.e. outBiddingZone_Domain.mRID)
that is not storage, e.g. consumption for B04 (Fossil Gas) is counted as
self-consumption, but consumption for B10 (Hydro Pumped Storage) is not.
In most cases, total self-consumption is reported by ENTSOE as 0, therefore the returned
dict only includes datetimes where the value > 0.
"""
if not xml_text: return None
soup = BeautifulSoup(xml_text, 'html.parser')
res = {}
for timeseries in soup.find_all('timeseries'):
is_consumption = len(timeseries.find_all('outBiddingZone_Domain.mRID'.lower())) > 0
if not is_consumption: continue
psr_type = timeseries.find_all('mktpsrtype')[0].find_all('psrtype')[0].contents[0]
if psr_type in ENTSOE_STORAGE_PARAMETERS: continue
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
for entry in timeseries.find_all('point'):
quantity = float(entry.find_all('quantity')[0].contents[0])
if quantity == 0: continue
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
res[datetime] = res[datetime] + quantity if datetime in res else quantity
return res
def parse_production_per_units(xml_text):
"""Returns a dict indexed by the (datetime, unit_key) key"""
values = {}
if not xml_text:
return None
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
for timeseries in soup.find_all('timeseries'):
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
is_production = len(timeseries.find_all('inBiddingZone_Domain.mRID'.lower())) > 0
psr_type = timeseries.find_all('mktpsrtype')[0].find_all('psrtype')[0].contents[0]
unit_key = timeseries.find_all('mktpsrtype')[0].find_all(
'powersystemresources')[0].find_all('mrid')[0].contents[0]
unit_name = timeseries.find_all('mktpsrtype')[0].find_all(
'powersystemresources')[0].find_all('name')[0].contents[0]
if not is_production: continue
for entry in timeseries.find_all('point'):
quantity = float(entry.find_all('quantity')[0].contents[0])
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
key = (unit_key, datetime)
if key in values:
if is_production:
values[key]['production'] += quantity
else:
values[key]['production'] -= quantity
else:
values[key] = {
'datetime': datetime,
'production': quantity,
'productionType': ENTSOE_PARAMETER_BY_GROUP[psr_type],
'unitKey': unit_key,
'unitName': unit_name
}
return values.values()
def parse_exchange(xml_text, is_import, quantities=None, datetimes=None):
"""Returns a tuple containing two lists."""
if not xml_text:
return None
quantities = quantities or []
datetimes = datetimes or []
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
for timeseries in soup.find_all('timeseries'):
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
        # Only use contract_marketagreement.type == A05 (Total) to avoid double counting some columns
if timeseries.find_all('contract_marketagreement.type') and \
timeseries.find_all('contract_marketagreement.type')[0].contents[0] != 'A05':
continue
for entry in timeseries.find_all('point'):
quantity = float(entry.find_all('quantity')[0].contents[0])
if not is_import:
quantity *= -1
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
# Find out whether or not we should update the net production
try:
i = datetimes.index(datetime)
quantities[i] += quantity
except ValueError: # Not in list
quantities.append(quantity)
datetimes.append(datetime)
return quantities, datetimes
def parse_price(xml_text):
"""Returns a tuple containing three lists."""
if not xml_text:
return None
soup = BeautifulSoup(xml_text, 'html.parser')
# Get all points
prices = []
currencies = []
datetimes = []
for timeseries in soup.find_all('timeseries'):
currency = timeseries.find_all('currency_unit.name')[0].contents[0]
resolution = timeseries.find_all('resolution')[0].contents[0]
datetime_start = arrow.get(timeseries.find_all('start')[0].contents[0])
for entry in timeseries.find_all('point'):
position = int(entry.find_all('position')[0].contents[0])
datetime = datetime_from_position(datetime_start, position, resolution)
prices.append(float(entry.find_all('price.amount')[0].contents[0]))
datetimes.append(datetime)
currencies.append(currency)
return prices, currencies, datetimes
def validate_production(datapoint, logger):
"""
Production data can sometimes be available but clearly wrong.
The most common occurrence is when the production total is very low and
main generation types are missing. In reality a country's electrical grid
could not function in this scenario.
This function checks datapoints for a selection of countries and returns
False if invalid and True otherwise.
"""
zone_key = datapoint['zoneKey']
validation_criteria = VALIDATIONS.get(zone_key, {})
if validation_criteria:
return validate(datapoint, logger=logger, **validation_criteria)
if zone_key.startswith('DK-'):
return validate(datapoint, logger=logger, required=['coal', 'solar', 'wind'])
if zone_key.startswith('NO-'):
return validate(datapoint, logger=logger, required=['hydro'])
return True
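# For example, a 'DE' datapoint reporting only solar and wind with a 5 GW total
# would be dropped here: required fuels such as nuclear and hydro are missing
# and the total falls outside the (20000, 100000) MW expected range configured
# in VALIDATIONS above.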
def get_wind(values):
if 'Wind Onshore' in values or 'Wind Offshore' in values:
return values.get('Wind Onshore', 0) + values.get('Wind Offshore', 0)
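# e.g. get_wind({'Wind Onshore': 1200.0, 'Wind Offshore': 300.0}) == 1500.0;
# if neither key is present the function implicitly returns None, which the
# caller passes through as an unknown wind value.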
def fetch_consumption(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""Gets consumption for a specified zone, returns a dictionary."""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab consumption
parsed = parse_scalar(
query_consumption(domain, session, target_datetime=target_datetime),
only_outBiddingZone_Domain=True)
if parsed:
quantities, datetimes = parsed
# Add power plant self-consumption data. This is reported as part of the
# production data by ENTSOE.
# self_consumption is a dict of datetimes to the total self-consumption value
# from all sources.
# Only datetimes where the value > 0 are included.
self_consumption = parse_self_consumption(
query_production(domain, session,
target_datetime=target_datetime))
for dt, value in self_consumption.items():
try:
i = datetimes.index(dt)
except ValueError:
logger.warning(
f'No corresponding consumption value found for self-consumption at {dt}')
continue
quantities[i] += value
# if a target_datetime was requested, we return everything
if target_datetime:
return [{
'zoneKey': zone_key,
'datetime': dt.datetime,
'consumption': quantity,
'source': 'entsoe.eu'
} for dt, quantity in zip(datetimes, quantities)]
# else we keep the last stored value
# Note, this may not include self-consumption data as sometimes consumption
# data is available for a given TZ a few minutes before production data is.
dt, quantity = datetimes[-1].datetime, quantities[-1]
if dt not in self_consumption:
logger.warning(f'Self-consumption data not yet available for {zone_key} at {dt}')
data = {
'zoneKey': zone_key,
'datetime': dt,
'consumption': quantity,
'source': 'entsoe.eu'
}
return data
def fetch_production(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets values and corresponding datetimes for all production types in the
specified zone. Removes any values that are in the future or don't have
a datetime associated with them.
Returns a list of dictionaries that have been validated.
"""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab production
parsed = parse_production(
query_production(domain, session,
target_datetime=target_datetime))
if not parsed:
return None
productions, production_dates = parsed
data = []
for i in range(len(production_dates)):
production_values = {k: v for k, v in productions[i].items()}
production_date = production_dates[i]
production_types = {'production': {}, 'storage': {}}
for key in ['production', 'storage']:
parameter_groups = ENTSOE_PARAMETER_GROUPS[key]
multiplier = -1 if key == 'storage' else 1
for fuel, groups in parameter_groups.items():
has_value = any([production_values.get(grp) is not None for grp in groups])
if has_value:
value = sum([production_values.get(grp, 0) for grp in groups])
value *= multiplier
else:
value = None
production_types[key][fuel] = value
data.append({
'zoneKey': zone_key,
'datetime': production_date.datetime,
'production': production_types['production'],
'storage': {
'hydro': production_types['storage']['hydro storage'],
},
'source': 'entsoe.eu'
})
for d in data:
for k, v in d['production'].items():
if v is None: continue
if v < 0 and v > -50:
# Set small negative values to 0
logger.warning('Setting small value of %s (%s) to 0.' % (k, v),
extra={'key': zone_key})
d['production'][k] = 0
return list(filter(lambda x: validate_production(x, logger), data))
ZONE_KEY_AGGREGATES = {
'IT-SO': ['IT-CA', 'IT-SO'],
}
# TODO: generalize and move to lib.utils so other parsers can reuse it. (it's
# currently used by US_SEC.)
def merge_production_outputs(parser_outputs, merge_zone_key, merge_source=None):
"""
Given multiple parser outputs, sum the production and storage
of corresponding datetimes to create a production list.
This will drop rows where the datetime is missing in at least a
parser_output.
"""
if len(parser_outputs) == 0:
return []
if merge_source is None:
merge_source = parser_outputs[0][0]['source']
prod_and_storage_dfs = [
pd.DataFrame(output).set_index('datetime')[['production', 'storage']]
for output in parser_outputs
]
to_return = prod_and_storage_dfs[0]
for prod_and_storage in prod_and_storage_dfs[1:]:
# `inner` join drops rows where one of the production is missing
to_return = to_return.join(
prod_and_storage, how='inner', rsuffix='_other')
to_return['production'] = to_return.apply(
lambda row: sum_production_dicts(row.production,
row.production_other),
axis=1)
to_return['storage'] = to_return.apply(
lambda row: sum_production_dicts(row.storage, row.storage_other),
axis=1)
to_return = to_return[['production', 'storage']]
return [{
'datetime': dt.to_pydatetime(),
'production': row.production,
'storage': row.storage,
'source': merge_source,
'zoneKey': merge_zone_key,
} for dt, row in to_return.iterrows()]
def fetch_production_aggregate(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
if zone_key not in ZONE_KEY_AGGREGATES:
raise ValueError('Unknown aggregate key %s' % zone_key)
return merge_production_outputs(
[fetch_production(k, session, target_datetime, logger)
for k in ZONE_KEY_AGGREGATES[zone_key]],
zone_key)
def fetch_production_per_units(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Returns a list of all production units and production values as a list
of dictionaries
"""
if not session:
session = requests.session()
domain = ENTSOE_EIC_MAPPING[zone_key]
data = []
# Iterate over all psr types
for k in ENTSOE_PARAMETER_DESC.keys():
try:
values = parse_production_per_units(
query_production_per_units(k, domain, session, target_datetime)) or []
for v in values:
if not v:
continue
v['datetime'] = v['datetime'].datetime
v['source'] = 'entsoe.eu'
if not v['unitName'] in ENTSOE_UNITS_TO_ZONE:
logger.warning('Unknown unit %s with id %s' % (v['unitName'], v['unitKey']))
else:
v['zoneKey'] = ENTSOE_UNITS_TO_ZONE[v['unitName']]
if v['zoneKey'] == zone_key:
data.append(v)
except QueryError:
pass
return data
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets exchange status between two specified zones.
Removes any datapoints that are in the future.
Returns a list of dictionaries.
"""
if not session:
session = requests.session()
sorted_zone_keys = sorted([zone_key1, zone_key2])
key = '->'.join(sorted_zone_keys)
if key in ENTSOE_EXCHANGE_DOMAIN_OVERRIDE:
domain1, domain2 = ENTSOE_EXCHANGE_DOMAIN_OVERRIDE[key]
else:
domain1 = ENTSOE_DOMAIN_MAPPINGS[zone_key1]
domain2 = ENTSOE_DOMAIN_MAPPINGS[zone_key2]
# Create a hashmap with key (datetime)
exchange_hashmap = {}
# Grab exchange
# Import
parsed = parse_exchange(
query_exchange(domain1, domain2, session, target_datetime=target_datetime),
is_import=True)
if parsed:
# Export
parsed = parse_exchange(
xml_text=query_exchange(domain2, domain1, session, target_datetime=target_datetime),
is_import=False, quantities=parsed[0], datetimes=parsed[1])
if parsed:
quantities, datetimes = parsed
for i in range(len(quantities)):
exchange_hashmap[datetimes[i]] = quantities[i]
# Remove all dates in the future
exchange_dates = sorted(set(exchange_hashmap.keys()), reverse=True)
exchange_dates = list(filter(lambda x: x <= arrow.now(), exchange_dates))
if not len(exchange_dates):
return None
data = []
for exchange_date in exchange_dates:
net_flow = exchange_hashmap[exchange_date]
data.append({
'sortedZoneKeys': key,
'datetime': exchange_date.datetime,
            'netFlow': net_flow if zone_key1 == sorted_zone_keys[0] else -1 * net_flow,
'source': 'entsoe.eu'
})
return data
def fetch_exchange_forecast(zone_key1, zone_key2, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets exchange forecast between two specified zones.
Returns a list of dictionaries.
"""
if not session:
session = requests.session()
sorted_zone_keys = sorted([zone_key1, zone_key2])
key = '->'.join(sorted_zone_keys)
if key in ENTSOE_EXCHANGE_DOMAIN_OVERRIDE:
domain1, domain2 = ENTSOE_EXCHANGE_DOMAIN_OVERRIDE[key]
else:
domain1 = ENTSOE_DOMAIN_MAPPINGS[zone_key1]
domain2 = ENTSOE_DOMAIN_MAPPINGS[zone_key2]
# Create a hashmap with key (datetime)
exchange_hashmap = {}
# Grab exchange
# Import
parsed = parse_exchange(
query_exchange_forecast(domain1, domain2, session, target_datetime=target_datetime),
is_import=True)
if parsed:
# Export
parsed = parse_exchange(
xml_text=query_exchange_forecast(domain2, domain1, session,
target_datetime=target_datetime),
is_import=False, quantities=parsed[0], datetimes=parsed[1])
if parsed:
quantities, datetimes = parsed
for i in range(len(quantities)):
exchange_hashmap[datetimes[i]] = quantities[i]
    # Sort datetimes, most recent first (future datetimes are kept: this is a forecast)
    exchange_dates = list(sorted(set(exchange_hashmap.keys()), reverse=True))
if not len(exchange_dates):
return None
data = []
for exchange_date in exchange_dates:
netFlow = exchange_hashmap[exchange_date]
data.append({
'sortedZoneKeys': key,
'datetime': exchange_date.datetime,
            'netFlow': netFlow if zone_key1 == sorted_zone_keys[0] else -1 * netFlow,
'source': 'entsoe.eu'
})
return data
def fetch_price(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets day-ahead price for specified zone.
Returns a list of dictionaries.
"""
# Note: This is day-ahead prices
if not session:
session = requests.session()
if zone_key in ENTSOE_PRICE_DOMAIN_OVERRIDE:
domain = ENTSOE_PRICE_DOMAIN_OVERRIDE[zone_key]
else:
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
    # Grab day-ahead prices
parsed = parse_price(query_price(domain, session, target_datetime=target_datetime))
if parsed:
data = []
prices, currencies, datetimes = parsed
for i in range(len(prices)):
data.append({
'zoneKey': zone_key,
'datetime': datetimes[i].datetime,
'currency': currencies[i],
'price': prices[i],
'source': 'entsoe.eu'
})
return data
def fetch_generation_forecast(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets generation forecast for specified zone.
Returns a list of dictionaries.
"""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
    # Grab generation forecast
parsed = parse_scalar(query_generation_forecast(
domain, session, target_datetime=target_datetime), only_inBiddingZone_Domain=True)
if parsed:
data = []
values, datetimes = parsed
for i in range(len(values)):
data.append({
'zoneKey': zone_key,
'datetime': datetimes[i].datetime,
'value': values[i],
'source': 'entsoe.eu'
})
return data
def fetch_consumption_forecast(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
Gets consumption forecast for specified zone.
Returns a list of dictionaries.
"""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab consumption
parsed = parse_scalar(query_consumption_forecast(
domain, session, target_datetime=target_datetime), only_outBiddingZone_Domain=True)
if parsed:
data = []
values, datetimes = parsed
for i in range(len(values)):
data.append({
'zoneKey': zone_key,
'datetime': datetimes[i].datetime,
'value': values[i],
'source': 'entsoe.eu'
})
return data
def fetch_wind_solar_forecasts(zone_key, session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
"""
    Gets forecasted wind and solar production values and corresponding
    datetimes for the specified zone.
    Returns a list of dictionaries.
"""
if not session:
session = requests.session()
domain = ENTSOE_DOMAIN_MAPPINGS[zone_key]
# Grab production
parsed = parse_production(
query_wind_solar_production_forecast(domain, session,
target_datetime=target_datetime))
if not parsed:
return None
productions, production_dates = parsed
data = []
for i in range(len(production_dates)):
production_values = {ENTSOE_PARAMETER_DESC[k]: v for k, v in
productions[i].items()}
production_date = production_dates[i]
data.append({
'zoneKey': zone_key,
'datetime': production_date.datetime,
'production': {
'solar': production_values.get('Solar', None),
'wind': get_wind(production_values),
},
'source': 'entsoe.eu'
})
return data
the-stack_0_2398
"""Support for ADS covers."""
import logging
import voluptuous as vol
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import (
CONF_ADS_VAR,
CONF_ADS_VAR_POSITION,
DATA_ADS,
STATE_KEY_POSITION,
STATE_KEY_STATE,
AdsEntity,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "ADS Cover"
CONF_ADS_VAR_SET_POS = "adsvar_set_position"
CONF_ADS_VAR_OPEN = "adsvar_open"
CONF_ADS_VAR_CLOSE = "adsvar_close"
CONF_ADS_VAR_STOP = "adsvar_stop"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_ADS_VAR): cv.string,
vol.Optional(CONF_ADS_VAR_POSITION): cv.string,
vol.Optional(CONF_ADS_VAR_SET_POS): cv.string,
vol.Optional(CONF_ADS_VAR_CLOSE): cv.string,
vol.Optional(CONF_ADS_VAR_OPEN): cv.string,
vol.Optional(CONF_ADS_VAR_STOP): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
}
)
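# Illustrative configuration.yaml sketch for the schema above (the YAML keys
# follow the CONF_* constants; the PLC variable names are assumptions, not taken
# from a real ADS program):
#
# cover:
#   - platform: ads
#     adsvar: .bShutterClosed
#     adsvar_position: .byShutterPosition
#     adsvar_set_position: .byShutterSetPosition
#     adsvar_open: .bShutterOpen
#     adsvar_close: .bShutterClose
#     adsvar_stop: .bShutterStop
#     name: Living room shutter
#     device_class: shutter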
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the cover platform for ADS."""
ads_hub = hass.data[DATA_ADS]
ads_var_is_closed = config.get(CONF_ADS_VAR)
ads_var_position = config.get(CONF_ADS_VAR_POSITION)
ads_var_pos_set = config.get(CONF_ADS_VAR_SET_POS)
ads_var_open = config.get(CONF_ADS_VAR_OPEN)
ads_var_close = config.get(CONF_ADS_VAR_CLOSE)
ads_var_stop = config.get(CONF_ADS_VAR_STOP)
name = config[CONF_NAME]
device_class = config.get(CONF_DEVICE_CLASS)
add_entities(
[
AdsCover(
ads_hub,
ads_var_is_closed,
ads_var_position,
ads_var_pos_set,
ads_var_open,
ads_var_close,
ads_var_stop,
name,
device_class,
)
]
)
class AdsCover(AdsEntity, CoverEntity):
"""Representation of ADS cover."""
def __init__(
self,
ads_hub,
ads_var_is_closed,
ads_var_position,
ads_var_pos_set,
ads_var_open,
ads_var_close,
ads_var_stop,
name,
device_class,
):
"""Initialize AdsCover entity."""
super().__init__(ads_hub, name, ads_var_is_closed)
if self._ads_var is None:
if ads_var_position is not None:
self._unique_id = ads_var_position
elif ads_var_pos_set is not None:
self._unique_id = ads_var_pos_set
elif ads_var_open is not None:
self._unique_id = ads_var_open
self._state_dict[STATE_KEY_POSITION] = None
self._ads_var_position = ads_var_position
self._ads_var_pos_set = ads_var_pos_set
self._ads_var_open = ads_var_open
self._ads_var_close = ads_var_close
self._ads_var_stop = ads_var_stop
self._device_class = device_class
async def async_added_to_hass(self):
"""Register device notification."""
if self._ads_var is not None:
await self.async_initialize_device(
self._ads_var, self._ads_hub.PLCTYPE_BOOL
)
if self._ads_var_position is not None:
await self.async_initialize_device(
self._ads_var_position, self._ads_hub.PLCTYPE_BYTE, STATE_KEY_POSITION
)
@property
def device_class(self):
"""Return the class of this cover."""
return self._device_class
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._ads_var is not None:
return self._state_dict[STATE_KEY_STATE]
if self._ads_var_position is not None:
return self._state_dict[STATE_KEY_POSITION] == 0
return None
@property
def current_cover_position(self):
"""Return current position of cover."""
return self._state_dict[STATE_KEY_POSITION]
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE
if self._ads_var_stop is not None:
supported_features |= SUPPORT_STOP
if self._ads_var_pos_set is not None:
supported_features |= SUPPORT_SET_POSITION
return supported_features
def stop_cover(self, **kwargs):
"""Fire the stop action."""
if self._ads_var_stop:
self._ads_hub.write_by_name(
self._ads_var_stop, True, self._ads_hub.PLCTYPE_BOOL
)
def set_cover_position(self, **kwargs):
"""Set cover position."""
position = kwargs[ATTR_POSITION]
if self._ads_var_pos_set is not None:
self._ads_hub.write_by_name(
self._ads_var_pos_set, position, self._ads_hub.PLCTYPE_BYTE
)
def open_cover(self, **kwargs):
"""Move the cover up."""
if self._ads_var_open is not None:
self._ads_hub.write_by_name(
self._ads_var_open, True, self._ads_hub.PLCTYPE_BOOL
)
elif self._ads_var_pos_set is not None:
self.set_cover_position(position=100)
def close_cover(self, **kwargs):
"""Move the cover down."""
if self._ads_var_close is not None:
self._ads_hub.write_by_name(
self._ads_var_close, True, self._ads_hub.PLCTYPE_BOOL
)
elif self._ads_var_pos_set is not None:
self.set_cover_position(position=0)
@property
def available(self):
"""Return False if state has not been updated yet."""
if self._ads_var is not None or self._ads_var_position is not None:
return (
self._state_dict[STATE_KEY_STATE] is not None
or self._state_dict[STATE_KEY_POSITION] is not None
)
return True
the-stack_0_2399
"""DataUpdateCoordinator for the Yale integration."""
from __future__ import annotations
from datetime import timedelta
from typing import Any
from yalesmartalarmclient.client import YaleSmartAlarmClient
from yalesmartalarmclient.exceptions import AuthenticationError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN, LOGGER, YALE_BASE_ERRORS
class YaleDataUpdateCoordinator(DataUpdateCoordinator):
"""A Yale Data Update Coordinator."""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Initialize the Yale hub."""
self.entry = entry
self.yale: YaleSmartAlarmClient | None = None
super().__init__(
hass,
LOGGER,
name=DOMAIN,
update_interval=timedelta(seconds=DEFAULT_SCAN_INTERVAL),
)
async def _async_update_data(self) -> dict[str, Any]:
"""Fetch data from Yale."""
updates = await self.hass.async_add_executor_job(self.get_updates)
locks = []
door_windows = []
for device in updates["cycle"]["device_status"]:
state = device["status1"]
if device["type"] == "device_type.door_lock":
lock_status_str = device["minigw_lock_status"]
lock_status = int(str(lock_status_str or 0), 16)
closed = (lock_status & 16) == 16
locked = (lock_status & 1) == 1
if not lock_status and "device_status.lock" in state:
device["_state"] = "locked"
device["_state2"] = "unknown"
locks.append(device)
continue
if not lock_status and "device_status.unlock" in state:
device["_state"] = "unlocked"
device["_state2"] = "unknown"
locks.append(device)
continue
if (
lock_status
and (
"device_status.lock" in state or "device_status.unlock" in state
)
and closed
and locked
):
device["_state"] = "locked"
device["_state2"] = "closed"
locks.append(device)
continue
if (
lock_status
and (
"device_status.lock" in state or "device_status.unlock" in state
)
and closed
and not locked
):
device["_state"] = "unlocked"
device["_state2"] = "closed"
locks.append(device)
continue
if (
lock_status
and (
"device_status.lock" in state or "device_status.unlock" in state
)
and not closed
):
device["_state"] = "unlocked"
device["_state2"] = "open"
locks.append(device)
continue
device["_state"] = "unavailable"
locks.append(device)
continue
if device["type"] == "device_type.door_contact":
if "device_status.dc_close" in state:
device["_state"] = "closed"
door_windows.append(device)
continue
if "device_status.dc_open" in state:
device["_state"] = "open"
door_windows.append(device)
continue
device["_state"] = "unavailable"
door_windows.append(device)
continue
_sensor_map = {
contact["address"]: contact["_state"] for contact in door_windows
}
_lock_map = {lock["address"]: lock["_state"] for lock in locks}
return {
"alarm": updates["arm_status"],
"locks": locks,
"door_windows": door_windows,
"status": updates["status"],
"online": updates["online"],
"sensor_map": _sensor_map,
"lock_map": _lock_map,
"panel_info": updates["panel_info"],
}
def get_updates(self) -> dict[str, Any]:
"""Fetch data from Yale."""
if self.yale is None:
try:
self.yale = YaleSmartAlarmClient(
self.entry.data[CONF_USERNAME], self.entry.data[CONF_PASSWORD]
)
except AuthenticationError as error:
raise ConfigEntryAuthFailed from error
except YALE_BASE_ERRORS as error:
raise UpdateFailed from error
try:
arm_status = self.yale.get_armed_status()
data = self.yale.get_all()
cycle = data["CYCLE"]
status = data["STATUS"]
online = data["ONLINE"]
panel_info = data["PANEL INFO"]
except AuthenticationError as error:
raise ConfigEntryAuthFailed from error
except YALE_BASE_ERRORS as error:
raise UpdateFailed from error
return {
"arm_status": arm_status,
"cycle": cycle,
"status": status,
"online": online,
"panel_info": panel_info,
}
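# Illustrative sketch (not part of the integration): how the minigw_lock_status
# hex string is decoded in _async_update_data above. Bit 0 means "locked" and
# bit 4 means "closed"; the helper name below is hypothetical, for documentation only.
def _example_lock_status_bits():
    lock_status = int("11", 16)          # e.g. a "11" status string from the panel
    closed = (lock_status & 16) == 16    # True: bit 4 is set
    locked = (lock_status & 1) == 1      # True: bit 0 is set
    return closed, locked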
|
the-stack_0_2400 | """
weasyprint.layout.percentages
-----------------------------
Resolve percentages into fixed values.
:copyright: Copyright 2011-2018 Simon Sapin and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from ..formatting_structure import boxes
def _percentage(value, refer_to):
"""Get the value corresponding to the value/percentage and the reference
``refer_to`` is the length for 100%. If ``refer_to`` is not a number, it
just replaces percentages.
"""
if value == 'auto':
result = value
elif value.unit == 'px':
result = value.value
else:
assert value.unit == '%'
result = value.value * refer_to / 100.
return result
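# Illustrative sketch (not part of the module): a tiny stand-in value object is
# enough to exercise _percentage; real callers pass computed style values. The
# helper and the Dimension stand-in below are hypothetical, for documentation only.
def _example_percentage_usage():
    from collections import namedtuple
    Dimension = namedtuple('Dimension', 'value unit')
    assert _percentage(Dimension(50, '%'), 200) == 100.0  # 50% of a 200px reference
    assert _percentage(Dimension(30, 'px'), 200) == 30    # absolute lengths pass through
    assert _percentage('auto', 200) == 'auto'             # 'auto' is preserved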
def resolve_one_percentage(box, property_name, refer_to,
main_flex_direction=None):
"""Set a used length value from a computed length value.
``refer_to`` is the length for 100%. If ``refer_to`` is not a number, it
just replaces percentages.
"""
# box.style has computed values
value = box.style[property_name]
# box attributes are used values
percentage = _percentage(value, refer_to)
setattr(box, property_name, percentage)
if property_name in ('min_width', 'min_height') and percentage == 'auto':
if (main_flex_direction is None or
property_name != ('min_%s' % main_flex_direction)):
setattr(box, property_name, 0)
def resolve_position_percentages(box, containing_block):
cb_width, cb_height = containing_block
resolve_one_percentage(box, 'left', cb_width)
resolve_one_percentage(box, 'right', cb_width)
resolve_one_percentage(box, 'top', cb_height)
resolve_one_percentage(box, 'bottom', cb_height)
def resolve_percentages(box, containing_block, main_flex_direction=None):
"""Set used values as attributes of the box object."""
if isinstance(containing_block, boxes.Box):
# cb is short for containing block
cb_width = containing_block.width
cb_height = containing_block.height
else:
cb_width, cb_height = containing_block
if isinstance(box, boxes.PageBox):
maybe_height = cb_height
else:
maybe_height = cb_width
resolve_one_percentage(box, 'margin_left', cb_width)
resolve_one_percentage(box, 'margin_right', cb_width)
resolve_one_percentage(box, 'margin_top', maybe_height)
resolve_one_percentage(box, 'margin_bottom', maybe_height)
resolve_one_percentage(box, 'padding_left', cb_width)
resolve_one_percentage(box, 'padding_right', cb_width)
resolve_one_percentage(box, 'padding_top', maybe_height)
resolve_one_percentage(box, 'padding_bottom', maybe_height)
resolve_one_percentage(box, 'width', cb_width)
resolve_one_percentage(box, 'min_width', cb_width, main_flex_direction)
resolve_one_percentage(box, 'max_width', cb_width, main_flex_direction)
# XXX later: top, bottom, left and right on positioned elements
if cb_height == 'auto':
# Special handling when the height of the containing block
# depends on its content.
height = box.style['height']
if height == 'auto' or height.unit == '%':
box.height = 'auto'
else:
assert height.unit == 'px'
box.height = height.value
resolve_one_percentage(box, 'min_height', 0)
resolve_one_percentage(box, 'max_height', float('inf'))
else:
resolve_one_percentage(box, 'height', cb_height)
resolve_one_percentage(box, 'min_height', cb_height)
resolve_one_percentage(box, 'max_height', cb_height)
# Used value == computed value
for side in ['top', 'right', 'bottom', 'left']:
prop = 'border_{0}_width'.format(side)
setattr(box, prop, box.style[prop])
if box.style['box_sizing'] == 'border-box':
if box.width != 'auto':
box.width -= (box.padding_left + box.padding_right +
box.border_left_width + box.border_right_width)
if box.height != 'auto':
box.height -= (box.padding_top + box.padding_bottom +
box.border_top_width + box.border_bottom_width)
elif box.style['box_sizing'] == 'padding-box':
if box.width != 'auto':
box.width -= box.padding_left + box.padding_right
if box.height != 'auto':
box.height -= box.padding_top + box.padding_bottom
else:
assert box.style['box_sizing'] == 'content-box'
def resolve_radii_percentages(box):
corners = ('top_left', 'top_right', 'bottom_right', 'bottom_left')
for corner in corners:
property_name = 'border_%s_radius' % corner
rx, ry = box.style[property_name]
rx = _percentage(rx, box.border_width())
ry = _percentage(ry, box.border_height())
setattr(box, property_name, (rx, ry))
|
the-stack_0_2401 | # -*- coding: utf-8 -*-
#
# django-otp-yubikey documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 22 16:13:25 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# autodoc and viewcode need valid settings in order to process Django modules.
import django
import django.conf
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../ext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'otpdocs',
]
django.conf.settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS=[
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django_otp',
'otp_yubikey',
],
SECRET_KEY='properly-configured',
)
django.setup()
intersphinx_mapping = {
'python': ('http://docs.python.org/3/', None),
'django': ('https://docs.djangoproject.com/en/1.11/',
'https://docs.djangoproject.com/en/1.11/_objects/'),
'django-otp': ('http://django-otp-official.readthedocs.io/en/latest/', None),
'yubiotp': ('http://yubiotp.readthedocs.io/en/latest/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-otp-yubikey'
copyright = '2012, Peter Sagerson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The short X.Y version.
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-otp-yubikeydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-otp-yubikey.tex', 'django-otp-yubikey Documentation',
'Peter Sagerson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-otp-yubikey', 'django-otp-yubikey Documentation',
['Peter Sagerson'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'django-otp-yubikey', 'django-otp-yubikey Documentation',
'Peter Sagerson', 'django-otp-yubikey', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
the-stack_0_2404 | """Rules for writing tests with JUnit"""
load("@bazel_skylib//lib:dicts.bzl", _dicts = "dicts")
load(
"@io_bazel_rules_scala//scala/private:common_attributes.bzl",
"common_attrs",
"implicit_deps",
"launcher_template",
)
load("@io_bazel_rules_scala//scala/private:common_outputs.bzl", "common_outputs")
load(
"@io_bazel_rules_scala//scala/private:phases/phases.bzl",
"extras_phases",
"phase_collect_jars_junit_test",
"phase_compile_junit_test",
"phase_coverage_common",
"phase_declare_executable",
"phase_default_info",
"phase_java_wrapper_common",
"phase_jvm_flags",
"phase_merge_jars",
"phase_runfiles_common",
"phase_scalac_provider",
"phase_unused_deps_checker",
"phase_write_executable_junit_test",
"phase_write_manifest",
"run_phases",
)
def _scala_junit_test_impl(ctx):
if (not (ctx.attr.prefixes) and not (ctx.attr.suffixes)):
fail(
"Setting at least one of the attributes ('prefixes','suffixes') is required",
)
return run_phases(
ctx,
# customizable phases
[
("scalac_provider", phase_scalac_provider),
("write_manifest", phase_write_manifest),
("unused_deps_checker", phase_unused_deps_checker),
("collect_jars", phase_collect_jars_junit_test),
("java_wrapper", phase_java_wrapper_common),
("declare_executable", phase_declare_executable),
# no need to build an ijar for an executable
("compile", phase_compile_junit_test),
("coverage", phase_coverage_common),
("merge_jars", phase_merge_jars),
("runfiles", phase_runfiles_common),
("jvm_flags", phase_jvm_flags),
("write_executable", phase_write_executable_junit_test),
("default_info", phase_default_info),
],
)
_scala_junit_test_attrs = {
"prefixes": attr.string_list(default = []),
"suffixes": attr.string_list(default = []),
"suite_label": attr.label(
default = Label(
"//src/java/io/bazel/rulesscala/test_discovery:test_discovery",
),
),
"suite_class": attr.string(
default = "io.bazel.rulesscala.test_discovery.DiscoveredTestSuite",
),
"print_discovered_classes": attr.bool(
default = False,
mandatory = False,
),
"jvm_flags": attr.string_list(),
"_junit": attr.label(
default = Label(
"//external:io_bazel_rules_scala/dependency/junit/junit",
),
),
"_hamcrest": attr.label(
default = Label(
"//external:io_bazel_rules_scala/dependency/hamcrest/hamcrest_core",
),
),
"_bazel_test_runner": attr.label(
default = Label(
"@io_bazel_rules_scala//scala:bazel_test_runner_deploy",
),
allow_files = True,
),
}
_junit_resolve_deps = {
"_scala_toolchain": attr.label_list(
default = [
Label(
"//external:io_bazel_rules_scala/dependency/scala/scala_library",
),
Label("//external:io_bazel_rules_scala/dependency/junit/junit"),
Label(
"//external:io_bazel_rules_scala/dependency/hamcrest/hamcrest_core",
),
],
allow_files = False,
),
}
_scala_junit_test_attrs.update(launcher_template)
_scala_junit_test_attrs.update(implicit_deps)
_scala_junit_test_attrs.update(common_attrs)
_scala_junit_test_attrs.update(_junit_resolve_deps)
_scala_junit_test_attrs.update({
"tests_from": attr.label_list(providers = [[JavaInfo]]),
})
def make_scala_junit_test(*extras):
return rule(
attrs = _dicts.add(
_scala_junit_test_attrs,
extras_phases(extras),
*[extra["attrs"] for extra in extras if "attrs" in extra]
),
fragments = ["java"],
outputs = _dicts.add(
common_outputs,
*[extra["outputs"] for extra in extras if "outputs" in extra]
),
test = True,
toolchains = ["@io_bazel_rules_scala//scala:toolchain_type"],
implementation = _scala_junit_test_impl,
)
scala_junit_test = make_scala_junit_test()
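# Illustrative BUILD usage sketch (hypothetical target and source names), since
# at least one of 'prefixes'/'suffixes' must be set:
#
# scala_junit_test(
#     name = "my_junit_tests",
#     srcs = ["MyTest.scala"],
#     suffixes = ["Test"],
# )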
|
the-stack_0_2406 | """
Descriptor data structure.
Descriptors are a basic data structure used throughout PSD files. A descriptor
is one kind of serialization protocol for data objects; enum classes in
:py:mod:`psd_tools.terminology` (or raw bytes) indicate what kind of
descriptor it is.
The class ID can be a pre-defined enum if the tag is 4 bytes long, or plain
bytes if the length is arbitrary. These depend on the internal version of
Adobe Photoshop, but the details are unknown.
Pretty printing is the best approach to check the descriptor content::
    from IPython.lib.pretty import pprint
pprint(descriptor)
"""
from __future__ import absolute_import, unicode_literals
import attr
import logging
from psd_tools.psd.base import (
BaseElement,
BooleanElement,
DictElement,
IntegerElement,
ListElement,
NumericElement,
StringElement,
)
from psd_tools.constants import OSType
from psd_tools.terminology import Klass, Enum, Event, Form, Key, Type, Unit
from psd_tools.validators import in_
from psd_tools.utils import (
read_fmt,
write_fmt,
read_unicode_string,
write_unicode_string,
write_bytes,
read_length_block,
write_length_block,
write_padding,
new_registry,
trimmed_repr,
)
logger = logging.getLogger(__name__)
TYPES, register = new_registry(attribute='ostype')
_TERMS = set(
item.value for kls in (Klass, Enum, Event, Form, Key, Type, Unit)
for item in kls
)
def read_length_and_key(fp):
"""
Helper to read descriptor key.
"""
length = read_fmt('I', fp)[0]
key = fp.read(length or 4)
if length == 0 and key not in _TERMS:
logger.debug('Unknown term: %r' % (key))
_TERMS.add(key)
return key
def write_length_and_key(fp, value):
"""
Helper to write descriptor key.
"""
written = write_fmt(fp, 'I', 0 if value in _TERMS else len(value))
written += write_bytes(fp, value)
return written
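# Illustrative sketch (not part of the module): known 4-byte terminology terms are
# written with a zero length field, so they round-trip through the two helpers
# above. The helper name is hypothetical, for documentation only.
def _example_key_roundtrip():
    import io
    buf = io.BytesIO()
    write_length_and_key(buf, Key.Enabled.value)  # known term -> length field is 0
    buf.seek(0)
    return read_length_and_key(buf)               # equals Key.Enabled.value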
class _DescriptorMixin(DictElement):
@classmethod
def _read_body(cls, fp):
name = read_unicode_string(fp, padding=1)
classID = read_length_and_key(fp)
items = []
count = read_fmt('I', fp)[0]
for _ in range(count):
key = read_length_and_key(fp)
ostype = OSType(fp.read(4))
kls = TYPES.get(ostype)
value = kls.read(fp)
items.append((key, value))
return dict(name=name, classID=classID, items=items)
def _write_body(self, fp):
written = write_unicode_string(fp, self.name, padding=1)
written += write_length_and_key(fp, self.classID)
written += write_fmt(fp, 'I', len(self))
for key in self:
written += write_length_and_key(fp, key)
written += write_bytes(fp, self[key].ostype.value)
written += self[key].write(fp)
return written
@classmethod
def _key_converter(cls, key):
if hasattr(key, 'encode'):
return key.encode('ascii')
return getattr(key, 'value', key)
def _repr_pretty_(self, p, cycle):
if cycle:
return "{name}{{...}".format(name=self.__class__.__name__)
prefix = '{cls}({name}){{'.format(
cls=self.__class__.__name__,
name=getattr(self.classID, 'name', self.classID),
)
with p.group(2, prefix, '}'):
p.breakable('')
for idx, key in enumerate(self):
if idx:
p.text(',')
p.breakable()
value = self[key]
p.pretty(key.decode('ascii'))
p.text(': ')
if isinstance(value, bytes):
p.text(trimmed_repr(value))
else:
p.pretty(value)
p.breakable('')
@register(OSType.DESCRIPTOR)
@attr.s(repr=False)
class Descriptor(_DescriptorMixin):
"""
Dict-like descriptor structure.
Key values can be 4-character `bytes` in
:py:class:`~psd_tools.terminology.Key` or arbitrary length `bytes`.
Supports direct access by :py:class:`~psd_tools.terminology.Key`.
Example::
from psd_tools.terminology import Key
descriptor[Key.Enabled]
for key in descriptor:
print(descriptor[key])
.. py:attribute:: name
`str`
.. py:attribute:: classID
bytes in :py:class:`~psd_tools.terminology.Klass`
"""
name = attr.ib(default='', type=str)
classID = attr.ib(default=Klass.Null.value)
@classmethod
def read(cls, fp):
return cls(**cls._read_body(fp))
def write(self, fp):
return self._write_body(fp)
@register(OSType.OBJECT_ARRAY)
@attr.s(repr=False)
class ObjectArray(_DescriptorMixin):
"""
Object array structure almost equivalent to
:py:class:`~psd_tools.psd.descriptor.Descriptor`.
.. py:attribute:: items_count
`int` value
.. py:attribute:: name
`str` value
.. py:attribute:: classID
bytes in :py:class:`~psd_tools.terminology.Klass`
"""
items_count = attr.ib(default=0, type=int)
name = attr.ib(default='', type=str)
classID = attr.ib(default=Klass.Null.value)
@classmethod
def read(cls, fp):
items_count = read_fmt('I', fp)[0]
return cls(items_count=items_count, **cls._read_body(fp))
def write(self, fp):
written = write_fmt(fp, 'I', self.items_count)
written += self._write_body(fp)
return written
@register(OSType.LIST)
@attr.s(repr=False)
class List(ListElement):
"""
List structure.
Example::
for item in list_value:
print(item)
"""
@classmethod
def read(cls, fp):
items = []
count = read_fmt('I', fp)[0]
for _ in range(count):
key = OSType(fp.read(4))
kls = TYPES.get(key)
value = kls.read(fp)
items.append(value)
return cls(items)
def write(self, fp):
written = write_fmt(fp, 'I', len(self))
for item in self:
written += write_bytes(fp, item.ostype.value)
written += item.write(fp)
return written
@register(OSType.PROPERTY)
@attr.s(repr=False)
class Property(BaseElement):
"""
Property structure.
.. py:attribute:: name
`str` value
.. py:attribute:: classID
bytes in :py:class:`~psd_tools.terminology.Klass`
.. py:attribute:: keyID
bytes in :py:class:`~psd_tools.terminology.Key`
"""
name = attr.ib(default='', type=str)
classID = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
keyID = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
@classmethod
def read(cls, fp):
name = read_unicode_string(fp)
classID = read_length_and_key(fp)
keyID = read_length_and_key(fp)
return cls(name, classID, keyID)
def write(self, fp):
written = write_unicode_string(fp, self.name)
written += write_length_and_key(fp, self.classID)
written += write_length_and_key(fp, self.keyID)
return written
@register(OSType.UNIT_FLOAT)
@attr.s(slots=True, repr=False, eq=False, order=False)
class UnitFloat(NumericElement):
"""
Unit float structure.
.. py:attribute:: unit
unit of the value in :py:class:`Unit`
.. py:attribute:: value
`float` value
"""
value = attr.ib(default=0.0, type=float)
unit = attr.ib(default=Unit._None, converter=Unit, validator=in_(Unit))
@classmethod
def read(cls, fp):
unit, value = read_fmt('4sd', fp)
return cls(unit=Unit(unit), value=value)
def write(self, fp):
return write_fmt(fp, '4sd', self.unit.value, self.value)
def _repr_pretty_(self, p, cycle):
if cycle:
return self.__repr__()
p.pretty(self.value)
p.text(' ')
p.text(self.unit.name)
@register(OSType.UNIT_FLOATS)
@attr.s(repr=False)
class UnitFloats(BaseElement):
"""
Unit floats structure.
.. py:attribute:: unit
unit of the value in :py:class:`Unit`
.. py:attribute:: values
List of `float` values
"""
unit = attr.ib(default=Unit._None, converter=Unit, validator=in_(Unit))
values = attr.ib(factory=list)
@classmethod
def read(cls, fp):
unit, count = read_fmt('4sI', fp)
values = list(read_fmt('%dd' % count, fp))
return cls(unit, values)
def write(self, fp):
return write_fmt(
fp, '4sI%dd' % len(self.values), self.unit.value, len(self.values),
*self.values
)
def __iter__(self):
for value in self.values:
yield value
def __getitem__(self, index):
return self.values[index]
def __len__(self):
return len(self.values)
@register(OSType.DOUBLE)
class Double(NumericElement):
"""
Double structure.
.. py:attribute:: value
`float` value
"""
@classmethod
def read(cls, fp):
return cls(*read_fmt('d', fp))
def write(self, fp):
return write_fmt(fp, 'd', self.value)
@attr.s(repr=False)
class Class(BaseElement):
"""
Class structure.
.. py:attribute:: name
`str` value
.. py:attribute:: classID
bytes in :py:class:`~psd_tools.terminology.Klass`
"""
name = attr.ib(default='', type=str)
classID = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
@classmethod
def read(cls, fp):
name = read_unicode_string(fp)
classID = read_length_and_key(fp)
return cls(name, classID)
def write(self, fp):
written = write_unicode_string(fp, self.name)
written += write_length_and_key(fp, self.classID)
return written
@register(OSType.STRING)
class String(StringElement):
"""
String structure.
.. py:attribute:: value
`str` value
"""
pass
@register(OSType.ENUMERATED_REFERENCE)
@attr.s(repr=False)
class EnumeratedReference(BaseElement):
"""
Enumerated reference structure.
.. py:attribute:: name
`str` value
.. py:attribute:: classID
bytes in :py:class:`~psd_tools.terminology.Klass`
.. py:attribute:: typeID
bytes in :py:class:`~psd_tools.terminology.Type`
.. py:attribute:: enum
bytes in :py:class:`~psd_tools.terminology.Enum`
"""
name = attr.ib(default='', type=str)
classID = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
typeID = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
enum = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
@classmethod
def read(cls, fp):
name = read_unicode_string(fp)
classID = read_length_and_key(fp)
typeID = read_length_and_key(fp)
enum = read_length_and_key(fp)
return cls(name, classID, typeID, enum)
def write(self, fp):
written = write_unicode_string(fp, self.name)
written += write_length_and_key(fp, self.classID)
written += write_length_and_key(fp, self.typeID)
written += write_length_and_key(fp, self.enum)
return written
@register(OSType.OFFSET)
@attr.s(repr=False)
class Offset(BaseElement):
"""
Offset structure.
.. py:attribute:: name
`str` value
.. py:attribute:: classID
bytes in :py:class:`~psd_tools.terminology.Klass`
.. py:attribute:: value
`int` value
"""
name = attr.ib(default='', type=str)
classID = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
value = attr.ib(default=0)
@classmethod
def read(cls, fp):
name = read_unicode_string(fp)
classID = read_length_and_key(fp)
offset = read_fmt('I', fp)[0]
return cls(name, classID, offset)
def write(self, fp):
written = write_unicode_string(fp, self.name)
written += write_length_and_key(fp, self.classID)
written += write_fmt(fp, 'I', self.value)
return written
@register(OSType.BOOLEAN)
class Bool(BooleanElement):
"""
Bool structure.
.. py:attribute:: value
`bool` value
"""
@classmethod
def read(cls, fp):
return cls(read_fmt('?', fp)[0])
def write(self, fp):
return write_fmt(fp, '?', self.value)
@register(OSType.LARGE_INTEGER)
class LargeInteger(IntegerElement):
"""
LargeInteger structure.
.. py:attribute:: value
`int` value
"""
@classmethod
def read(cls, fp):
return cls(read_fmt('q', fp)[0])
def write(self, fp):
return write_fmt(fp, 'q', self.value)
@register(OSType.INTEGER)
class Integer(IntegerElement):
"""
Integer structure.
.. py:attribute:: value
`int` value
"""
@classmethod
def read(cls, fp):
return cls(read_fmt('i', fp)[0])
def write(self, fp):
return write_fmt(fp, 'i', self.value)
@register(OSType.ENUMERATED)
@attr.s(repr=False)
class Enumerated(BaseElement):
"""
Enum structure.
.. py:attribute:: typeID
bytes in :py:class:`~psd_tools.terminology.Type`
.. py:attribute:: enum
bytes in :py:class:`~psd_tools.terminology.Enum`
"""
typeID = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
enum = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
@classmethod
def read(cls, fp):
typeID = read_length_and_key(fp)
enum = read_length_and_key(fp)
return cls(typeID, enum)
def write(self, fp):
written = write_length_and_key(fp, self.typeID)
written += write_length_and_key(fp, self.enum)
return written
def _repr_pretty_(self, p, cycle):
if cycle:
return self.__repr__()
p.text('(')
p.pretty(getattr(self.typeID, 'name', self.typeID))
p.text(', ')
p.pretty(getattr(self.enum, 'name', self.enum))
p.text(')')
def get_name(self):
"""Get enum name."""
if len(self.enum) == 4:
try:
return Enum(self.enum).name
except ValueError:
pass
return str(self.enum)
@register(OSType.RAW_DATA)
@attr.s(repr=False)
class RawData(BaseElement):
"""
RawData structure.
.. py:attribute:: value
`bytes` value
"""
value = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
@classmethod
def read(cls, fp):
return cls(read_length_block(fp))
def write(self, fp):
def writer(f):
if hasattr(self.value, 'write'):
return self.value.write(f)
return write_bytes(f, self.value)
return write_length_block(fp, writer)
@register(OSType.CLASS1)
class Class1(Class):
"""
Class structure equivalent to
:py:class:`~psd_tools.psd.descriptor.Class`.
"""
pass
@register(OSType.CLASS2)
class Class2(Class):
"""
Class structure equivalent to
:py:class:`~psd_tools.psd.descriptor.Class`.
"""
pass
@register(OSType.CLASS3)
class Class3(Class):
"""
Class structure equivalent to
:py:class:`~psd_tools.psd.descriptor.Class`.
"""
pass
@register(OSType.REFERENCE)
class Reference(List):
"""
Reference structure equivalent to
:py:class:`~psd_tools.psd.descriptor.List`.
"""
pass
@register(OSType.ALIAS)
class Alias(RawData):
"""
Alias structure equivalent to
:py:class:`~psd_tools.psd.descriptor.RawData`.
"""
pass
@register(OSType.GLOBAL_OBJECT)
class GlobalObject(Descriptor):
"""
Global object structure equivalent to
:py:class:`~psd_tools.psd.descriptor.Descriptor`.
"""
pass
@register(OSType.PATH)
class Path(RawData):
"""
Undocumented path structure equivalent to
:py:class:`~psd_tools.psd.descriptor.RawData`.
"""
pass
@register(OSType.IDENTIFIER)
class Identifier(Integer):
"""
Identifier equivalent to
:py:class:`~psd_tools.psd.descriptor.Integer`.
"""
pass
@register(OSType.INDEX)
class Index(Integer):
"""
Index equivalent to :py:class:`~psd_tools.psd.descriptor.Integer`.
"""
pass
@register(OSType.NAME)
@attr.s(repr=False)
class Name(BaseElement):
"""
Name structure (Undocumented).
.. py:attribute:: name
str
.. py:attribute:: classID
bytes in :py:class:`~psd_tools.terminology.Klass`
.. py:attribute:: value
str
"""
name = attr.ib(default='', type=str)
classID = attr.ib(default=b'\x00\x00\x00\x00', type=bytes)
value = attr.ib(default='', type=str)
@classmethod
def read(cls, fp):
name = read_unicode_string(fp)
classID = read_length_and_key(fp)
value = read_unicode_string(fp)
return cls(name, classID, value)
def write(self, fp):
written = write_unicode_string(fp, self.name)
written += write_length_and_key(fp, self.classID)
written += write_unicode_string(fp, self.value)
return written
@attr.s(repr=False)
class DescriptorBlock(Descriptor):
"""
Dict-like Descriptor-based structure that has `version` field. See
:py:class:`~psd_tools.psd.descriptor.Descriptor`.
.. py:attribute:: version
"""
version = attr.ib(default=16, type=int, validator=in_((16, )))
@classmethod
def read(cls, fp, **kwargs):
version = read_fmt('I', fp)[0]
return cls(version=version, **cls._read_body(fp))
def write(self, fp, padding=4, **kwargs):
written = write_fmt(fp, 'I', self.version)
written += self._write_body(fp)
written += write_padding(fp, written, padding)
return written
@attr.s(repr=False)
class DescriptorBlock2(Descriptor):
"""
Dict-like Descriptor-based structure that has `version` and
`data_version` fields. See
:py:class:`~psd_tools.psd.descriptor.Descriptor`.
.. py:attribute:: version
.. py:attribute:: data_version
"""
version = attr.ib(default=1, type=int)
data_version = attr.ib(default=16, type=int, validator=in_((16, )))
@classmethod
def read(cls, fp, **kwargs):
version, data_version = read_fmt('2I', fp)
return cls(
version=version, data_version=data_version, **cls._read_body(fp)
)
def write(self, fp, padding=4, **kwargs):
written = write_fmt(fp, '2I', self.version, self.data_version)
written += self._write_body(fp)
written += write_padding(fp, written, padding)
return written
|
the-stack_0_2407 | from tdw.controller import Controller
from tdw.tdw_utils import TDWUtils
from tdw.add_ons.object_manager import ObjectManager
from magnebot import Magnebot, ActionStatus
class CollisionDetection(Controller):
"""
Show the difference between arrived_offset values and collision detection settings.
"""
def __init__(self, port: int = 1071, check_version: bool = True, launch_build: bool = True):
super().__init__(port=port, check_version=check_version, launch_build=launch_build)
self.object_id = self.get_unique_id()
self.magnebot = Magnebot()
self.object_manager = ObjectManager()
self.add_ons.extend([self.object_manager, self.magnebot])
self.object_id: int = -1
def init_scene(self):
self.object_id = self.get_unique_id()
self.magnebot.reset()
commands = [{"$type": "load_scene",
"scene_name": "ProcGenScene"},
TDWUtils.create_empty_room(12, 12)]
commands.extend(self.get_add_physics_object(model_name="rh10",
position={"x": 0.04, "y": 0, "z": 1.081},
object_id=self.object_id))
self.communicate(commands)
def run(self, arrived_offset: float, objects: bool) -> None:
self.init_scene()
self.object_manager.initialized = False
self.magnebot.collision_detection.objects = objects
self.magnebot.move_to(self.object_id, arrived_at=0.3, aligned_at=1, arrived_offset=arrived_offset)
while self.magnebot.action.status == ActionStatus.ongoing:
self.communicate([])
self.communicate([])
print(self.magnebot.action.status)
print(self.object_manager.transforms[self.object_id].position)
if __name__ == "__main__":
c = CollisionDetection()
c.run(arrived_offset=0, objects=True)
c.run(arrived_offset=0.3, objects=True)
c.run(arrived_offset=0, objects=False)
c.communicate({"$type": "terminate"})
|
the-stack_0_2410 | import os
import sys
from _io import BytesIO
from Tea.stream import BaseStream
from alibabacloud_tea_fileform.models import FileField
def _length(o):
if hasattr(o, 'len'):
return o.len
elif isinstance(o, BytesIO):
return o.getbuffer().nbytes
elif hasattr(o, 'fileno'):
return os.path.getsize(o.name)
return len(o)
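# Illustrative sketch (not part of the SDK): _length falls back from a `len`
# attribute, to the BytesIO buffer size, to the on-disk file size, to len().
# The helper name is hypothetical, for documentation only.
def _example_length_usage():
    assert _length(b'abc') == 3               # plain bytes -> len()
    assert _length(BytesIO(b'abcd')) == 4     # in-memory buffer -> getbuffer().nbytes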
class FileFormInputStream(BaseStream):
def __init__(self, form, boundary, size=1024):
super().__init__(size)
self.form = form
self.boundary = boundary
self.file_size_left = 0
self.forms = {}
self.files = {}
self.files_keys = []
self._to_map()
self.form_str = b''
self._build_str_forms()
self.str_length = len(self.form_str)
def _to_map(self):
for k, v in self.form.items():
if isinstance(v, FileField):
self.files[k] = v
self.files_keys.append(k)
else:
self.forms[k] = v
def _build_str_forms(self):
form_str = ''
str_fmt = '--%s\r\nContent-Disposition: form-data; name="%s"\r\n\r\n%s\r\n'
forms_list = sorted(list(self.forms))
for key in forms_list:
value = self.forms[key]
form_str += str_fmt % (self.boundary, key, value)
self.form_str = form_str.encode('utf-8')
def _get_stream_length(self):
file_length = 0
for k, ff in self.files.items():
field_length = len(ff.filename.encode('utf-8')) + len(ff.content_type) +\
len(k.encode('utf-8')) + len(self.boundary) + 78
file_length += _length(ff.content) + field_length
stream_length = self.str_length + file_length + len(self.boundary) + 6
return stream_length
def __len__(self):
return self._get_stream_length()
def __iter__(self):
return self
def __next__(self):
return self.read(self.size, loop=True)
def file_str(self, size):
# handle file object
form_str = b''
start_fmt = '--%s\r\nContent-Disposition: form-data; name="%s";'
content_fmt = b' filename="%s"\r\nContent-Type: %s\r\n\r\n%s'
if self.file_size_left:
for key in self.files_keys[:]:
if size <= 0:
break
file_field = self.files[key]
file_content = file_field.content.read(size)
if isinstance(file_content, str):
file_content = file_content.encode('utf-8')
if self.file_size_left <= size:
form_str += b'%s\r\n' % file_content
self.file_size_left = 0
size -= len(file_content)
self.files_keys.remove(key)
else:
form_str += file_content
self.file_size_left -= size
size -= len(file_content)
else:
for key in self.files_keys[:]:
if size <= 0:
break
file_field = self.files[key]
file_size = _length(file_field.content)
self.file_size_left = file_size
file_content = file_field.content.read(size)
if isinstance(file_content, str):
file_content = file_content.encode('utf-8')
# build form_str
start = start_fmt % (self.boundary, key)
content = content_fmt % (
file_field.filename.encode('utf-8'),
file_field.content_type.encode('utf-8'),
file_content
)
if self.file_size_left < size:
form_str += b'%s%s\r\n' % (start.encode('utf-8'), content)
self.file_size_left = 0
size -= len(file_content)
self.files_keys.remove(key)
else:
form_str += b'%s%s' % (start.encode('utf-8'), content)
self.file_size_left -= size
size -= len(file_content)
return form_str
def read(self, size=None, loop=False):
if not self.files_keys and not self.form_str:
self.refresh()
if loop:
raise StopIteration
else:
return b''
if size is None:
size = sys.maxsize
if self.form_str:
form_str = self.form_str[:size]
self.form_str = self.form_str[size:]
if len(form_str) < size:
form_str += self.file_str(size)
else:
form_str = self.file_str(size)
if not self.form_str and not self.files_keys:
form_str += b'--%s--\r\n' % self.boundary.encode('utf-8')
return form_str
def refresh_cursor(self):
for ff in self.files.values():
if hasattr(ff.content, 'seek'):
ff.content.seek(0, 0)
def refresh(self):
self.file_size_left = 0
self._to_map()
self._build_str_forms()
self.refresh_cursor()
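# Illustrative usage sketch (hypothetical field names and boundary), assuming a
# FileField can be constructed empty and populated attribute by attribute;
# the helper name is for documentation only.
def _example_form_stream():
    file_field = FileField()
    file_field.filename = 'report.txt'
    file_field.content_type = 'text/plain'
    file_field.content = BytesIO(b'hello')
    form = {'userName': 'alice', 'file': file_field}
    stream = FileFormInputStream(form, boundary='boundary-0123456789')
    return stream.read()  # full multipart body, terminated by the closing boundary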
|
the-stack_0_2411 | """
In this example a Bell state is made.
"""
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
from qiskit import execute
from qiskit_qcgpu_provider import QCGPUProvider
Provider = QCGPUProvider()
# Create a Quantum Register with 2 qubits.
q = QuantumRegister(2)
# Create a Quantum Circuit with 2 Qubits
qc = QuantumCircuit(q)
# Add a H gate on qubit 0, putting this qubit in superposition.
qc.h(q[0])
# Add a CX (CNOT) gate on control qubit 0 and target qubit 1, putting
# the qubits in a Bell state.
qc.cx(q[0], q[1])
# See a list of available local simulators
print("QCGPU backends: ", Provider.backends())
backend_sim = Provider.get_backend('statevector_simulator')
# Compile and run the Quantum circuit on a simulator backend
job_sim = execute(qc, backend_sim)
result_sim = job_sim.result()
# Show the results
print("Simulation Results: ", result_sim)
print(result_sim.get_statevector(qc))
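# Hedged extension of the example (not in the original): to sample measurement
# counts instead of the statevector, add a classical register and measurements,
# assuming the provider also exposes a 'qasm_simulator' backend.
c = ClassicalRegister(2)
qc_counts = QuantumCircuit(q, c)
qc_counts.h(q[0])
qc_counts.cx(q[0], q[1])
qc_counts.measure(q, c)
backend_counts = Provider.get_backend('qasm_simulator')
counts = execute(qc_counts, backend_counts, shots=1024).result().get_counts(qc_counts)
print("Measurement counts: ", counts)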
|
the-stack_0_2414 | #!/usr/bin/env python2.7
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' main.py '''
from __future__ import print_function
import argparse
import os
import signal
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.options import define
from tornado.httpclient import AsyncHTTPClient
import heron.tools.common.src.python.utils.config as common_config
import heron.common.src.python.utils.log as log
from heron.tools.tracker.src.python import constants
from heron.tools.tracker.src.python import handlers
from heron.tools.tracker.src.python import utils
from heron.tools.tracker.src.python.config import Config, STATEMGRS_KEY
from heron.tools.tracker.src.python.tracker import Tracker
Log = log.Log
class Application(tornado.web.Application):
""" Tornado server application """
def __init__(self, config):
AsyncHTTPClient.configure(None, defaults=dict(request_timeout=120.0))
self.tracker = Tracker(config)
self.tracker.synch_topologies()
tornadoHandlers = [
(r"/", handlers.MainHandler),
(r"/clusters", handlers.ClustersHandler, {"tracker":self.tracker}),
(r"/topologies", handlers.TopologiesHandler, {"tracker":self.tracker}),
(r"/topologies/states", handlers.StatesHandler, {"tracker":self.tracker}),
(r"/topologies/info", handlers.TopologyHandler, {"tracker":self.tracker}),
(r"/topologies/logicalplan", handlers.LogicalPlanHandler, {"tracker":self.tracker}),
(r"/topologies/config", handlers.TopologyConfigHandler, {"tracker":self.tracker}),
(r"/topologies/containerfiledata", handlers.ContainerFileDataHandler,
{"tracker":self.tracker}),
(r"/topologies/containerfiledownload", handlers.ContainerFileDownloadHandler,
{"tracker":self.tracker}),
(r"/topologies/containerfilestats",
handlers.ContainerFileStatsHandler, {"tracker":self.tracker}),
(r"/topologies/physicalplan", handlers.PhysicalPlanHandler, {"tracker":self.tracker}),
# Deprecated. See https://github.com/apache/incubator-heron/issues/1754
(r"/topologies/executionstate", handlers.ExecutionStateHandler, {"tracker":self.tracker}),
(r"/topologies/schedulerlocation", handlers.SchedulerLocationHandler,
{"tracker":self.tracker}),
(r"/topologies/metadata", handlers.MetaDataHandler, {"tracker":self.tracker}),
(r"/topologies/runtimestate", handlers.RuntimeStateHandler, {"tracker":self.tracker}),
(r"/topologies/metrics", handlers.MetricsHandler, {"tracker":self.tracker}),
(r"/topologies/metricstimeline", handlers.MetricsTimelineHandler, {"tracker":self.tracker}),
(r"/topologies/metricsquery", handlers.MetricsQueryHandler, {"tracker":self.tracker}),
(r"/topologies/exceptions", handlers.ExceptionHandler, {"tracker":self.tracker}),
(r"/topologies/exceptionsummary", handlers.ExceptionSummaryHandler,
{"tracker":self.tracker}),
(r"/machines", handlers.MachinesHandler, {"tracker":self.tracker}),
(r"/topologies/pid", handlers.PidHandler, {"tracker":self.tracker}),
(r"/topologies/jstack", handlers.JstackHandler, {"tracker":self.tracker}),
(r"/topologies/jmap", handlers.JmapHandler, {"tracker":self.tracker}),
(r"/topologies/histo", handlers.MemoryHistogramHandler, {"tracker":self.tracker}),
(r"(.*)", handlers.DefaultHandler),
]
settings = dict(
debug=True,
serve_traceback=True,
static_path=os.path.dirname(__file__)
)
tornado.web.Application.__init__(self, tornadoHandlers, **settings)
Log.info("Tracker has started")
def stop(self):
self.tracker.stop_sync()
# pylint: disable=protected-access
class _HelpAction(argparse._HelpAction):
""" HelpAction """
def __call__(self, parser, namespace, values, option_string=None):
parser.print_help()
# retrieve subparsers from parser
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
# there will probably only be one subparser_action,
    # but better safe than sorry
for subparsers_action in subparsers_actions:
# get all subparsers and print help
for choice, subparser in subparsers_action.choices.items():
print("Subparser '{}'".format(choice))
print(subparser.format_help())
parser.exit()
# pylint: disable=bad-super-call
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
""" Subcommand help formatter """
def _format_action(self, action):
parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
if action.nargs == argparse.PARSER:
parts = "\n".join(parts.split("\n")[1:])
return parts
def add_titles(parser):
""" add titles """
parser._positionals.title = "Required arguments"
parser._optionals.title = "Optional arguments"
return parser
def add_arguments(parser):
""" add arguments """
default_config_file = os.path.join(
utils.get_heron_tracker_conf_dir(), constants.DEFAULT_CONFIG_FILE)
parser.add_argument(
'--config-file',
metavar='(a string; path to config file; default: "' + default_config_file + '")',
default=default_config_file)
parser.add_argument(
'--type',
      metavar='(a string; type of state manager (zookeeper or file, etc.); example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_TYPE) + ')',
choices=["file", "zookeeper"])
parser.add_argument(
'--name',
      metavar='(a string; name to be used for the state manager; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_NAME) + ')')
parser.add_argument(
'--rootpath',
      metavar='(a string; where all the states are stored; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_ROOTPATH) + ')')
parser.add_argument(
'--tunnelhost',
      metavar='(a string; if ssh tunneling needs to be established to connect to it; example: ' \
+ str(constants.DEFAULT_STATE_MANAGER_TUNNELHOST) + ')')
parser.add_argument(
'--hostport',
      metavar='(a string; only used to connect to zk, must be of the form \'host:port\';'\
' example: ' + str(constants.DEFAULT_STATE_MANAGER_HOSTPORT) + ')')
parser.add_argument(
'--port',
metavar='(an integer; port to listen; default: ' + str(constants.DEFAULT_PORT) + ')',
type=int,
default=constants.DEFAULT_PORT)
parser.add_argument(
'--verbose',
action='store_true')
return parser
def create_parsers():
""" create argument parser """
parser = argparse.ArgumentParser(
epilog='For detailed documentation, go to http://github.com/apache/incubator-heron',
usage="%(prog)s [options] [help]",
add_help=False)
parser = add_titles(parser)
parser = add_arguments(parser)
ya_parser = argparse.ArgumentParser(
parents=[parser],
formatter_class=SubcommandHelpFormatter,
add_help=False)
subparsers = ya_parser.add_subparsers(
title="Available commands")
help_parser = subparsers.add_parser(
'help',
help='Prints help',
add_help=False)
help_parser.set_defaults(help=True)
subparsers.add_parser(
'version',
help='Prints version',
add_help=True)
return parser, ya_parser
def define_options(port, config_file):
""" define Tornado global variables """
define("port", default=port)
define("config_file", default=config_file)
def create_tracker_config(namespace):
# try to parse the config file if we find one
config_file = namespace["config_file"]
config = utils.parse_config_file(config_file)
if config is None:
Log.debug("Config file does not exists: %s" % config_file)
config = {STATEMGRS_KEY:[{}]}
# update the config if we have any flags
config_flags = ["type", "name", "rootpath", "tunnelhost", "hostport"]
config_to_update = config[STATEMGRS_KEY][0]
for flag in config_flags:
value = namespace.get(flag, None)
if value is not None:
config_to_update[flag] = value
return config
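# Illustrative sketch (hypothetical flag values, documentation only): command-line
# flags override matching keys of the first state manager entry, and a missing
# config file falls back to an empty STATEMGRS entry, assuming parse_config_file
# returns None for a missing path as the branch above implies.
def _example_tracker_config_override():
  namespace = {"config_file": "/nonexistent.yaml",
               "type": "zookeeper", "hostport": "localhost:2181"}
  config = create_tracker_config(namespace)
  return config[STATEMGRS_KEY][0]  # {"type": "zookeeper", "hostport": "localhost:2181"}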
def main():
""" main """
# create the parser and parse the arguments
(parser, _) = create_parsers()
(args, remaining) = parser.parse_known_args()
if remaining == ['help']:
parser.print_help()
parser.exit()
elif remaining == ['version']:
common_config.print_build_info()
parser.exit()
elif remaining != []:
Log.error('Invalid subcommand')
sys.exit(1)
namespace = vars(args)
log.set_logging_level(namespace)
# set Tornado global option
define_options(namespace['port'], namespace['config_file'])
config = Config(create_tracker_config(namespace))
# create Tornado application
application = Application(config)
# pylint: disable=unused-argument
# SIGINT handler:
# 1. stop all the running zkstatemanager and filestatemanagers
# 2. stop the Tornado IO loop
def signal_handler(signum, frame):
# start a new line after ^C character because this looks nice
print('\n', end='')
application.stop()
tornado.ioloop.IOLoop.instance().stop()
# associate SIGINT and SIGTERM with a handler
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
Log.info("Running on port: %d", namespace['port'])
if namespace["config_file"]:
Log.info("Using config file: %s", namespace['config_file'])
Log.info("Using state manager:\n" + str(config))
http_server = tornado.httpserver.HTTPServer(application)
http_server.listen(namespace['port'])
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
the-stack_0_2417 | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.tf2onnx - rewrite tensorflow graph to onnx graph
"""
import collections
import sys
import traceback
import numpy as np
from onnx import onnx_pb
import tf2onnx
import tf2onnx.onnx_opset # pylint: disable=unused-import
import tf2onnx.tflite_handlers # pylint: disable=unused-import
import tf2onnx.custom_opsets # pylint: disable=unused-import
from tf2onnx.graph import Graph
from tf2onnx.rewriter import * # pylint: disable=wildcard-import
from tf2onnx.tflite_rewriters import * # pylint: disable=wildcard-import
from tf2onnx.late_rewriters import rewrite_channels_last
from tf2onnx.shape_inference import infer_shape
from tf2onnx.tf_loader import is_function, resolve_functions, set_function, clear_functions
from tf2onnx.tf_utils import tensorflow_to_onnx, get_tf_version, compute_const_folding_using_tf
from tf2onnx.tflite_utils import graphs_from_tflite
from tf2onnx.tfjs_utils import graphs_from_tfjs
from . import constants, logging, schemas, utils, handler
logger = logging.getLogger(__name__)
# pylint: disable=useless-return,broad-except,logging-not-lazy,unused-argument,missing-docstring
# pylint: disable=unused-variable
def fold_constants_using_tf(g, outputs_to_values):
ops = list(g.get_nodes())
# pylint: disable=too-many-nested-blocks
keep_looking = True
while keep_looking:
keep_looking = False
for idx, op in enumerate(ops):
if op.output and op.output[0] in outputs_to_values:
logger.info("folding node using tf type=%s, name=%s" % (op.type, op.name))
val = outputs_to_values[op.output[0]]
new_node_name = utils.make_name(op.name)
new_output_name = new_node_name
old_output_name = op.output[0]
old_node_name = op.name
logger.debug("create const node [%s] replacing [%s]", new_node_name, old_node_name)
ops[idx] = g.make_const(new_node_name, val)
logger.debug("replace old output [%s] with new output [%s]", old_output_name, new_output_name)
# need to re-write the consumers input name to use the const name
consumers = g.find_output_consumers(old_output_name)
if consumers:
for consumer in consumers:
g.replace_input(consumer, old_output_name, new_output_name)
# keep looking until there is nothing we can fold.
keep_looking = True
g.reset_nodes(ops)
def rewrite_constant_fold(g, ops):
"""
    We call the tensorflow transform with constant folding, but in some cases tensorflow
    does not fold all constants. Since there are a bunch of ops in onnx that use attributes
    where tensorflow has dynamic inputs, we badly want constant folding to work. For cases
    where tensorflow missed something, make another pass over the graph and fix what we care about.
"""
func_map = {
"Add": np.add,
"GreaterEqual": np.greater_equal,
"Cast": np.cast,
"ConcatV2": np.concatenate,
"Less": np.less,
"ListDiff": np.setdiff1d,
"Mul": np.multiply,
"Pack": np.stack,
"Range": np.arange,
"Sqrt": np.sqrt,
"Sub": np.subtract,
}
ops = list(ops)
# pylint: disable=too-many-nested-blocks
keep_looking = True
while keep_looking:
keep_looking = False
for idx, op in enumerate(ops):
func = func_map.get(op.type)
            if func is None:
                continue
            if set(op.output) & set(g.outputs):
                continue
try:
inputs = []
for node in op.inputs:
if not node.is_const():
break
inputs.append(node.get_tensor_value(as_list=False))
logger.debug("op name %s, %s, %s", op.name, len(op.input), len(inputs))
if inputs and len(op.input) == len(inputs):
logger.info("folding node type=%s, name=%s" % (op.type, op.name))
if op.type == "Cast":
dst = op.get_attr_int("to")
np_type = tf2onnx.utils.map_onnx_to_numpy_type(dst)
val = np.cast[np_type](*inputs)
elif op.type == "ConcatV2":
axis = inputs[-1]
values = inputs[:-1]
val = func(tuple(values), axis)
elif op.type == "ListDiff":
out_type = op.get_attr_int("out_idx")
np_type = tf2onnx.utils.map_onnx_to_numpy_type(out_type)
val = func(*inputs)
val = val.astype(np_type)
elif op.type in ["Pack"]:
# handle ops that need input array and axis
axis = op.get_attr_int("axis")
val = func(inputs, axis=axis)
elif op.type == "Range":
dtype = op.get_attr_int("Tidx")
np_type = tf2onnx.utils.map_onnx_to_numpy_type(dtype)
val = func(*inputs, dtype=np_type)
else:
val = func(*inputs)
new_node_name = utils.make_name(op.name)
new_output_name = new_node_name
old_output_name = op.output[0]
old_node_name = op.name
logger.debug("create const node [%s] replacing [%s]", new_node_name, old_node_name)
ops[idx] = g.make_const(new_node_name, val)
logger.debug("replace old output [%s] with new output [%s]", old_output_name, new_output_name)
# need to re-write the consumers input name to use the const name
consumers = g.find_output_consumers(old_output_name)
if consumers:
for consumer in consumers:
g.replace_input(consumer, old_output_name, new_output_name)
# keep looking until there is nothing we can fold.
# We keep the graph in topological order so if we folded,
# the result might help a following op.
keep_looking = True
except Exception as ex:
tb = traceback.format_exc() # pylint: disable=bare-except
logger.info("exception: %s, details: %s", ex, tb)
# ignore errors
# pylint: enable=too-many-nested-blocks
return ops
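# Illustrative sketch (not part of the converter): the func_map above reduces a
# foldable node to a plain numpy call, e.g. a Pack of two const inputs with
# axis=0 becomes np.stack([...], axis=0), and the result replaces the node with
# a Const. The helper name is hypothetical, for documentation only.
def _example_pack_folding():
    values = [np.array([1, 2]), np.array([3, 4])]
    return np.stack(values, axis=0)  # the value the folded Const node would hold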
def rewrite_incomplete_type_support(g, ops, impacted_ops):
"""
    For ops that have incomplete type support, insert casts.
    This is needed for some tensor ops in opset7 and for some ops in winml-rs5.
    It does not help performance, but it is better than the model not working at all.
"""
ignored_input_index = {
"Tile": [1], # Tile's second input can only be int64
"Where": [0], # Where's first input is bool
}
new_ops = []
org_ops = list(ops)
for op in org_ops:
if op.type in impacted_ops:
cast_inserted = []
output_dtype = None
ignored_inputs = ignored_input_index.get(op.type)
# insert casts on inputs if the runtime only supports float
for i, input_node in enumerate(op.inputs):
if ignored_inputs and i in ignored_inputs:
continue
input_name = op.input[i]
dtype = g.get_dtype(input_name)
if dtype is None:
logger.warning("adding Cast for op %s (type is %s)' input: %s, dtype should not be None",
op.name, op.type, input_name)
if dtype != onnx_pb.TensorProto.FLOAT:
output_dtype = dtype
logger.debug("insert cast for node %s on input %s", op.name, input_name)
if input_node and input_node.type == "Cast" \
and len(g.find_output_consumers(input_node.output[0])) == 1:
input_node.set_attr("to", onnx_pb.TensorProto.FLOAT)
g.set_dtype(input_name, onnx_pb.TensorProto.FLOAT)
else:
cast_node = g.insert_new_node_on_input(op, "Cast", input_name,
to=onnx_pb.TensorProto.FLOAT)
g.set_dtype(cast_node.output[0], onnx_pb.TensorProto.FLOAT)
g.copy_shape(input_name, cast_node.output[0])
cast_inserted.append(cast_node)
if output_dtype:
# insert reverse cast if needed
for output_name in op.output:
name = utils.make_name(op.name)
logger.debug("insert cast back for node %s on output %s [dtype=%s]", op.name, output_name,
output_dtype)
output_cast = g.insert_new_node_on_output("Cast", output_name, name=name,
to=output_dtype)
g.set_dtype(output_cast.output[0], output_dtype)
g.copy_shape(output_name, output_cast.output[0])
cast_inserted.append(output_cast)
if cast_inserted:
new_ops.extend(cast_inserted)
new_ops.append(op)
return new_ops
def rewrite_incomplete_type_support_rs5(g, ops):
return rewrite_incomplete_type_support(g, ops, ["Unsqueeze", "Mul", "Concat", "Slice", "Transpose"])
def rewrite_incomplete_type_support_rs6(g, ops):
impacted_ops = [
"Div",
"IsNaN",
"Max",
"Min",
"ReduceSum",
"Slice",
"Split",
"Tile",
"Transpose",
"Where"
]
# TODO: logic to insert cast has bug, not all inputs of one node need cast
# for example, slice's input "starts" doesn't need it.
if g.opset == 10:
impacted_ops.remove("Slice")
return rewrite_incomplete_type_support(g, ops, impacted_ops)
def tensorflow_onnx_mapping(g, ops_mapping, initialized_tables=None, is_tflite=False, dequantize=False):
logger.verbose("Mapping TF node to ONNX node(s)")
mapped_op = collections.Counter()
unmapped_op = collections.Counter()
exceptions = []
if initialized_tables is None:
initialized_tables = {}
ops = list(g.get_nodes())
for node in ops:
logger.debug("Process node: %s\n%s", node.name, node.summary)
if node.need_skip():
logger.debug("explicitly skip node " + node.name)
continue
op = node.type
map_info = ops_mapping.get(op)
if map_info is None:
unmapped_op[op] += 1
if not is_tflite:
logger.error("Tensorflow op [%s: %s] is not supported", node.name, op)
continue
mapped_op[op] += 1
func, kwargs = map_info
if kwargs:
# if there is a tf_op/onnx_op key we'll map the old type to a new type
converted_op = kwargs.get("tf_op" if is_tflite else "onnx_op")
if converted_op:
# sometimes the handler wants to know what the old op name was
kwargs["tfl_op" if is_tflite else "tf_op"] = op
node.type = converted_op
body_graphs = node.get_body_graphs()
if body_graphs:
for attr, b_g in body_graphs.items():
logger.debug("start handling subgraph of %s's attribute %s", node.name, attr)
b_g.topological_sort(b_g.get_nodes())
# we assume only ONNX nodes have subgraph defined in pre-rewriters.
# that means, if we create node having subgraphs in this step, the
# created subgraphs' nodes won't be mapped.
m_ops, unm_ops, body_exceptions = tensorflow_onnx_mapping(b_g, ops_mapping)
mapped_op += m_ops
unmapped_op += unm_ops
# topological_sort on the body in case processing has changed the order
b_g.topological_sort(b_g.get_nodes())
exceptions.extend(body_exceptions)
logger.debug("finish handling subgraph of %s's attribute %s", node.name, attr)
try:
func(g, node, **kwargs, initialized_tables=initialized_tables, dequantize=dequantize)
if not is_tflite:
# tensorflow nodes must be converted in the next pass
node.skip_conversion = True
except Exception as ex:
try:
# If the graph is corrupt from the exception this can fail
summary = node.summary
except Exception:
summary = ""
logger.error("Failed to convert node %r (fct=%r)\n%r",
node.name, func, summary, exc_info=1)
exceptions.append(ex)
return mapped_op, unmapped_op, exceptions
def transpose_inputs(ctx, inputs_as_nchw):
"""Insert a transpose from NHWC to NCHW on model input on users request."""
ops = []
for node in ctx.get_nodes():
for idx, output_name in enumerate(node.output):
if output_name in inputs_as_nchw:
shape = ctx.get_shape(output_name)
if len(shape) != len(constants.NCHW_TO_NHWC):
logger.warning("transpose_input for %s: shape must be rank 4, ignored" % output_name)
ops.append(node)
continue
# insert transpose
op_name = utils.make_name(node.name)
transpose = ctx.insert_new_node_on_output("Transpose", output_name, name=op_name)
transpose.set_attr("perm", constants.NCHW_TO_NHWC)
ctx.copy_shape(output_name, transpose.output[0])
ctx.set_shape(output_name, np.array(shape)[constants.NHWC_TO_NCHW])
ops.append(transpose)
ops.append(node)
continue
ops.append(node)
ctx.reset_nodes(ops)
def topological_sort(g, continue_on_error):
ops = g.get_nodes()
if not continue_on_error:
g.topological_sort(ops)
else:
try:
g.topological_sort(ops)
except: # pylint: disable=bare-except
# if we continue on error, ignore graph cycles so we can report all missing ops
pass
def run_rewriters(g, funcs, continue_on_error):
"""Rewrite the original graph and body graphs of nodes"""
# NOTE(wayuanho):
# 1. we don't sort graph here, rewriter is expected to do it on its own.
# 2. the graph here may have circles, current topological_sort cannot handle it.
for func in funcs:
try:
ops = func(g, g.get_nodes())
g.reset_nodes(ops)
except Exception as ex:
type_, value_, traceback_ = sys.exc_info()
logger.error("rewriter %s: exception %s", func, ex)
ex_ext = traceback.format_exception(type_, value_, traceback_)
if continue_on_error:
logger.info(ex_ext)
else:
raise ex
if utils.is_debug_mode():
broken_outputs = g.check_integrity()
if broken_outputs:
logging.error(
"After rewriter %s, graph breaks at outputs %s",
func.__name__, broken_outputs
)
if g.contained_graphs:
for dict_val in g.contained_graphs.values():
for attr_name, b_g in dict_val.items():
run_rewriters(b_g, funcs, attr_name)
def process_tf_graph(tf_graph, continue_on_error=False, verbose=False, target=None,
opset=None, custom_op_handlers=None, custom_rewriter=None,
extra_opset=None, shape_override=None, inputs_as_nchw=None,
input_names=None, output_names=None, ignore_default=None, use_default=None,
is_subgraph=False, const_node_values=None, tensors_to_rename=None,
initialized_tables=None, tflite_path=None, dequantize=False, tfjs_path=None):
"""Convert tensorflow graph to onnx graph.
Args:
tf_graph: tensorflow graph
continue_on_error: if an op can't be processed (aka there is no mapping), continue
verbose: print summary stats (deprecated)
target: list of workarounds applied to help certain platforms
opset: the opset to be used (int, default is latest)
custom_op_handlers: dictionary of custom ops handlers
custom_rewriter: list of custom graph rewriters
extra_opset: list of extra opset's, for example the opset's used by custom ops
shape_override: dict with inputs that override the shapes given by tensorflow
inputs_as_nchw: transpose inputs in list from nchw to nhwc
input_names: list of input node names in graph, input name format as node_name:port_id. Optional.
output_names: list of output node names in graph, format is node_name:port_id. Optional for tflite.
ignore_default: list of node names of PlaceholderWithDefault ops to change into Placeholder ops
use_default: list of node names of PlaceholderWithDefault ops to change into Identity ops using the default
const_node_values: a dict returned by compress_graph_def mapping node names to tensor values
tensors_to_rename: an optional dict (string->string) mapping tensor names to new names
initialized_tables: mapping from table shared_names to tuple of keys and values of table
tflite_path: Path to a tflite file to convert. If used, pass None to tf_graph
Return:
onnx graph
"""
# NOTE: process_parsed_graph and Graph are always given tensors post-rename.
# process_tf_graph (this function) gets tensors pre-rename.
if verbose:
logger.warning("Argument verbose for process_tf_graph is deprecated. Please use --verbose option instead.")
del verbose
opset = utils.find_opset(opset)
logger.info("Using tensorflow=%s, onnx=%s, tf2onnx=%s/%s",
get_tf_version(), utils.get_onnx_version(), tf2onnx.__version__, tf2onnx.version.git_version[:6])
logger.info("Using opset <onnx, %s>", opset)
if opset > schemas.get_max_supported_opset_version():
logger.warning("Currently installed onnx package %s is too low to support opset %s, "
"please upgrade onnx package to avoid potential conversion issue.",
utils.get_onnx_version(), opset)
clear_functions()
if inputs_as_nchw is None:
inputs_as_nchw = []
is_tflite = False
if tflite_path is not None:
main_g, subgraphs = graphs_from_tflite(tflite_path, input_names, output_names)
is_tflite = True
elif tfjs_path is not None:
main_g, subgraphs = graphs_from_tfjs(tfjs_path, input_names, output_names, shape_override,
ignore_default, use_default)
else:
main_g, subgraphs = graphs_from_tf(tf_graph, input_names, output_names, shape_override, const_node_values,
ignore_default, use_default)
for g in [main_g] + subgraphs:
g.set_config(target, opset, extra_opset)
g = process_graphs(main_g, subgraphs, custom_op_handlers, inputs_as_nchw, continue_on_error, custom_rewriter,
initialized_tables, tensors_to_rename, is_tflite, dequantize)
return g
def graphs_from_tf(tf_graph, input_names, output_names, shape_override=None, const_node_values=None,
ignore_default=None, use_default=None):
"""make tf2onnx internal subgraphs from the tensorflow subgraphs"""
if shape_override is None:
shape_override = {}
ordered_func = resolve_functions(tf_graph)
subgraphs = []
for func in ordered_func:
f_inputs_names = [t.name for t in func.inputs]
f_output_names = [t.name for t in func.outputs]
outputs_to_values, _ = compute_const_folding_using_tf(func, const_node_values, output_names)
onnx_nodes, _, _, output_shapes, dtypes, _ = \
tensorflow_to_onnx(func, shape_override, const_node_values, ignore_default, use_default)
fg = Graph(onnx_nodes, output_shapes, dtypes, input_names=f_inputs_names, output_names=f_output_names,
is_subgraph=True, graph_name=func.name)
fold_constants_using_tf(fg, outputs_to_values)
subgraphs.append(fg)
is_func = is_function(tf_graph)
if not is_func:
tf_graph = infer_shape(tf_graph, shape_override)
outputs_to_values, _ = compute_const_folding_using_tf(tf_graph, const_node_values, output_names)
onnx_nodes, _, _, output_shapes, dtypes, _ = \
tensorflow_to_onnx(tf_graph, shape_override, const_node_values, ignore_default, use_default)
utils.check_io(input_names, output_names, output_shapes.keys())
main_g = Graph(onnx_nodes, output_shapes, dtypes, input_names=input_names, output_names=output_names)
fold_constants_using_tf(main_g, outputs_to_values)
return main_g, subgraphs
def process_graphs(main_g, subgraphs, custom_op_handlers, inputs_as_nchw, continue_on_error, custom_rewriter,
initialized_tables, tensors_to_rename, is_tflite=False, dequantize=False):
if tensors_to_rename is not None:
main_g.rename_tensors(tensors_to_rename)
inputs_as_nchw = [tensors_to_rename.get(t, t) for t in inputs_as_nchw]
for g in subgraphs:
fg = process_parsed_graph(g, custom_op_handlers, inputs_as_nchw, continue_on_error, custom_rewriter,
initialized_tables, is_tflite, dequantize)
set_function(fg.graph_name, fg)
g = process_parsed_graph(main_g, custom_op_handlers, inputs_as_nchw, continue_on_error, custom_rewriter,
initialized_tables, is_tflite,
dequantize)
return g
def process_parsed_graph(g, custom_op_handlers, inputs_as_nchw, continue_on_error, custom_rewriter,
initialized_tables, is_tflite=False, dequantize=False):
op_cnt, attr_cnt = g.dump_node_statistics(include_attrs=True, include_subgraphs=False)
if is_tflite:
tfl_rewriters = []
if dequantize:
tfl_rewriters.append(rewrite_tfl_qdq)
tfl_rewriters.append(rewrite_tfl_scan_outputs)
tfl_rewriters.append(rewrite_tfl_select_zero)
tfl_rewriters.append(rewrite_tfl_rfft)
run_rewriters(g, tfl_rewriters, continue_on_error)
tfl_ops_mapping = handler.tfl_op.create_tfl_to_tf_mapping()
_, _, exceptions = tensorflow_onnx_mapping(g, tfl_ops_mapping, is_tflite=True, dequantize=False)
if exceptions and not continue_on_error:
raise exceptions[0]
# create ops mapping for the desired opsets
ops_mapping = handler.tf_op.create_mapping(g.opset, g.extra_opset)
# apply custom ops on top of the assembled opset. We can either complement the opset
# or override existing ops with a custom op.
if custom_op_handlers is not None:
# below is a bit tricky since there are a few api's:
# 1. the future way we want custom ops to be registered with the @tf_op decorator. Those handlers will be
# registered via the decorator on load of the module ... nothing is required here.
        # 2. the old custom op api: a dictionary of {name: (func, args[])}
        #    We deal with this by using a compat_handler that wraps the old handler with a new style handler.
        #    This is temporary to give people time to move to the new api; after tf2onnx-1.5 we want to remove this
custom_opset = {}
for k, v in custom_op_handlers.items():
# FIXME: remove this after tf2onnx-1.5
def compat_handler(ctx, node, **kwargs):
# wrap old handler
name = node.name
args = kwargs["args"]
func = kwargs["func"]
return func(ctx, node, name, args)
args = v[1]
kwargs = {"func": v[0]}
if args:
onnx_op = args[0]
kwargs["onnx_op"] = onnx_op
args = args[1:]
kwargs["args"] = args
new_handler = handler.tf_op(k,
domain=constants.TENSORFLOW_OPSET.domain,
kwargs=kwargs)
new_handler.register_compat_handler(compat_handler, 1)
custom_opset[k] = (compat_handler, kwargs)
ops_mapping.update(custom_opset)
if inputs_as_nchw:
transpose_inputs(g, inputs_as_nchw)
# pre-processing graph rewrites
# bi-directional re-writer should be placed after single directional re-writer
rewriters = [
# single directional
rewrite_constant_fold,
rewrite_quantize_and_dequantize,
rewrite_fused_ops,
rewrite_transpose,
rewrite_flatten,
rewrite_random_uniform,
rewrite_random_uniform_fold_const,
rewrite_random_normal,
rewrite_dropout,
rewrite_conv_dilations,
rewrite_eye,
rewrite_leakyrelu,
rewrite_thresholded_relu,
rewrite_conv2d_with_pad,
rewriter_lstm_tf2,
rewrite_gru_tf2,
rewrite_single_direction_lstm,
# bi-directional
rewrite_bi_direction_lstm,
rewrite_single_direction_gru,
rewrite_bi_direction_gru,
rewrite_custom_rnn_cell,
rewrite_generic_loop, rewrite_cond,
rewrite_biasadd_with_conv2d,
rewrite_layer_normalization,
rewrite_gemm,
rewrite_ragged_variant_shape,
]
if custom_rewriter is not None:
rewriters.extend(custom_rewriter)
run_rewriters(g, rewriters, continue_on_error)
    # some nodes may already be copied into an inner Graph, so remove them from the main Graph.
g.delete_unused_nodes(g.outputs)
topological_sort(g, continue_on_error)
mapped_op, unmapped_op, exceptions = \
tensorflow_onnx_mapping(g, ops_mapping, initialized_tables, dequantize=dequantize)
if unmapped_op:
logger.error("Unsupported ops: %s", unmapped_op)
if exceptions and not continue_on_error:
raise exceptions[0]
# post-processing rewriters
late_rewriters = []
if g.is_target(constants.TARGET_RS5):
late_rewriters.append(rewrite_incomplete_type_support_rs5)
if g.is_target(constants.TARGET_RS6):
late_rewriters.append(rewrite_incomplete_type_support_rs6)
if g.is_target(constants.TARGET_CHANNELS_LAST):
late_rewriters.append(rewrite_channels_last)
if late_rewriters:
run_rewriters(g, late_rewriters, continue_on_error)
# onnx requires topological sorting
topological_sort(g, continue_on_error)
g.update_proto()
logger.verbose(
"Summay Stats:\n"
"\ttensorflow ops: {}\n"
"\ttensorflow attr: {}\n"
"\tonnx mapped: {}\n"
"\tonnx unmapped: {}".format(op_cnt, attr_cnt, mapped_op, unmapped_op))
return g
def tf_optimize(input_names, output_names, graph_def):
"""optimize tensorflow graph. This is in tf_loader but some apps call this
so we proxy into tf_loader to keep them working."""
return tf2onnx.tf_loader.tf_optimize(input_names, output_names, graph_def)
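# Hedged usage sketch (not part of the original module). It assumes a frozen
# TensorFlow GraphDef is available and that tensors named "input:0" and
# "output:0" exist in it; opset 13 is an arbitrary choice. Adjust these
# assumptions to the real model before using it.
def _example_process_tf_graph(frozen_graph_def):
    import tensorflow as tf
    # import the frozen graph into a fresh TF graph object
    with tf.Graph().as_default() as tf_graph:
        tf.compat.v1.import_graph_def(frozen_graph_def, name="")
    # convert the imported graph to an ONNX graph
    return process_tf_graph(tf_graph,
                            input_names=["input:0"],
                            output_names=["output:0"],
                            opset=13)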
|
the-stack_0_2418 | import socket
import json
VALUE_TYPE_CONVERTER = {
'int': lambda v: int(v),
'float': lambda v: float(v),
'str': lambda v: str(v).strip(),
'boolean': lambda v: v.strip().lower() == 'true',
'json': lambda v: json.loads(v)
}
class Ok(object):
"""server ok response"""
def __str__(self):
return "Ok"
OK = Ok()
class SimpleSocketClient(object):
def __init__(self, host, port, buffer_size=2048, socket_lib=socket):
self.__buffer_size = buffer_size
self.__soc = socket_lib.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__soc.connect((host, port))
def send(self, msg):
self.__soc.sendall(msg.encode())
data = self.__soc.recv(self.__buffer_size)
return data.decode()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.__soc.close()
class RemoteError(Exception):
def __init__(self, origin, message):
self.message = message
self.origin = origin
super().__init__()
def __str__(self):
return "{}({} - {})".format(
self.__class__.__name__,
self.origin,
self.message.strip()
)
class UnknownResponse(Exception):
def __init__(self, response):
self.response = response
super().__init__()
def __str__(self):
return "{}('{}')".format(
self.__class__.__name__,
self.response
)
class ConnectionClosed(Exception):
""" connection close by the server """
class Client(SimpleSocketClient):
BACKGROUND_COMMAND = 'bg'
def __init__(self, host, port, buffer_size=2048, socket_lib=socket,
run_in_background=False):
super().__init__(host, port, buffer_size=buffer_size, socket_lib=socket_lib)
self.run_in_background = run_in_background
def send(self, msg):
command = msg
if self.run_in_background:
command = "{} {}".format(self.BACKGROUND_COMMAND, command)
response = super().send(command).strip()
if not response:
raise ConnectionClosed()
if response.startswith('error'):
_, origin, message = response.split(' ', 2)
raise RemoteError(origin, message)
elif response == 'ok':
return OK
elif response.startswith('value'):
_, val_type, value = response.split(' ', 2)
converter = VALUE_TYPE_CONVERTER.get(val_type)
if converter:
return converter(value)
raise UnknownResponse(response)
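# Hedged usage sketch (not part of the original file). The host, port and the
# "get foo" command are placeholders; the real server address and command
# vocabulary are assumptions here.
if __name__ == "__main__":
    # the context manager closes the underlying socket on exit
    with Client("127.0.0.1", 9000) as client:
        try:
            print(client.send("get foo"))
        except RemoteError as err:
            print("server reported an error:", err)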
|
the-stack_0_2419 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v8.errors',
marshal='google.ads.googleads.v8',
manifest={
'KeywordPlanAdGroupErrorEnum',
},
)
class KeywordPlanAdGroupErrorEnum(proto.Message):
r"""Container for enum describing possible errors from applying a
keyword plan ad group.
"""
class KeywordPlanAdGroupError(proto.Enum):
r"""Enum describing possible errors from applying a keyword plan
ad group.
"""
UNSPECIFIED = 0
UNKNOWN = 1
INVALID_NAME = 2
DUPLICATE_NAME = 3
__all__ = tuple(sorted(__protobuf__.manifest))
|
the-stack_0_2422 | #!/usr/bin/env python3
import argparse
import pono
import smt_switch as ss
from smt_switch.primops import And, BVAdd, BVSub, Equal, Ite
from smt_switch.sortkinds import BOOL, BV
def build_simple_alu_fts(s:ss.SmtSolver)->pono.Property:
'''
Creates a simple alu transition system
@param s - an SmtSolver from smt_switch
@return a property
'''
# Instantiate a functional transition system
fts = pono.FunctionalTransitionSystem(s)
# Create a bit-vector sorts
bvsort1 = s.make_sort(BV, 1)
bvsort8 = s.make_sort(BV, 8)
# Create the states
cfg = fts.make_statevar('cfg', bvsort1)
spec_res = fts.make_statevar('spec_res', bvsort8)
imp_res = fts.make_statevar('imp_res', bvsort8)
# Create the inputs
a = fts.make_inputvar('a', bvsort8)
b = fts.make_inputvar('b', bvsort8)
# Add logic for cfg
## Start at 0
fts.constrain_init(s.make_term(Equal, cfg, s.make_term(0, bvsort1)))
## Keeps the same value
fts.assign_next(cfg, cfg)
# Set logic for results
## they start equal
fts.constrain_init(s.make_term(Equal, spec_res, imp_res))
## spec_res is the sum: spec_res' = a + b
fts.assign_next(spec_res, s.make_term(BVAdd, a, b))
## imp_res depends on the configuration: imp_res' == (cfg == 0) ? a + b : a - b
fts.assign_next(imp_res, s.make_term(Ite,
s.make_term(Equal, cfg, s.make_term(0, bvsort1)),
s.make_term(BVAdd, a, b),
s.make_term(BVSub, a, b)))
# Create a property: spec_res == imp_res
prop = pono.Property(fts, s.make_term(Equal,
spec_res,
imp_res))
return prop
def k_induction_attempt():
# Create an smt_switch.SmtSolver with Boolector as the backend
# and no logging
s = ss.create_btor_solver(False)
s.set_opt('produce-models', 'true')
s.set_opt('incremental', 'true')
prop = build_simple_alu_fts(s)
fts = prop.transition_system
print('\n============== Running k-induction ==============')
print('INIT\n\t{}'.format(fts.init))
print('TRANS\n\t{}'.format(fts.trans))
print('PROP\n\t{}'.format(prop.prop))
# Create KInduction engine -- using same solver (in future can change the solver)
kind = pono.KInduction(prop, s)
res = kind.check_until(20)
print(res)
assert res is None, "Expecting k-induction not to prove property in 20 steps"
print("KInduction returned unknown")
def interpolant_attempt():
# Create solver and interpolator using MathSAT
# and no logging for the solver
s = ss.create_msat_solver(False)
itp = ss.create_msat_interpolator()
s.set_opt('produce-models', 'true')
s.set_opt('incremental', 'true')
prop = build_simple_alu_fts(s)
fts = prop.transition_system
print('\n============== Running Interpolant-based Model Checking ==============')
print('INIT\n\t{}'.format(fts.init))
print('TRANS\n\t{}'.format(fts.trans))
print('PROP\n\t{}'.format(prop.prop))
# Create InterpolantMC engine
itpmc = pono.InterpolantMC(prop, s, itp)
res = itpmc.check_until(20)
print(res)
assert res is True, "Expecting InterpolantMC to prove the property"
print("InterpolantMC returned true")
def k_induction_attempt_inductive():
# Create an smt_switch.SmtSolver with Boolector as the backend
# and no logging
s = ss.create_btor_solver(False)
s.set_opt('produce-models', 'true')
s.set_opt('incremental', 'true')
prop = build_simple_alu_fts(s)
fts = prop.transition_system
# store sets of states in a dictionary for accessing below
states = {str(sv):sv for sv in fts.statevars}
# make the property inductive manually
prop = pono.Property(fts,
s.make_term(And,
s.make_term(Equal,
states['cfg'],
s.make_term(0, s.make_sort(BV, 1))),
prop.prop))
print('\n============== Running k-induction on inductively strengthened property ==============')
print('INIT\n\t{}'.format(fts.init))
print('TRANS\n\t{}'.format(fts.trans))
print('PROP\n\t{}'.format(prop.prop))
# Create KInduction engine -- using same solver (in future can change the solver)
kind = pono.KInduction(prop, s)
res = kind.check_until(20)
print(res)
assert res is True, "Expecting k-induction to prove the inductively strengthened property"
print("KInduction returned true")
approaches = {
'kind': k_induction_attempt,
'interp': interpolant_attempt,
'kind-manual': k_induction_attempt_inductive
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Pono SimpleALU example')
parser.add_argument('approach', choices=['kind', 'interp', 'kind-manual'],
help='Select the approach: k-induction, interpolant-based,'
' or k-induction with a manually strengthened property')
parser.add_argument('-v', '--verbosity', type=int, default=0)
args = parser.parse_args()
pono.set_global_logger_verbosity(args.verbosity)
approaches[args.approach]()
|
the-stack_0_2423 | import pygame
from time import sleep
import emoji
print("{:=^70}".format("Bem-Vindo Ao Mini Jukebox"))
print("""
Choose one of the artists or bands below to play a song:
(1) The Beatles
(2) Pink Floyd
(3) Tiny Tim
(4) Nirvana
(5) The Who
(6) Paul McCartiney
""")
opUser = int(input("Enter a number from the list: "))
if opUser == 0 or opUser > 6:
print("Numeração inválida!")
else:
print(emoji.emojize("Processando...:hourglass_flowing_sand:", use_aliases=True))
sleep(4)
print("Divita-se com esse som!")
if opUser == 1:
pygame.init()
pygame.mixer.music.load("sound/The End.ogg")
pygame.mixer.music.play()
print(emoji.emojize("Reproduzindo The Beatles :guitar: :guitar: :violin: :drum:", use_aliases=True))
sleep(200)
pygame.event.wait()
elif opUser == 2:
pygame.init()
pygame.mixer.music.load("sound/Eclips.ogg")
pygame.mixer.music.play()
print(emoji.emojize("Reporduzindo Pink Floyd :pig2: :factory: :hammer:", use_aliases=True))
sleep(200)
pygame.event.wait()
elif opUser == 3:
pygame.init()
pygame.mixer.music.load("sound/Livi.ogg")
pygame.mixer.music.play()
print(emoji.emojize("Reproduzindo Tiny Tim :violin: :tophat: :microphone:", use_aliases=True))
sleep(200)
pygame.event.wait()
elif opUser == 4:
pygame.init()
pygame.mixer.music.load("sound/About A Gir.ogg")
pygame.mixer.music.play()
print(emoji.emojize("Reporduzindo Nirvana :guitar: :guitar: :drum: :microphone:", use_aliases=True))
sleep(200)
pygame.event.wait()
elif opUser == 5:
pygame.init()
pygame.mixer.music.load("sound/Boris The Spide.ogg")
pygame.mixer.music.play()
print(emoji.emojize("Reproduzindo The Who :spider: :guitar: :microphone:", use_aliases=True))
sleep(200)
pygame.event.wait()
else:
pygame.init()
pygame.mixer.music.load("sound/Smile Away.ogg")
pygame.mixer.music.play()
print(emoji.emojize("Reproduzindo Paul McCartiney :musical_score: :violin: :guitar: :drum: :musical_keyboard:", use_aliases=True))
sleep(200)
pygame.event.wait()
|
the-stack_0_2425 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) Philipp Wagner. All rights reserved.
# Licensed under the BSD license. See LICENSE file in the project root for full license information.
import cPickle
def save_model(filename, model):
output = open(filename, 'wb')
cPickle.dump(model, output)
output.close()
def load_model(filename):
pkl_file = open(filename, 'rb')
res = cPickle.load(pkl_file)
pkl_file.close()
return res
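# Hedged usage sketch (not part of the original file); the file name and model
# object below are placeholders. Note the module targets Python 2 (cPickle).
if __name__ == '__main__':
    example_model = {'weights': [0.1, 0.2, 0.3]}
    save_model('example_model.pkl', example_model)
    print(load_model('example_model.pkl'))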
|
the-stack_0_2426 | from typing import List
def convert_decimal_to_hex(dec_list: List[int]):
    hex_list: List[str] = list()
for dec in dec_list:
hex_list.append(hex(dec))
response_data = (
{
'isResult' : True,
'code' : 'SUCCESS',
'data' : {
'output': hex_list
}
}, 200
)
return response_data
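# Example (hypothetical call): convert_decimal_to_hex([10, 255]) returns
# ({'isResult': True, 'code': 'SUCCESS', 'data': {'output': ['0xa', '0xff']}}, 200)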
def convert_hex_to_decimal(hex_list: List[str]):
dec_list: List[int] = list()
for hex_element in hex_list:
dec_list.append(int(hex_element, 16))
response_data = (
{
'isResult' : True,
'code' : 'SUCCESS',
'data' : {
'output': dec_list
}
}, 200
)
return response_data |
the-stack_0_2427 | import websocket
import socket
try:
import thread
except ImportError:
import _thread as thread
import time
import json
import serial
import serial.tools.list_ports
import _thread
import logging
import datetime
class MicroControllerConnection:
"""
Offers a connection to a microcontroller that can be used for data acquisition (and maybe at a later point changing parameters)
"""
def __init__(self, chromatogramid, samplerate, rheodyneswitch, portname,ws, baudrate=9600, timeout=2):
"""
:param portname:Name of the Port to be used. In Windows this could be COMX, on linux this could be a /dev/XXXX
The user running the software needs to have access to said device
:type portname:str
:param baudrate:the baud rate to be used to configure the serial port. defaults to 9600
:type baudrate:int
"""
self.chromatogram = chromatogramid
self.samplerate = samplerate
self.rheodyneswitch = rheodyneswitch
self.ws = ws
if self.isvalidportname(portname):
self.portname = portname
else:
raise ValueError("there is no serial port named " + portname)
self.baudrate = baudrate
self.timeout = timeout
self.stopacquisitionflag = False
self.prefix = self.portname.split('/')[-1]
self.prefixChannelName = True
self.serialInterface = serial.Serial(portname, baudrate, timeout=self.timeout)
def startacquisition(self):
self.stopacquisitionflag = False
self.thread = _thread.start_new_thread(self.acquisitionmethod,())
def stopacquisition(self):
self.stopacquisitionflag = True
@staticmethod
def isvalidportname(portname):
for tmp in serial.tools.list_ports.comports():
if tmp.device == portname:
return 1
return 0
def acquisitionmethod(self):
self.runNumber = 0
# Byte "x" senden, um moegliche Aktivitaeten zu stoppen
self.serialInterface.write(b"x")
        # wait out any leftover outgoing data
self.serialInterface.flushInput()
time.sleep(2)
        # read and discard the rest
self.__throwRemainingBytesAway()
self.setRheodyneSwitch()
time.sleep(2)
self.__throwRemainingBytesAway()
self.__sendAquisitionMode()
        time.sleep(2)  # wait for the first data records
self.__main_loop()
self.serialInterface.write(b"x")
def __sendAquisitionMode(self):
if self.samplerate == 1:
self.serialInterface.write(b"c")
        elif self.samplerate >= 1000:
self.serialInterface.write(b"W" + bytes(str(self.samplerate), 'ascii'))
else:
self.serialInterface.write(b"C" + bytes(str(self.samplerate), 'ascii'))
def __throwRemainingBytesAway(self):
if self.serialInterface.inWaiting() > 0:
            # read and discard the rest
self.serialInterface.read(self.serialInterface.inWaiting())
def setRheodyneSwitch(self):
if self.rheodyneswitch:
self.serialInterface.write(b"m")
self.serialInterface.flushInput()
time.sleep(1.5)
res = self.serialInterface.read(1).decode("utf-8")
if res == "m":
self.serialInterface.write(b"t")
self.serialInterface.flushInput()
else:
self.serialInterface.write(b"m")
self.serialInterface.flushInput()
time.sleep(1.5)
res = self.serialInterface.read(1).decode("utf-8")
if res == "s":
self.serialInterface.write(b"t")
self.serialInterface.flushInput()
def setChromatogram(self,c):
self.chromatogram=c
def __main_loop(self):
buffer = ''
currentdatetime = 0
zyklusAlt = 1
while self.stopacquisitionflag == False:
inbuff = self.serialInterface.inWaiting()
            # python3 fix from here on, otherwise data is read too quickly and inWaiting always returns 0
if inbuff == 0:
time.sleep(0.33)
            # end of python3 fix
while '\n' not in buffer:
buffer = buffer + self.serialInterface.read(1).decode("utf-8")
            if '\n' in buffer:  # exactly then a complete measurement series has been transmitted
zyklus, zeitInMin,uv, counts = buffer.split(',', 3)
if (int(zyklus) < int(zyklusAlt)):
zyklusAlt = 1
print("nextChromatogram")
self.runNumber = self.runNumber + 1
currentdatetime = 0
                    self.ws.send(json.dumps(
{'type':'nextChromatogram',
'chromatogram':self.chromatogram,
'runnumber':self.runNumber,
'portname':self.portname}))
zyklusAlt = int(zyklus)
# do db save here
data1 = {'type': 'data'}
data1['chromatogram'] = self.chromatogram
data1['value'] = counts.strip()
data1['datetime'] = currentdatetime
data1['channelName'] = "Counter"
if self.prefixChannelName:
data1['channelName'] = self.prefix+data1['channelName']
                self.ws.send(json.dumps(data1))
data2 = {'type': 'data'}
data2['chromatogram'] = self.chromatogram
data2['value'] = uv
data2['datetime'] = currentdatetime
data2['channelName'] = "UV"
if self.prefixChannelName:
data2['channelName'] = self.prefix + data2['channelName']
                self.ws.send(json.dumps(data2))
buffer = ''
currentdatetime += 1
connections = {}
def on_message(ws, message):
print(message)
text_data_json = json.loads(message)
if text_data_json['type'] == 'registrationRequest':
ports = []
for i in serial.tools.list_ports.comports():
ports.append(i.device+" - "+i.description)
response = json.dumps(
{'type': 'registration',
'ports': ports,
'fqdn': socket.getfqdn()
}
)
print("sending response:")
print(response)
ws.send(response)
if text_data_json['type'] == 'registrationResult':
print("Registration Result:")
print(text_data_json['message'])
if text_data_json['type'] == 'hplc.stopMeasurement':
print("hplc.stopMeasurement")
if text_data_json['port'] in connections:
connections[text_data_json['port']].stopacquisition()
connections.pop(text_data_json['port'])
if text_data_json['type'] == 'hplc.startMeasurement':
print("hplc.startMeasurement")
portname = text_data_json['port']
baudrate = text_data_json['baudrate']
con = MicroControllerConnection(text_data_json['id'],
text_data_json['samplerate'],
text_data_json['rheodyneswitch'],
portname,
ws,
baudrate,
2)
connections[portname] = con
con.startacquisition()
if text_data_json['type'] == 'nextChromatogram':
print("nextChromatogram ID is")
print(text_data_json['message']['id'])
print(connections[text_data_json['message']['portname']].chromatogram)
connections[text_data_json['message']['portname']].setChromatogram(int(text_data_json['message']['id']))
print(connections[text_data_json['message']['portname']].chromatogram)
def on_error(ws, error):
print(error)
def on_close(ws):
print("### closed ###")
def on_open(ws):
def run(*args):
while True:
time.sleep(1)
#ws.close()
print("thread terminating...")
thread.start_new_thread(run, ())
if __name__ == "__main__":
while True:
time.sleep(1)
try:
websocket.enableTrace(False)
ws = websocket.WebSocketApp("ws://hplc.inc-forschung.kfa-juelich.de/ws/JuHPLC/ThinClient/",
on_message=on_message,
on_error=on_error,
on_close=on_close)
ws.on_open = on_open
ws.run_forever()
        except Exception as ex:
print("exception, restarting connection to server")
print(ex) |
the-stack_0_2429 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from utilities.models import *
from honeycomb import *
@db_session
def load_statistics(statistic):
"""This function takes a string as parameter and returns a certain value, which is then displayed in the statistics
bar on the url-settings page.
"""
if statistic == 'total':
return select(p for p in Url).count()
elif statistic == 'scanned':
total = select(p for p in Url).count()
if total != 0:
scanned = select(p for p in Url if p.date_scanned is not None).count()
percentage = int(scanned/total*100)
else:
percentage = 0
return '{}%'.format(percentage)
elif statistic == 'scraped':
total = select(p for p in Url).count()
if total != 0:
scraped = select(p for p in Url if p.date_scraped is not None).count()
percentage = int(scraped/total*100)
else:
percentage = 0
return '{}%'.format(percentage)
# Creating a dataframe and filling it with one row: No data loaded.
df = pd.DataFrame(columns=['URL',
'Date Added',
'Date Scan',
'Date Scrape',
'Priority Scrape',
'Priority Scan'])
df = df.append({'URL': 'No data loaded'}, ignore_index=True)
# Defining the lay-out of this page.
layout = html.Div([
html.H3('URL Settings',
style={'text-align': 'center'}),
html.P('''On this page, you are able to add URLs to the database which will automatically receive
a priority flag. The statistics are refreshed every 30 seconds.''',
style={'width': 380,
'marginLeft': 'auto',
'marginRight': 'auto',
'textAlign': 'center',
'marginBottom': 10}),
html.Div([
html.Div([
html.Div(children=load_statistics('total'),
id='UrlStatisticsBox1',
className='statisticsBox'),
html.Div(children='Total',
className='title'),
html.Div(children='Amount of URLs in the database',
className='description')
], className='statisticsWrapper'),
html.Div([
html.Div(children=load_statistics('scanned'),
className='statisticsBox',
id='UrlStatisticsBox2'),
html.Div(children='Scanned',
className='title'),
html.Div(children='Percentage of scanned URLs',
className='description')
], className='statisticsWrapper'),
html.Div([
html.Div(children=load_statistics('scraped'),
className='statisticsBox',
id='UrlStatisticsBox3'),
html.Div(children='Scraped',
className='title'),
html.Div(children='Percentage of scraped URLs',
className='description')
], className='statisticsWrapper'),
html.Button('Refresh statistics',
id='refresh-url-statistics',
className='refresh_button')
], className='statisticsRow'),
html.Button('Load table',
id='reload-button',
style={'marginLeft': 20,
'float': 'right'}),
html.Div([
dcc.Input(id='input-box',
type='text',
style={'width': 480},
                  placeholder='URL which needs to be added to the database.'),
html.Button('Submit',
id='urlsubmit',
style={'marginLeft': 20}),
html.Br(),
html.Br(),
html.Div(id='output-container-button')
]),
html.Br(),
html.Div(
dt.DataTable(
rows=df.to_dict('records'),
sortable=True,
id='url-table')
),
])
|
the-stack_0_2430 | # (C) 2015 by Mareike Picklum ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import sys
from dnutils import logs
from prac.core.base import PRAC
from prac.core.inference import PRACInference
from prac.gui import PRACQueryGUI, DEFAULT_CONFIG
from prac.pracutils.utils import prac_heading
from pracmln.mln.util import headline
from pracmln.utils.project import PRACMLNConfig
logger = logs.getlogger(__name__)
try:
from pymongo import MongoClient
except ImportError:
logger.warning('MongoDB modules cannot be used.')
def are_requirements_set_to_load_module(module_name):
if module_name == 'role_look_up' or module_name == 'complex_achieved_by':
if 'pymongo' in sys.modules:
client = MongoClient()
try:
database_name_list = client.database_names()
if 'prac' in database_name_list:
database = client.prac
collections = database.collection_names()
if module_name == 'role_look_up':
if 'howtos' in collections:
return True
else:
print('"Role look up" module needs a "Frames" collection.')
return False
elif module_name == 'complex_achieved_by':
if 'howtos' in collections:
return True
else:
print('"Complex achieved by module" needs a "Instructions" collection.')
return False
else:
print('No PRAC database is stored at local MongoDB server instance.')
return False
except:
print('No local MongoDB server instance is running.')
return False
#IsCollection available
else:
return False
return True
def main():
logger.level = logs.DEBUG
usage = 'PRAC Query Tool'
parser = argparse.ArgumentParser(description=usage)
parser.add_argument("instruction", help="The instruction.")
parser.add_argument("-i", "--interactive", dest="interactive", default=False, action='store_true', help="Starts PRAC inference with an interactive GUI tool.")
parser.add_argument("-v", "--verbose", dest="verbose", default=1, type=int, action="store", help="Set verbosity level {0..3}. Default is 1.")
args = parser.parse_args()
opts_ = vars(args)
sentences = args.instruction
prac = PRAC()
prac.verbose = args.verbose
conf = PRACMLNConfig(DEFAULT_CONFIG)
if args.interactive: # use the GUI
from tkinter import Tk
root = Tk()
# in case we have natural-language parameters, parse them
infer = PRACInference(prac, sentences)
if len(sentences) > 0:
# module = prac.module('nl_parsing')
# prac.run(infer, module)
n = infer.runstep()
# print parsing result
for odb in n.outdbs:
odb.write()
# print input sentence
print(n.nlinstr())
#Started control structure handling
'''
cs_recognition = prac.module('cs_recognition')
prac.run(inference, cs_recognition)
dbs = inference.inference_steps[-1].output_dbs
dbs_ = []
for db in dbs:
dbs_.extend(parser.extract_multiple_action_cores(db))
inference.inference_steps[-1].output_dbs = dbs_
'''
app = PRACQueryGUI(root, infer.prac, n, conf, directory=args[0] if args else None)
root.mainloop()
exit(0)
# regular PRAC pipeline
infer = PRACInference(prac, sentences)
infer.run()
print(headline('inference results'))
print('instructions:')
for i in infer.root:
print(i)
frames = []
for step in infer.steps():
print(step.frame)
print(prac_heading('cram plans', color='blue'))
for step in infer.steps():
if hasattr(step, 'plan'):
print(step.plan)
# infer.write()
exit(0)
if __name__ == '__main__':
main()
|
the-stack_0_2432 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 10:56:33 2018
@author: barnabasnomo
"""
import numpy as np
import random
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
# Neural Network architecture
class Network(nn.Module):
def __init__(self, input_size, nb_action):
super(Network, self).__init__()
self.input_size = input_size
self.nb_action = nb_action
self.fc1 = nn.Linear(input_size, 30)
self.fc2 = nn.Linear(30, nb_action)
def forward(self, state):
x = F.relu(self.fc1(state))
q_values = self.fc2(x)
return q_values
# Experience Replay
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
# Append events to memory's capacity
def push(self, event):
self.memory.append(event)
if len(self.memory) > self.capacity:
del self.memory[0]
def sample(self, batch_size):
samples = zip(* random.sample(self.memory, batch_size))
return map(lambda x: Variable(torch.cat(x, 0)), samples)
# Create Deep Q Learning Network
class Dqn():
def __init__(self, input_size, nb_action, gamma):
self.gamma = gamma
self.reward_window = []
self.model = Network(input_size, nb_action)
self.memory = ReplayMemory(100000)
self.optimizer = optim.Adam(self.model.parameters(), lr=0.001)
self.last_state = torch.Tensor(input_size).unsqueeze(0)
self.last_action = 0
self.last_reward = 0
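    # Hedged sketch, not part of the captured file (which ends after __init__):
    # one plausible softmax action-selection step for this kind of DQN, written
    # against the same old-style Variable API imported above. The temperature
    # factor 100 is an illustrative assumption, not taken from the original.
    def select_action(self, state):
        # turn Q-values into a probability distribution over actions
        probs = F.softmax(self.model(Variable(state)) * 100, dim=1)
        # sample one action index from that distribution
        action = probs.multinomial(1)
        return action.data[0, 0]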
|
the-stack_0_2433 | import numpy as np
import openpnm as op
import openpnm.models.physics as pm
class MeniscusTest:
def setup_class(self):
np.random.seed(1)
self.net = op.network.Cubic(shape=[5, 1, 5], spacing=5e-5)
self.geo = op.geometry.SpheresAndCylinders(network=self.net,
pores=self.net.pores(),
throats=self.net.throats())
self.phase = op.phases.Water(network=self.net)
self.phys = op.physics.Standard(network=self.net,
phase=self.phase,
geometry=self.geo)
def test_toroidal_touch(self):
phys = self.phys
r_tor = 1e-6
self.geo["throat.touch_length"] = 2e-6
phys.add_model(propname="throat.tor_max",
model=pm.meniscus.purcell,
mode="max",
r_toroid=r_tor,)
phys.add_model(propname="throat.tor_touch",
model=pm.meniscus.purcell,
mode="touch",
r_toroid=r_tor,)
assert np.any(phys["throat.tor_touch"] < phys["throat.tor_max"])
def test_sinusoidal_touch(self):
phys = self.phys
self.geo["throat.amplitude"] = 5e-6
self.geo["throat.touch_length"] = 1e-6
phys.add_model(propname="throat.sin_pressure_max",
model=pm.meniscus.sinusoidal, mode="max")
phys.add_model(propname="throat.sin_pressure_touch",
model=pm.meniscus.sinusoidal,
mode="touch")
h = phys.project.check_data_health(phys)
for check in h.values():
if len(check) > 0:
assert 1 == 2
assert np.any(
(phys["throat.sin_pressure_touch"] < phys["throat.sin_pressure_max"])
)
def test_sinusoidal(self):
phys = self.phys
self.geo["throat.amplitude"] = 5e-6
phys.add_model(propname="throat.sin_pressure",
model=pm.meniscus.sinusoidal,
mode="max")
phys.add_model(propname="throat.sin_meniscus",
model=pm.meniscus.sinusoidal,
mode="men",
target_Pc=5000)
h = phys.project.check_data_health(phys)
for check in h.values():
if len(check) > 0:
assert 1 == 2
def test_toroidal(self):
phys = self.phys
r_tor = 1e-6
phys.add_model(propname="throat.purcell_pressure",
model=pm.capillary_pressure.purcell,
r_toroid=r_tor)
phys.add_model(propname="throat.tor_pressure",
model=pm.meniscus.purcell,
mode="max",
r_toroid=r_tor,
num_points=1000)
phys.add_model(propname="throat.tor_meniscus",
model=pm.meniscus.purcell,
mode="men",
r_toroid=r_tor,
target_Pc=5000)
a = np.around(phys["throat.purcell_pressure"], 10)
b = np.around(phys["throat.tor_pressure"], 10)
assert np.allclose(a, b)
h = phys.project.check_data_health(phys)
for check in h.values():
if len(check) > 0:
assert 1 == 2
def test_general_toroidal(self):
phys = self.phys
r_tor = 1e-6
phys.add_model(propname="throat.purcell_pressure",
model=pm.capillary_pressure.purcell,
r_toroid=r_tor)
phys["throat.scale_a"] = r_tor
phys["throat.scale_b"] = r_tor
phys.add_model(propname="throat.general_pressure",
model=pm.meniscus.general_toroidal,
mode="max",
num_points=1000)
a = np.around(phys["throat.purcell_pressure"], 10)
b = np.around(phys["throat.general_pressure"], 10)
assert np.allclose(a, b)
h = phys.project.check_data_health(phys)
for check in h.values():
if len(check) > 0:
assert 1 == 2
def test_exceptions(self):
phys = self.phys
r_tor = 1e-6
phys["throat.scale_a"] = r_tor
phys["throat.scale_b"] = r_tor
phys.add_model(propname="throat.elliptical_pressure",
model=pm.meniscus.general_toroidal,
mode="max",
profile_equation="elliptical",
num_points=1000)
phys.add_model(propname="throat.exception_pressure",
model=pm.meniscus.general_toroidal,
mode="max",
profile_equation="scooby-doo",
num_points=1000)
a = np.around(phys["throat.elliptical_pressure"], 10)
b = np.around(phys["throat.exception_pressure"], 10)
assert np.allclose(a, b)
phys.add_model(propname="throat.no_target_pressure",
model=pm.meniscus.general_toroidal,
mode="men",
num_points=1000)
phys.add_model(propname="throat.small_target_pressure",
model=pm.meniscus.general_toroidal,
mode="men",
target_Pc=1.0e-7,
num_points=1000)
a = np.around(phys["throat.no_target_pressure.radius"], 10)
b = np.around(phys["throat.small_target_pressure.radius"], 10)
assert np.allclose(a, b)
h = phys.project.check_data_health(phys)
for check in h.values():
if len(check) > 0:
assert 1 == 2
if __name__ == "__main__":
t = MeniscusTest()
self = t
t.setup_class()
for item in t.__dir__():
if item.startswith("test"):
print("running test: " + item)
t.__getattribute__(item)()
|
the-stack_0_2434 | #!/usr/bin/env python3
import numpy as np
import tensorflow as tf
import morpho_dataset
class Network:
def __init__(self, threads, seed=42):
# Create an empty graph and a session
graph = tf.Graph()
graph.seed = seed
self.session = tf.Session(graph=graph, config=tf.ConfigProto(inter_op_parallelism_threads=threads,
intra_op_parallelism_threads=threads))
def construct(self, args, source_chars, target_chars, bow, eow):
with self.session.graph.as_default():
if args.recodex:
tf.get_variable_scope().set_initializer(tf.glorot_uniform_initializer(seed=42))
# Inputs
self.sentence_lens = tf.placeholder(tf.int32, [None], name="sentence_lens")
self.source_ids = tf.placeholder(tf.int32, [None, None], name="source_ids")
self.source_seqs = tf.placeholder(tf.int32, [None, None], name="source_seqs")
self.source_seq_lens = tf.placeholder(tf.int32, [None], name="source_seq_lens")
self.target_ids = tf.placeholder(tf.int32, [None, None], name="target_ids")
self.target_seqs = tf.placeholder(tf.int32, [None, None], name="target_seqs")
self.target_seq_lens = tf.placeholder(tf.int32, [None], name="target_seq_lens")
# Append EOW after target_seqs
target_seqs = tf.reverse_sequence(self.target_seqs, self.target_seq_lens, 1)
target_seqs = tf.pad(target_seqs, [[0, 0], [1, 0]], constant_values=eow)
target_seq_lens = self.target_seq_lens + 1
target_seqs = tf.reverse_sequence(target_seqs, target_seq_lens, 1)
# Encoder
# Generate source embeddings for source chars, of shape [source_chars, args.char_dim].
source_embeddings = tf.get_variable("source_embeddings", [source_chars, args.char_dim])
# Embed the self.source_seqs using the source embeddings.
embedded_source_seqs = tf.nn.embedding_lookup(source_embeddings, self.source_seqs)
# Using a GRU with dimension args.rnn_dim, process the embedded self.source_seqs
# using forward RNN and store the resulting states into `source_states`.
__, source_states = tf.nn.dynamic_rnn(tf.nn.rnn_cell.GRUCell(args.rnn_dim),
embedded_source_seqs,
sequence_length=self.source_seq_lens,
dtype=tf.float32)
# Index the unique words using self.source_ids and self.target_id
sentence_mask = tf.sequence_mask(self.sentence_lens)
source_states = tf.boolean_mask(tf.nn.embedding_lookup(source_states, self.source_ids), sentence_mask)
source_lens = tf.boolean_mask(tf.nn.embedding_lookup(self.source_seq_lens, self.source_ids), sentence_mask)
target_seqs = tf.boolean_mask(tf.nn.embedding_lookup(target_seqs, self.target_ids), sentence_mask)
target_lens = tf.boolean_mask(tf.nn.embedding_lookup(target_seq_lens, self.target_ids), sentence_mask)
# Decoder
# Generate target embeddings for target chars, of shape [target_chars, args.char_dim].
target_embeddings = tf.get_variable("target_embeddings", [target_chars, args.char_dim])
# Embed the target_seqs using the target embeddings.
embedded_target_seqs = tf.nn.embedding_lookup(target_embeddings, target_seqs)
# Generate a decoder GRU with dimension args.rnn_dim.
decoder_rnn = tf.nn.rnn_cell.GRUCell(args.rnn_dim)
# Create a `decoder_layer` -- a fully connected layer with
# target_chars neurons used in the decoder to classify into target characters.
decoder_layer = tf.layers.Dense(target_chars)
# The DecoderTraining will be used during training. It will output logits for each
# target character.
class DecoderTraining(tf.contrib.seq2seq.Decoder):
@property
def batch_size(self): return tf.shape(source_states)[0] # Return size of the batch, using for example source_states size
@property
def output_dtype(self): return tf.float32 # Type for logits of target characters
@property
def output_size(self): return target_chars # Length of logits for every output
def initialize(self, name=None):
finished = tf.less_equal(target_lens, 0) # False if target_lens > 0, True otherwise
states = source_states # Initial decoder state to use
inputs = tf.nn.embedding_lookup(target_embeddings, tf.fill([self.batch_size], bow)) # embedded BOW characters of shape [self.batch_size]. You can use
# tf.fill to generate BOWs of appropriate size.
return finished, inputs, states
def step(self, time, inputs, states, name=None):
outputs, states = decoder_rnn(inputs, states) # Run the decoder GRU cell using inputs and states.
outputs = decoder_layer(outputs) # Apply the decoder_layer on outputs.
next_input = embedded_target_seqs[:, time] # Next input are words with index `time` in target_embedded.
finished = tf.less_equal(target_lens, time + 1) # False if target_lens > time + 1, True otherwise.
return outputs, states, next_input, finished
output_layer, _, _ = tf.contrib.seq2seq.dynamic_decode(DecoderTraining())
self.predictions_training = tf.argmax(output_layer, axis=2, output_type=tf.int32)
# The DecoderPrediction will be used during prediction. It will
# directly output the predicted target characters.
class DecoderPrediction(tf.contrib.seq2seq.Decoder):
@property
def batch_size(self): return tf.shape(source_states)[0] # Return size of the batch, using for example source_states size
@property
def output_dtype(self): return tf.int32 # Type for predicted target characters
@property
def output_size(self): return 1 # Will return just one output
def initialize(self, name=None):
finished = tf.fill([self.batch_size], False) # False of shape [self.batch_size].
states = source_states # Initial decoder state to use.
inputs = tf.nn.embedding_lookup(target_embeddings, tf.fill([self.batch_size], bow)) # embedded BOW characters of shape [self.batch_size]. You can use
# tf.fill to generate BOWs of appropriate size.
return finished, inputs, states
def step(self, time, inputs, states, name=None):
outputs, states = decoder_rnn(inputs, states) # Run the decoder GRU cell using inputs and states.
outputs = decoder_layer(outputs) # Apply the decoder_layer on outputs.
outputs = tf.argmax(outputs, output_type=tf.int32, axis=1) # Use tf.argmax to choose most probable class (supply parameter `output_type=tf.int32`).
next_input = tf.nn.embedding_lookup(target_embeddings, outputs) # Embed `outputs` using target_embeddings
finished = tf.equal(outputs, eow) # True where outputs==eow, False otherwise
return outputs, states, next_input, finished
self.predictions, _, self.prediction_lens = tf.contrib.seq2seq.dynamic_decode(
DecoderPrediction(), maximum_iterations=tf.reduce_max(source_lens) + 10)
# Training
weights = tf.sequence_mask(target_lens, dtype=tf.float32)
loss = tf.losses.sparse_softmax_cross_entropy(target_seqs, output_layer, weights=weights)
global_step = tf.train.create_global_step()
self.training = tf.train.AdamOptimizer().minimize(loss, global_step=global_step, name="training")
# Summaries
accuracy_training = tf.reduce_all(tf.logical_or(
tf.equal(self.predictions_training, target_seqs),
tf.logical_not(tf.sequence_mask(target_lens))), axis=1)
self.current_accuracy_training, self.update_accuracy_training = tf.metrics.mean(accuracy_training)
minimum_length = tf.minimum(tf.shape(self.predictions)[1], tf.shape(target_seqs)[1])
accuracy = tf.logical_and(
tf.equal(self.prediction_lens, target_lens),
tf.reduce_all(tf.logical_or(
tf.equal(self.predictions[:, :minimum_length], target_seqs[:, :minimum_length]),
tf.logical_not(tf.sequence_mask(target_lens, maxlen=minimum_length))), axis=1))
self.current_accuracy, self.update_accuracy = tf.metrics.mean(accuracy)
self.current_loss, self.update_loss = tf.metrics.mean(loss, weights=tf.reduce_sum(weights))
self.reset_metrics = tf.variables_initializer(tf.get_collection(tf.GraphKeys.METRIC_VARIABLES))
summary_writer = tf.contrib.summary.create_file_writer(args.logdir, flush_millis=10 * 1000)
self.summaries = {}
with summary_writer.as_default(), tf.contrib.summary.record_summaries_every_n_global_steps(10):
self.summaries["train"] = [tf.contrib.summary.scalar("train/loss", self.update_loss),
tf.contrib.summary.scalar("train/accuracy", self.update_accuracy_training)]
with summary_writer.as_default(), tf.contrib.summary.always_record_summaries():
for dataset in ["dev", "test"]:
self.summaries[dataset] = [tf.contrib.summary.scalar(dataset + "/loss", self.current_loss),
tf.contrib.summary.scalar(dataset + "/accuracy", self.current_accuracy)]
# Initialize variables
self.session.run(tf.global_variables_initializer())
with summary_writer.as_default():
tf.contrib.summary.initialize(session=self.session, graph=self.session.graph)
def train_epoch(self, train, batch_size):
import sys
while not train.epoch_finished():
sentence_lens, _, charseq_ids, charseqs, charseq_lens = train.next_batch(batch_size,
including_charseqs=True)
self.session.run(self.reset_metrics)
predictions, _, _ = self.session.run(
[self.predictions_training, self.training, self.summaries["train"]],
{self.sentence_lens: sentence_lens,
self.source_ids: charseq_ids[train.FORMS], self.target_ids: charseq_ids[train.LEMMAS],
self.source_seqs: charseqs[train.FORMS], self.target_seqs: charseqs[train.LEMMAS],
self.source_seq_lens: charseq_lens[train.FORMS],
self.target_seq_lens: charseq_lens[train.LEMMAS]})
form, gold_lemma, system_lemma = "", "", ""
for i in range(charseq_lens[train.FORMS][0]):
form += train.factors[train.FORMS].alphabet[charseqs[train.FORMS][0][i]]
for i in range(charseq_lens[train.LEMMAS][0]):
gold_lemma += train.factors[train.LEMMAS].alphabet[charseqs[train.LEMMAS][0][i]]
system_lemma += train.factors[train.LEMMAS].alphabet[predictions[0][i]]
print("Gold form: {}, gold lemma: {}, predicted lemma: {}".format(form, gold_lemma, system_lemma),
file=sys.stderr)
def evaluate(self, dataset_name, dataset, batch_size):
self.session.run(self.reset_metrics)
while not dataset.epoch_finished():
sentence_lens, _, charseq_ids, charseqs, charseq_lens = dataset.next_batch(batch_size,
including_charseqs=True)
self.session.run([self.update_accuracy, self.update_loss],
{self.sentence_lens: sentence_lens,
self.source_ids: charseq_ids[train.FORMS], self.target_ids: charseq_ids[train.LEMMAS],
self.source_seqs: charseqs[train.FORMS], self.target_seqs: charseqs[train.LEMMAS],
self.source_seq_lens: charseq_lens[train.FORMS],
self.target_seq_lens: charseq_lens[train.LEMMAS]})
return self.session.run([self.current_accuracy, self.summaries[dataset_name]])[0]
if __name__ == "__main__":
import argparse
import datetime
import os
import re
# Fix random seed
np.random.seed(42)
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=10, type=int, help="Batch size.")
parser.add_argument("--char_dim", default=64, type=int, help="Character embedding dimension.")
parser.add_argument("--epochs", default=10, type=int, help="Number of epochs.")
parser.add_argument("--recodex", default=False, action="store_true", help="ReCodEx mode.")
parser.add_argument("--rnn_dim", default=64, type=int, help="Dimension of the encoder and the decoder.")
parser.add_argument("--threads", default=1, type=int, help="Maximum number of threads to use.")
args = parser.parse_args()
# Create logdir name
args.logdir = "logs/{}-{}-{}".format(
os.path.basename(__file__),
datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S"),
",".join(("{}={}".format(re.sub("(.)[^_]*_?", r"\1", key), value) for key, value in sorted(vars(args).items())))
)
if not os.path.exists("logs"): os.mkdir("logs") # TF 1.6 will do this by itself
# Load the data
train = morpho_dataset.MorphoDataset("czech-cac-train.txt", max_sentences=5000)
dev = morpho_dataset.MorphoDataset("czech-cac-dev.txt", train=train, shuffle_batches=False)
# Construct the network
network = Network(threads=args.threads)
network.construct(args, len(train.factors[train.FORMS].alphabet), len(train.factors[train.LEMMAS].alphabet),
train.factors[train.LEMMAS].alphabet_map["<bow>"],
train.factors[train.LEMMAS].alphabet_map["<eow>"])
# Train
for i in range(args.epochs):
network.train_epoch(train, args.batch_size)
accuracy = network.evaluate("dev", dev, args.batch_size)
print("{:.2f}".format(100 * accuracy))
|
the-stack_0_2435 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2001, 2002, 2004, 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Text Widget tests
"""
import datetime
import unittest
import doctest
from zope.component.testing import setUp, tearDown
from zope.interface.verify import verifyClass
from zope.schema import TextLine
from zope.publisher.browser import TestRequest
from zope.schema import Password
from zope.formlib.interfaces import IInputWidget
from zope.formlib.widgets import TextWidget
from zope.formlib.widgets import TextAreaWidget
from zope.formlib.widgets import BytesAreaWidget
from zope.formlib.widgets import PasswordWidget
from zope.formlib.widgets import FileWidget
from zope.formlib.widgets import IntWidget
from zope.formlib.widgets import FloatWidget
from zope.formlib.widgets import BytesWidget
from zope.formlib.widgets import ASCIIWidget
from zope.formlib.widgets import DateDisplayWidget
from zope.formlib.widgets import DatetimeDisplayWidget
from zope.formlib.widgets import URIDisplayWidget
from zope.formlib.tests.test_browserwidget import BrowserWidgetTest
from zope.formlib.tests.test_browserwidget import SimpleInputWidgetTest
from zope.formlib.tests.support import checker
class TextWidgetTest(SimpleInputWidgetTest):
"""Documents and tests the text widget.
>>> setUp()
>>> verifyClass(IInputWidget, TextWidget)
True
Converting Missing Values
-------------------------
String fields (TextLine, Text, etc.) values can be classified as one of the
following:
- Non-empty string
- Empty string
- None
Text browser widgets only support the first two types: non-empty strings
and empty strings. There's no facility to explicitly set a None value in a
text browser widget.
However, it is possible to interpret an empty string as None for some
    applications. For example, when inputting a User Name, an empty string means
'the user hasn't provided a value'. In another application, an empty string
may mean 'the user has provided a value, specifically <empty string>'.
To support both modes, the text widget provides a 'convert_missing_value'
flag. When True, empty strings will be converted by the widget to the
field's 'missing_value' (None by default). This mode accommodates the
'user hasn't provided a value' scenario.
To illustrate this mode, we'll use an optional field, where missing_value
is None:
>>> field = TextLine(
... __name__='foo',
... missing_value=None,
... required=False)
The HTTP form submission contains an empty string for the field value:
>>> request = TestRequest(form={'field.foo':u''})
A text widget configured for the field, where convert_missing_value is True
(the default value)...
>>> widget = TextWidget(field, request)
>>> widget.convert_missing_value
True
will convert the form's empty string into the field's missing_value, which
is None:
>>> widget.getInputValue() is None
True
When 'convert_missing_value' is False, the text widget will not convert
an empty string to the field's missing_value. This supports the 'user has
provided a value, specifically <empty string>' mode:
>>> widget.convert_missing_value = False
>>> widget.getInputValue()
u''
>>> tearDown()
"""
_WidgetFactory = TextWidget
def testProperties(self):
self.assertEqual(self._widget.tag, 'input')
self.assertEqual(self._widget.type, 'text')
self.assertEqual(self._widget.cssClass, '')
self.assertEqual(self._widget.extra, '')
self.assertEqual(self._widget.default, '')
self.assertEqual(self._widget.displayWidth, 20)
self.assertEqual(self._widget.displayMaxWidth, '')
def testRender(self):
value = 'Foo Value'
self._widget.setRenderedValue(value)
check_list = ('type="text"', 'id="field.foo"', 'name="field.foo"',
'value="Foo Value"', 'size="20"')
self.verifyResult(self._widget(), check_list)
check_list = ('type="hidden"',) + check_list[1:-1]
self.verifyResult(self._widget.hidden(), check_list)
check_list = ('style="color: red"',) + check_list
self._widget.extra = 'style="color: red"'
self.verifyResult(self._widget.hidden(), check_list)
def testRenderUTF8Input(self):
value = u"☃".encode('utf-8') # results in \u2603
self._widget.setRenderedValue(value)
check_list = ('type="text"', 'id="field.foo"', 'name="field.foo"',
u'value="\u2603"', 'size="20"')
self.verifyResult(self._widget(), check_list)
class URIDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = URIDisplayWidget
def testProperties(self):
# check the default linkTarget
self.assertFalse(self._widget.linkTarget)
def testRender(self):
value = "uri:fake"
self._widget.setRenderedValue(value)
self.verifyResult(self._widget(), ["<a", 'href="uri:fake"'])
self._widget.linkTarget = "there"
self.verifyResult(self._widget(), ["<a", 'href="uri:fake"',
'target="there"'])
def testEmptyRenderReturnsEmptyString(self):
self._widget.setRenderedValue(None)
self.assertEqual(self._widget(), "")
self._widget.setRenderedValue('')
self.assertEqual(self._widget(), "")
class DateDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = DateDisplayWidget
expected_class = "date"
def setUp(self):
super(DateDisplayWidgetTest, self).setUp()
self._value = datetime.date(2004, 12, 0o1)
def testDefaultDisplayStyle(self):
self.assertFalse(self._widget.displayStyle)
def testRenderDefault(self):
self._widget.setRenderedValue(self._value)
self.verifyResult(self._widget(),
["<span",
'class="%s"' % self.expected_class,
"01.12.2004",
"</span"])
def testRenderShort(self):
self._widget.setRenderedValue(self._value)
self._widget.displayStyle = "short"
self.verifyResult(self._widget(),
["<span",
'class="%s"' % self.expected_class,
u"01.12.04",
"</span"])
def testRenderMedium(self):
self._widget.setRenderedValue(self._value)
self._widget.displayStyle = "medium"
self.verifyResult(self._widget(),
["<span",
'class="%s"' % self.expected_class,
u"01.12.2004",
"</span"])
def testRenderLong(self):
self._widget.setRenderedValue(self._value)
self._widget.displayStyle = "long"
self.verifyResult(self._widget(),
["<span",
'class="%s"' % self.expected_class,
u"1 \u0434\u0435\u043a\u0430\u0431\u0440\u044f"
u" 2004 \u0433.",
"</span"])
def testRenderFull(self):
self._widget.setRenderedValue(self._value)
self._widget.displayStyle = "full"
self.verifyResult(self._widget(),
["<span",
'class="%s"' % self.expected_class,
u"1 \u0434\u0435\u043a\u0430\u0431\u0440\u044f"
u" 2004 \u0433.",
"</span"])
class DatetimeDisplayWidgetTest(DateDisplayWidgetTest):
_WidgetFactory = DatetimeDisplayWidget
expected_class = "dateTime"
def setUp(self):
super(DatetimeDisplayWidgetTest, self).setUp()
self._value = datetime.datetime(2004, 12, 0o1, 14, 39, 0o1)
def testRenderDefault(self):
super(DatetimeDisplayWidgetTest, self).testRenderDefault()
self.verifyResult(self._widget(), ["14:39:01"])
def testRenderShort(self):
super(DatetimeDisplayWidgetTest, self).testRenderShort()
self.verifyResult(self._widget(), ["14:39"])
def testRenderMedium(self):
super(DatetimeDisplayWidgetTest, self).testRenderMedium()
self.verifyResult(self._widget(), ["14:39:01"])
def testRenderLong(self):
super(DatetimeDisplayWidgetTest, self).testRenderLong()
self.verifyResult(self._widget(), ["14:39:01 +000"])
def testRenderFull(self):
super(DatetimeDisplayWidgetTest, self).testRenderFull()
self.verifyResult(self._widget(), ["14:39:01 +000"])
class TextAreaDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = TextAreaWidget
# It uses the default DisplayWidget
def testRender(self):
value = u"""
texttexttexttexttexttextexttexttext\xE9\xE9\xE9\xE9\xE9\xE9\xE9\xE9\xE9
texttexttexttexttextte\xE9\xE9\xE9\xE9\xE9xttexttexttexttexttexttexttex
texttexttexttexttexttexttexttexttexttexttexttexttexttexttext
"""
self._widget.setRenderedValue(value)
self.assertTrue(value, self._widget._toFieldValue(value))
self.verifyResult(self._widget(), ["<textarea",
self._widget._toFormValue(value)])
check_list = (
('id', 'field.foo'),
('name', 'field.foo'),
# ('value', ), tested above
('cols', '60'),
('rows', '15'),
)
for a, v in check_list:
self.verifyResult(self._widget(), [a, v])
class BytesAreaDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = BytesAreaWidget
# It uses the default DisplayWidget
def testRender(self):
value = """
texttexttexttexttexttexttexttexttexttexttexttexttexttexttext
texttexttexttexttexttexttexttexttexttexttexttexttexttexttext
texttexttexttexttexttexttexttexttexttexttexttexttexttexttext
"""
self._widget.setRenderedValue(value)
self.assertTrue(value, self._widget._toFieldValue(value))
self.verifyResult(self._widget(), ["<textarea",
self._widget._toFormValue(value)])
check_list = (
('id', 'field.foo'),
('name', 'field.foo'),
# ('value', ), tested above
('cols', '60'),
('rows', '15'),
)
for a, v in check_list:
self.verifyResult(self._widget(), [a, v])
class BytesDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = BytesWidget
# It uses the BytesDisplayWidget
def testRender(self):
value = "Food Value"
self._widget.setRenderedValue(value)
check_list = ('type="text"', 'id="field.foo"', 'name="field.foo"',
'value="%s"' % value, 'size="20"')
self.verifyResult(self._widget(), check_list)
class ASCIIDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = ASCIIWidget
# It uses the default BytesDisplayWidget
def testRender(self):
value = "Food Value"
self._widget.setRenderedValue(value)
check_list = ('type="text"', 'id="field.foo"', 'name="field.foo"',
'value="%s"' % value, 'size="20"')
self.verifyResult(self._widget(), check_list)
class PasswordDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = PasswordWidget
_FieldFactory = Password
# It uses the default DisplayWidget
def testRender(self):
value = 'Foo Value'
self._widget.setRenderedValue(value)
check_list = ('type="password"', 'id="field.foo"', 'name="field.foo"',
'value=""', 'size="20"')
self.verifyResult(self._widget(), check_list)
def testUnchangedPassword(self):
# The password hasn't been set yet, so an empty string
# is regarded as an empty field.
self.assertEqual(None, self._widget._toFieldValue(''))
# Now the password has been filled in, so the empty string
# is regarded as the special value for UNCHANGED_PASSWORD.
self._widget.context.context.foo = u'existing password'
self.assertEqual(self._widget.context.UNCHANGED_PASSWORD,
self._widget._toFieldValue(''))
class FileDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = FileWidget
# It uses the default DisplayWidget
def testRender(self):
value = 'Foo Value'
self._widget.setRenderedValue(value)
check_list = ('type="file"', 'id="field.foo"', 'name="field.foo"',
'size="20"')
self.verifyResult(self._widget(), check_list)
check_list = ('type="hidden"',) + check_list[1:-1]
self.verifyResult(self._widget.hidden(), check_list)
check_list = ('style="color: red"',) + check_list
self._widget.extra = 'style="color: red"'
self.verifyResult(self._widget.hidden(), check_list)
class IntDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = IntWidget
# It uses the default DisplayWidget
def testRender(self):
value = 1
self._widget.setRenderedValue(value)
check_list = ('type="text"', 'id="field.foo"', 'name="field.foo"',
'size="10"', 'value="%s"' % str(value))
self.verifyResult(self._widget(), check_list)
class FloatDisplayWidgetTest(BrowserWidgetTest):
_WidgetFactory = FloatWidget
# It uses the default DisplayWidget
def testRender(self):
value = 1.2
self._widget.setRenderedValue(value)
check_list = ('type="text"', 'id="field.foo"', 'name="field.foo"',
'size="10"', 'value="%s"' % str(value))
self.verifyResult(self._widget(), check_list)
def test_w_nonrequired_and_missing_value_and_no_inout():
"""
There was a bug that caused the value attribute to be set to
'value' under these circumstances.
>>> from zope.schema import TextLine
>>> field = TextLine(__name__='foo', title=u'on',
... required=False, missing_value=u'')
>>> request = TestRequest()
>>> widget = TextWidget(field, request)
>>> def normalize(s):
... return '\\n '.join(filter(None, s.split(' ')))
>>> print(normalize( widget() ))
<input
class="textType"
id="field.foo"
name="field.foo"
size="20"
type="text"
value=""
/>
"""
def test_no_error_on_render_only():
"""This is really a test of a bug fix to SimpleInputWidget.
_error shouldn't be set due to an *internal* call to getInputValue
when rendering.
>>> from zope.schema import TextLine
>>> field = TextLine(__name__='foo')
>>> request = TestRequest(form={'field.foo': ''})
>>> widget = TextWidget(field, request)
>>> ignored = widget()
>>> str(widget.error())
u''
"""
def test_text_area_works_with_missing_value():
"""
>>> from zope.schema import Text
>>> field = Text(__name__='foo', title=u'on',
... required=False, missing_value=u'')
>>> request = TestRequest()
>>> widget = TextAreaWidget(field, request)
>>> def normalize(s):
... return '\\n '.join(filter(None, s.split(' ')))
>>> print(normalize( widget() ))
<textarea
cols="60"
id="field.foo"
name="field.foo"
rows="15"
></textarea>
>>> print(normalize( widget.hidden() ))
<input
class="hiddenType"
id="field.foo"
name="field.foo"
type="hidden"
value=""
/>
"""
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(TextWidgetTest),
unittest.makeSuite(URIDisplayWidgetTest),
unittest.makeSuite(DateDisplayWidgetTest),
unittest.makeSuite(DatetimeDisplayWidgetTest),
unittest.makeSuite(TextAreaDisplayWidgetTest),
unittest.makeSuite(BytesAreaDisplayWidgetTest),
unittest.makeSuite(PasswordDisplayWidgetTest),
unittest.makeSuite(FileDisplayWidgetTest),
unittest.makeSuite(IntDisplayWidgetTest),
unittest.makeSuite(FloatDisplayWidgetTest),
unittest.makeSuite(BytesDisplayWidgetTest),
unittest.makeSuite(ASCIIDisplayWidgetTest),
doctest.DocTestSuite(checker=checker),
))
|
the-stack_0_2436 | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('image01.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_image('E9', self.image_dir + 'red.png')
workbook.close()
self.assertExcelEqual()
def test_create_file_in_memory(self):
"""Test the creation of a simple XlsxWriter file with image(s)."""
workbook = Workbook(self.got_filename, {'in_memory': True})
worksheet = workbook.add_worksheet()
worksheet.insert_image('E9', self.image_dir + 'red.png')
workbook.close()
self.assertExcelEqual()
|
the-stack_0_2437 | '''
Created on Mar 10, 2019
@author: Burkhard A. Meier
'''
import sys
from PyQt5 import QtWidgets, QtGui
from Section4.Designer_code.Video2_2_slots_Design import Ui_MainWindow
class RunDesignerGUI():
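    """Builds the Qt Designer-generated main window, wires the menu actions and
    push button to their handlers, and runs the application event loop."""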
def __init__(self):
app = QtWidgets.QApplication(sys.argv)
self.MainWindow = QtWidgets.QMainWindow()
self.ui = Ui_MainWindow()
self.ui.setupUi(self.MainWindow)
self.update_widgets()
self.widget_actions()
self.MainWindow.show()
sys.exit(app.exec_())
def widget_actions(self):
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("icons/new_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) # correct relative path to icon
self.ui.actionNew.setIcon(icon)
self.ui.actionNew.setShortcut('Ctrl+N')
self.ui.actionExit.setStatusTip('Click to exit the application') # use ui reference to update status bar
self.ui.actionExit.triggered.connect(self.close_GUI) # connect widget to method when triggered (clicked)
self.ui.actionExit.setShortcut('Ctrl+Q') # keyboard shortcut, window has focus
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap("icons/exit_icon.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off) # modify icon location
self.ui.actionExit.setIcon(icon1) # use: self.ui.
#-------------------------------
self.ui.pushButton.clicked.connect(self.set_label) # add functionality to second button
def set_label(self):
window_text = self.MainWindow.windowTitle()
self.ui.label.setText(window_text) # set label text to window title
def close_GUI(self):
self.MainWindow.close() # call MainWindow close method, which closes the GUI
def update_widgets(self):
self.MainWindow.setWindowTitle('PyQt5 GUI') # use: self.MainWindow
if __name__ == "__main__":
RunDesignerGUI()
|
the-stack_0_2438 | """
Various tools for extracting signal components from a fit of the amplitude
distribution
"""
from . import pdf
from .Classdef import Statfit
import numpy as np
import time
import random
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_fit
def param0(sample, method='basic'):
"""Estimate initial parameters for HK fitting
Arguments
---------
sample : sequence
amplitudes
Keywords
--------
method : string
method to compute the initial parameters
"""
    if method == 'basic':
a = np.nanmean(sample)
s = np.nanstd(sample)
mu = 1.
return {'a':a, 's':s, 'mu':mu}
def lmfit(sample, fit_model='hk', bins='auto', p0 = None,
xtol=1e-4, ftol=1e-4):
"""Lmfit
Arguments
---------
sample : sequence
amplitudes between 0 and 1.
Keywords
--------
fit_model : string
name of the function (in pdf module) to use for the fit
bins : string
method to compute the bin width (inherited from numpy.histogram)
p0 : dict
Initial parameters. If None, estimated automatically.
    xtol : float
        Relative error tolerance in the parameter values, forwarded to the
        'leastsq' minimizer.
    ftol : float
        Relative error tolerance in the sum of squares, forwarded to the
        'leastsq' minimizer.
Return
------
A Statfit Class
"""
start = time.time()
winsize = len(sample)
bad = False
#--------------------------------------------------------------------------
# Clean sample
#--------------------------------------------------------------------------
sample = np.array(sample)
sample = sample[~np.isnan(sample)]
if len(sample) == 0:
bad = True
sample = [random.random() for r in np.arange(winsize)]
#--------------------------------------------------------------------------
# Make the histogram
#--------------------------------------------------------------------------
# n, edges, patches = hist(sample, bins=bins, normed=True)
n, edges = np.histogram(sample, bins=bins, density=True)
plt.clf()
x = ((np.roll(edges, -1) + edges)/2.)[0:-1]
#--------------------------------------------------------------------------
# Initial Parameters for the fit
#--------------------------------------------------------------------------
if p0 is None:
p0 = param0(sample)
prm0 = Parameters()
# (Name, Value, Vary, Min, Max, Expr)
prm0.add('a', p0['a'], True, 0, 1, None)
prm0.add('s', p0['s'], True, 0, 1, None)
prm0.add('mu', p0['mu'], True, 0, 1000, None)
prm0.add('pt', np.average(sample)**2,None, 0, 1, 'a**2+2*s**2')
#--------------------------------------------------------------------------
# Fit
#--------------------------------------------------------------------------
pdf2use = getattr(pdf, fit_model)
# use 'lbfgs' fit if error with 'leastsq' fit
try:
p = minimize(pdf2use, prm0, args=(x, n), method='leastsq',
xtol=xtol, ftol=ftol)
except KeyboardInterrupt:
raise
except:
print('!! Error with LEASTSQ fit, use L-BFGS-B instead')
p = minimize(pdf2use, prm0, args=(x, n), method='lbfgs')
#--------------------------------------------------------------------------
# Output
#--------------------------------------------------------------------------
elapsed = time.time() - start
# Identify bad results
if bad is True:
p.success = False
# Create values dict For lmfit >0.9.0 compatibility since it is no longer
# in the minimize output
values = {}
for i in p.params.keys():
values[i] = p.params[i].value
# Results
result = Statfit(sample, pdf2use, values, p.params,
p.chisqr, p.redchi, elapsed, p.nfev, p.message, p.success,
p.residual, x, n, edges, bins=bins)
# result = Statfit(sample, p.userfcn, p.kws, p.values, p.params,
# p.chisqr, p.redchi, elapsed, p.nfev, p.message, p.success,
# p.residual, x, n, edges, bins=bins)
return result
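# Example usage (illustrative sketch; `sample` stands for any 1-D sequence of
# amplitudes between 0 and 1 supplied by the calling code):
#
#     p0 = param0(sample)                           # initial guess for 'a', 's' and 'mu'
#     result = lmfit(sample, fit_model='hk', p0=p0) # returns a Statfit instance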
|
the-stack_0_2440 | # uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\Push2\color_chooser.py
# Compiled at: 2018-11-30 15:48:11
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import liveobj_changed, liveobj_valid, nop
from ableton.v2.control_surface import Component
from ableton.v2.control_surface.control import ButtonControl, control_matrix
from pushbase.colors import Pulse
from pushbase.message_box_component import Messenger
from .colors import IndexedColor, Rgb, inverse_translate_color_index, translate_color_index
from .skin_default import SELECTION_PULSE_SPEED
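# 8x8 layout of Push pad color indices: each entry is the color index shown on
# the button at that (row, column); `None` pads carry no color and, when pressed,
# switch the edited object back to automatic coloring (see `matrix.pressed`).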
COLOR_CHOOSER_LAYOUT = (
(10, 11, 12, 13, 14, 15, 16, 17),
(9, None, None, None, None, None, None, 18),
(8, None, None, None, None, None, None, 19),
(7, None, None, None, None, None, None, 20),
(5, None, None, None, None, None, None, 21),
(6, None, None, None, None, None, None, 22),
(4, None, None, None, None, None, None, 23),
(3, 2, 1, None, None, 25, 26, 24))
class ColorChooserComponent(Component, Messenger):
matrix = control_matrix(ButtonControl, dimensions=(8, 8))
def __init__(self, *a, **k):
super(ColorChooserComponent, self).__init__(is_enabled=False, *a, **k)
self._object = None
self._notification_ref = nop
for button in self.matrix:
row, column = button.coordinate
button.color_index = COLOR_CHOOSER_LAYOUT[row][column]
return
@property
def object(self):
return self._object
@object.setter
def object(self, obj):
if liveobj_changed(self._object, obj):
self._object = obj
if obj is None:
notification = self._notification_ref()
if notification:
notification.hide()
self.set_enabled(False)
else:
self._render_color_palette(translate_color_index(obj.color_index))
self.set_enabled(True)
self._notification_ref = self.show_notification(b'Select a color for: %s' % obj.name, notification_time=-1)
return
@matrix.pressed
def matrix(self, button):
if liveobj_valid(self.object):
if button.color_index is None:
if hasattr(self.object, b'is_auto_colored'):
self.object.is_auto_colored = True
self.show_notification(b'Color automatically enabled for: %s' % self.object.name)
else:
self.object.color_index = inverse_translate_color_index(button.color_index)
self.object = None
return
def _render_color_palette(self, selected_color_index):
for button in self.matrix:
color_index = button.color_index
if color_index is not None:
if color_index == selected_color_index:
button.color = Pulse(IndexedColor.from_push_index(color_index, shade_level=2), IndexedColor.from_push_index(color_index), SELECTION_PULSE_SPEED)
else:
button.color = IndexedColor.from_push_index(color_index)
else:
button.color = Rgb.BLACK
            return
|
the-stack_0_2444 | import re, datetime
from Helpers.freezable_list import FrozenDict
from pytjson.Exceptions import ParseError
class Datatype:
# Initializer, will be overriden below
TAGS = {}
isScalar = re.compile(r'^[a-z0-9]*$')
isBin = re.compile('^[01]{8}$')
    isOnlyNumbers = re.compile(r'^-?(0|[1-9][0-9]*)$')
isNonScalar = re.compile(r'^([A-Z][a-z0-9]*)\<(.*)\>$')
@staticmethod
def parse(tag):
if not isinstance(tag, (str, unicode)):
raise TypeError("expected String, got {}".format(type(tag)))
if tag == "O":
# Object
return Datatype.TAGS[tag]
elif Datatype.isNonScalar.match(tag):
tmp_inner = Datatype.isNonScalar.match(tag).group(2)
tmp_type = Datatype.isNonScalar.match(tag).group(1)
inner = Datatype.parse(tmp_inner)
if tmp_type == "A":
tmp = Array(inner)
else:
tmp = Datatype.TAGS[tmp_type]
return tmp
elif Datatype.isScalar.match(tag):
# Scalar
return Datatype.TAGS[tag]
else:
raise ParseError("couldn't parse tag: {}".format(repr(tag)))
@staticmethod
def identify_type(obj, is_bytes):
if type(obj) is dict:
return Datatype.TAGS["O"]
elif type(obj) is list:
t = Array(None)
return t._identify_type(obj)
elif isinstance(obj, (str)):
return Datatype.TAGS["s"]
elif type(obj) is int:
return Datatype.TAGS["i"]
elif type(obj) is float:
return Datatype.TAGS["f"]
elif isinstance(obj, datetime.datetime):
return Datatype.TAGS["t"]
elif is_bytes:
return Datatype.TAGS["b"]
else:
            raise TypeError("don't know how to serialize {} as TJSON".format(type(obj)))
def datatype_generate(self, obj):
        is_bytes = isinstance(obj, bytes)
return self.identify_type(obj, is_bytes).generate(obj)
class Scalar(Datatype):
@staticmethod
def isScalar():
return True
class NonScalar(Datatype):
def __init__(self, inner_type):
self.inner_type = inner_type
@staticmethod
def isScalar():
return False
class Number(Scalar):
pass
class Integer:
@staticmethod
def generate(int_data):
# Integers are serialized as strings to sidestep the limits of some JSON parsers
return str(int_data).encode("utf-8")
class Binary(Scalar):
pass
from datatypes.string import String
from datatypes.timestamp import Timestamp
from datatypes.float import Float
from datatypes.integer import SignedInt, UnsignedInt
from datatypes.array import Array
from datatypes.binary import Binary16, Binary32, Binary64
from datatypes.object import Object
class Datatype(Datatype):
Datatype.TAGS = FrozenDict(
O = Object(None),
b = Binary64(),
b16 = Binary16(),
b32 = Binary32(),
b64 = Binary64(),
f = Float(),
i = SignedInt(),
s = String(),
t = Timestamp(),
u = UnsignedInt()
)
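# Illustrative examples of the tag grammar handled by Datatype.parse() above
# (shown as comments only):
#
#     Datatype.parse("s")      # -> the String() scalar handler
#     Datatype.parse("O")      # -> the Object handler
#     Datatype.parse("A<i>")   # -> Array(SignedInt()), a non-scalar tag with an inner type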
|
the-stack_0_2445 | import folium
import pandas
import math
import re
data = pandas.read_excel("GVP_Volcano_List.xlsx",header = 1)
map = folium.Map(tiles="Mapbox Bright")
featureGroup = folium.FeatureGroup(name="Volcanoes")
#Debug
dumpfh = open('out.txt', 'w')
latData = data["Latitude"]
lonData = data["Longitude"]
nameData = data["Volcano Name"]
for lat, lon, name, i in zip(latData, lonData, nameData, range(0, len(nameData))):
    if not math.isnan(lat) and not math.isnan(lon):
        #Debug
        dumpfh.write('{i}: {lat} {lon} {name}\n'.format(i=i, lat=lat, lon=lon, name=name))
        dumpfh.flush()
        name = re.sub("'", '', name)
        # folium expects coordinates as [latitude, longitude]
        featureGroup.add_child(folium.Marker(location=[lat, lon], popup=str(name), icon=folium.Icon(color="green")))
map.add_child(featureGroup)
map.save("VolcanoMap.html")
#Debug
print('finished')
|
the-stack_0_2446 | #
# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)
# Ivannikov Institute for System Programming of the Russian Academy of Sciences
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from difflib import unified_diff
from django.core.exceptions import ObjectDoesNotExist
from django.utils.translation import ugettext_lazy as _
from bridge.vars import USER_ROLES, JOB_ROLES
from bridge.utils import BridgeException
import marks.SafeUtils as SafeUtils
import marks.UnsafeUtils as UnsafeUtils
import marks.UnknownUtils as UnknownUtils
from users.models import User
from reports.models import ReportUnsafe, ReportSafe, ReportUnknown
from marks.models import MarkSafe, MarkUnsafe, MarkUnknown, MarkSafeHistory, MarkUnsafeHistory,\
SafeTag, UnsafeTag, ConvertedTraces, MarkSafeReport, MarkUnsafeReport, MarkUnknownReport
STATUS_COLOR = {
'0': '#e81919',
'1': '#FF8533',
'2': '#FF8533',
'3': '#00c600',
}
UNSAFE_COLOR = {
'0': '#cb58ec',
'1': '#e81919',
'2': '#e81919',
'3': '#FF8533',
'4': '#D11919', # Incompatible marks
'5': '#000000', # Without marks
}
SAFE_COLOR = {
'0': '#cb58ec',
'1': '#FF8533',
'2': '#e81919',
'3': '#D11919', # Incompatible marks
'4': '#000000', # Without marks
}
class MarkAccess:
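    """Permission helper: decides whether the given user may edit, create or
    delete a mark, remove one of its versions, or freeze it, based on the user
    role and the related job's access settings."""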
def __init__(self, user, mark=None, report=None):
self.user = user
self.mark = mark
self.report = report
def can_edit(self):
if not isinstance(self.user, User):
return False
if self.user.extended.role == USER_ROLES[2][0]:
return True
if not self.mark.is_modifiable or self.mark.version == 0:
return False
if self.user.extended.role == USER_ROLES[3][0]:
return True
if isinstance(self.mark, (MarkUnsafe, MarkSafe, MarkUnknown)):
first_vers = self.mark.versions.order_by('version').first()
else:
return False
if first_vers.author == self.user:
return True
if self.mark.job is not None:
first_v = self.mark.job.versions.order_by('version').first()
if first_v.change_author == self.user:
return True
last_v = self.mark.job.versions.get(version=self.mark.job.version)
if last_v.global_role in [JOB_ROLES[2][0], JOB_ROLES[4][0]]:
return True
try:
user_role = last_v.userrole_set.get(user=self.user)
if user_role.role in [JOB_ROLES[2][0], JOB_ROLES[4][0]]:
return True
except ObjectDoesNotExist:
return False
return False
def can_create(self):
if not isinstance(self.user, User):
return False
if isinstance(self.report, (ReportUnsafe, ReportSafe, ReportUnknown)):
if self.user.extended.role in [USER_ROLES[2][0], USER_ROLES[3][0]]:
return True
first_v = self.report.root.job.versions.order_by('version').first()
if first_v.change_author == self.user:
return True
try:
last_v = self.report.root.job.versions.get(version=self.report.root.job.version)
except ObjectDoesNotExist:
return False
if last_v.global_role in [JOB_ROLES[2][0], JOB_ROLES[4][0]]:
return True
try:
user_role = last_v.userrole_set.get(user=self.user)
if user_role.role in [JOB_ROLES[2][0], JOB_ROLES[4][0]]:
return True
except ObjectDoesNotExist:
return False
elif self.user.extended.role in [USER_ROLES[2][0], USER_ROLES[3][0]]:
return True
return False
def can_delete(self):
if not isinstance(self.user, User):
return False
if self.user.extended.role == USER_ROLES[2][0]:
return True
if not self.mark.is_modifiable or self.mark.version == 0:
return False
if self.user.extended.role == USER_ROLES[3][0]:
return True
authors = list(set(v_id for v_id, in self.mark.versions.values_list('author_id') if v_id is not None))
if len(authors) == 1 and authors[0] == self.user.id:
return True
return False
def can_remove_version(self, mark_version):
if not isinstance(self.user, User) or not isinstance(self.mark, (MarkUnsafe, MarkSafe, MarkUnknown)):
return False
# Nobody can remove first or last version. Also while mark is being deleted users can't clear versions.
if mark_version.version in {1, self.mark.version} or self.mark.version == 0:
return False
# Manager can remove all other versions
if self.user.extended.role == USER_ROLES[2][0]:
return True
# Others can't remove versions if mark is frozen.
if not self.mark.is_modifiable:
return False
# Expert can remove all versions.
if self.user.extended.role == USER_ROLES[3][0]:
return True
# Others can remove version only if they are authors of it.
if mark_version.author == self.user:
return True
return False
def can_freeze(self):
if not isinstance(self.user, User):
return False
return self.user.extended.role == USER_ROLES[2][0]
class TagsInfo:
def __init__(self, mark_type, mark=None):
self.mark = mark
self.type = mark_type
self.tags_old = []
self.tags_available = []
self.__get_tags()
def __get_tags(self):
if self.type not in ['unsafe', 'safe']:
return
if isinstance(self.mark, (MarkUnsafe, MarkSafe)):
last_v = self.mark.versions.get(version=self.mark.version)
self.tags_old = list(t['tag__tag'] for t in last_v.tags.order_by('tag__tag').values('tag__tag'))
elif isinstance(self.mark, (MarkUnsafeHistory, MarkSafeHistory)):
self.tags_old = list(t['tag__tag'] for t in self.mark.tags.order_by('tag__tag').values('tag__tag'))
if self.type == 'unsafe':
table = UnsafeTag
else:
table = SafeTag
self.tags_available = list(t['tag'] for t in table.objects.values('tag') if t['tag'] not in self.tags_old)
class NewMark:
def __init__(self, user, inst, data):
self._user = user
self._data = data
self._inst = inst
self._handler = self.__get_handler()
self.changes = {}
self.mark = None
def __get_handler(self):
if isinstance(self._inst, (ReportSafe, MarkSafe)):
return SafeUtils.NewMark(self._user, self._data)
elif isinstance(self._inst, (ReportUnsafe, MarkUnsafe)):
return UnsafeUtils.NewMark(self._user, self._data)
elif isinstance(self._inst, (ReportUnknown, MarkUnknown)):
return UnknownUtils.NewMark(self._user, self._data)
else:
raise ValueError('Unsupported type: %s' % type(self._inst))
def create_mark(self):
self.mark = self._handler.create_mark(self._inst)
self.changes = self._handler.changes
def change_mark(self):
self.mark = self._handler.change_mark(self._inst)
self.changes = self._handler.changes
class CompareMarkVersions:
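    """Collects the differences between two versions of a mark: verdict, status,
    tags, error-trace comparison function, error trace, compared attributes,
    unknown-match function and problem pattern."""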
def __init__(self, mark_type, version1, version2):
self.type = mark_type
self.v1 = version1
self.v2 = version2
self.verdict = self.__verdict_change()
self.status = self.__status_change()
self.tags = self.__tags_change()
self.et_func = self.__et_func_change()
self.et = self.__et_change()
self.attrs = self.__attr_change()
self.unknown_func = self.__unknown_func_change()
self.problem = self.__problem_change()
def __verdict_change(self):
if self.type == 'unknown' or self.v1.verdict == self.v2.verdict:
return None
if self.type == 'safe':
return [{'title': self.v1.get_verdict_display(), 'color': SAFE_COLOR[self.v1.verdict]},
{'title': self.v2.get_verdict_display(), 'color': SAFE_COLOR[self.v2.verdict]}]
else:
return [{'title': self.v1.get_verdict_display(), 'color': UNSAFE_COLOR[self.v1.verdict]},
{'title': self.v2.get_verdict_display(), 'color': UNSAFE_COLOR[self.v2.verdict]}]
def __status_change(self):
if self.v1.status == self.v2.status:
return None
return [{'title': self.v1.get_status_display(), 'color': STATUS_COLOR[self.v1.status]},
{'title': self.v2.get_status_display(), 'color': STATUS_COLOR[self.v2.status]}]
def __tags_change(self):
if self.type == 'unknown':
return None
tags1 = set(t for t, in self.v1.tags.values_list('tag__tag'))
tags2 = set(t for t, in self.v2.tags.values_list('tag__tag'))
if tags1 == tags2:
return None
return ['; '.join(sorted(tags1)), '; '.join(sorted(tags2))]
def __et_func_change(self):
if self.type != 'unsafe' or self.v1.function_id == self.v2.function_id:
return None
return [{
'compare_name': self.v1.function.name, 'compare_desc': self.v1.function.description,
'convert_name': self.v1.function.convert.name, 'convert_desc': self.v1.function.convert.description
}, {
'compare_name': self.v2.function.name, 'compare_desc': self.v2.function.description,
'convert_name': self.v2.function.convert.name, 'convert_desc': self.v2.function.convert.description
}]
def __et_change(self):
if self.type != 'unsafe' or self.v1.error_trace_id == self.v2.error_trace_id:
return None
diff_result = []
f1 = ConvertedTraces.objects.get(id=self.v1.error_trace_id)
f2 = ConvertedTraces.objects.get(id=self.v2.error_trace_id)
with f1.file as fp1, f2.file as fp2:
for line in unified_diff(fp1.read().decode('utf8').split('\n'), fp2.read().decode('utf8').split('\n')):
diff_result.append(line)
return '\n'.join(diff_result)
def __attr_change(self):
attrs1 = set(a_id for a_id, in self.v1.attrs.filter(is_compare=True).values_list('attr_id'))
attrs2 = set(a_id for a_id, in self.v2.attrs.filter(is_compare=True).values_list('attr_id'))
if attrs1 == attrs2:
return None
return [
list((a.attr.name.name, a.attr.value) for a in self.v1.attrs.filter(is_compare=True)
.select_related('attr', 'attr__name').order_by('id')),
list((a.attr.name.name, a.attr.value) for a in self.v2.attrs.filter(is_compare=True)
.select_related('attr', 'attr__name').order_by('id'))
]
def __unknown_func_change(self):
if self.type != 'unknown':
return None
if self.v1.is_regexp == self.v2.is_regexp and self.v1.function == self.v2.function:
return None
return [{'is_regexp': self.v1.is_regexp, 'func': self.v1.function},
{'is_regexp': self.v2.is_regexp, 'func': self.v2.function}]
def __problem_change(self):
if self.type != 'unknown':
return None
if self.v1.problem_pattern == self.v2.problem_pattern and self.v1.link == self.v2.link:
return None
return [{'pattern': self.v1.problem_pattern, 'link': self.v1.link},
{'pattern': self.v2.problem_pattern, 'link': self.v2.link}]
def delete_marks(user, marks_type, mark_ids, report_id=None):
if marks_type == 'safe':
marks = MarkSafe.objects.filter(id__in=mark_ids)
elif marks_type == 'unsafe':
marks = MarkUnsafe.objects.filter(id__in=mark_ids)
elif marks_type == 'unknown':
marks = MarkUnknown.objects.filter(id__in=mark_ids)
else:
raise ValueError('Unsupported marks type: %s' % marks_type)
if not all(MarkAccess(user, mark=mark).can_delete() for mark in marks):
if len(marks) > 1:
raise BridgeException(_("You can't delete one of the selected marks"))
elif len(marks) == 1:
            raise BridgeException(_("You don't have access to delete this mark"))
else:
raise BridgeException(_('Nothing to delete'))
if marks_type == 'safe':
SafeUtils.delete_marks(marks)
reports_model = ReportSafe
elif marks_type == 'unsafe':
UnsafeUtils.delete_marks(marks)
reports_model = ReportUnsafe
else:
UnknownUtils.delete_marks(marks)
reports_model = ReportUnknown
if report_id:
try:
report = reports_model.objects.get(id=report_id)
except ObjectDoesNotExist:
return None
return report.id if not isinstance(report, ReportUnsafe) else report.trace_id
class DownloadTags:
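    """Iterable wrapper that serializes all safe or unsafe tags (name, description
    and optional parent) to a JSON-encoded bytestring for downloading."""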
def __init__(self, tags_type):
self._type = tags_type
self._data = self.__get_tags_data()
def __iter__(self):
yield self._data
def file_size(self):
return len(self._data)
def __get_tags_data(self):
if self._type == 'safe':
tags_model = SafeTag
elif self._type == 'unsafe':
tags_model = UnsafeTag
else:
return b''
tags_data = []
for tag in tags_model.objects.all():
tag_data = {'name': tag.tag, 'description': tag.description}
if tag.parent is not None:
tag_data['parent'] = tag.parent.tag
tags_data.append(tag_data)
return json.dumps(tags_data, ensure_ascii=False, sort_keys=True, indent=4).encode('utf8')
class UpdateAssociationCache:
def __init__(self, association, recalc):
self._association = association
self._recalc = recalc
self.__update()
def __update(self):
if isinstance(self._association, MarkSafeReport):
self.__update_cache(SafeUtils)
elif isinstance(self._association, MarkUnsafeReport):
self.__update_cache(UnsafeUtils)
elif isinstance(self._association, MarkUnknownReport) and self._recalc:
UnknownUtils.update_unknowns_cache([self._association.report])
def __update_cache(self, leaf_lib):
if self._recalc:
changes = leaf_lib.UpdateVerdicts({self._association.mark_id: {
self._association.report: {'kind': '=', 'verdict1': self._association.report.verdict}
}}).changes.get(self._association.mark_id, {})
leaf_lib.RecalculateTags(list(changes))
leaf_lib.update_confirmed_cache([self._association.report])
|
the-stack_0_2447 | import abc
import functools
import logging
import pkg_resources
import six
import textwrap
from lymph.exceptions import Timeout, LookupFailure
logger = logging.getLogger(__name__)
docstring_format_vars = {k: textwrap.dedent(v).strip() for k, v in six.iteritems({
'COMMON_OPTIONS': """
Common Options:
--config=<file>, -c <file> Load configuration from the given path.
--help, -h Print this help message and exit.
--logfile=<file> Redirect log output to the given file.
--loglevel=<level> Set the log level to one of DEBUG, INFO, WARNING,
ERROR. [default: WARNING]
--version Show the lymph version and exit.
--color Force colored output.
--no-color Disable colored output.
--vars=<file> Load environment variables from the given path.
""",
'INSTANCE_OPTIONS': """
Instance Options:
--isolated, -i Don't register this service.
--port=<port>, -p <port> Use this port for the RPC endpoint.
--ip=<address> Use this IP for all sockets.
--guess-external-ip, -g Guess the public facing IP of this machine and
use it instead of the provided address.
--reload Automatically stop the service when imported
python files in the current working directory
change. The process will be restarted by the
node. Do not use this in production.
""",
})}
def format_docstring(doc):
return textwrap.dedent(doc).format(**docstring_format_vars).strip()
@six.add_metaclass(abc.ABCMeta)
class Command(object):
needs_config = True
short_description = ''
def __init__(self, args, config, terminal):
self.args = args
self.config = config
self.terminal = terminal
@classmethod
def get_help(cls):
return format_docstring(cls.__doc__)
@abc.abstractmethod
def run(self):
raise NotImplementedError
_command_class_cache = None
def get_command_classes():
global _command_class_cache
if _command_class_cache is None:
_command_class_cache, entry_points = {}, {}
for entry_point in pkg_resources.iter_entry_points('lymph.cli'):
name = entry_point.name
if name in entry_points:
logger.error('ignoring duplicate command definition for %s (already installed: %s)', entry_point, entry_points[name])
continue
entry_points[name] = entry_point
cls = entry_point.load()
cls.name = name
_command_class_cache[name] = cls
return _command_class_cache
def get_command_class(name):
return get_command_classes()[name]
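# Illustrative sketch of a concrete command (hypothetical name, not part of this
# module): subclasses of Command are registered under the 'lymph.cli' entry-point
# group and picked up by get_command_classes() above.
#
#     class ListCommand(Command):
#         """
#         Usage: lymph list [options]
#
#         {COMMON_OPTIONS}
#         """
#         short_description = 'List the installed lymph commands.'
#
#         def run(self):
#             for name in sorted(get_command_classes()):
#                 print(name)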
def handle_request_errors(func):
@functools.wraps(func)
def decorated(*args, **kwargs):
try:
func(*args, **kwargs)
except LookupFailure as e:
logger.error("The specified service name could not be found: %s: %s" % (type(e).__name__, e))
return 1
except Timeout:
logger.error("The request timed out. Either the service is not available or busy.")
return 1
return decorated
|
the-stack_0_2449 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import warnings
from almost import Approximate
from pytest import deprecated_call, raises
from conftest import various_backends
import trueskill as t
from trueskill import (
quality, quality_1vs1, rate, rate_1vs1, Rating, setup, TrueSkill)
warnings.simplefilter('always')
inf = float('inf')
nan = float('nan')
class almost(Approximate):
def normalize(self, value):
if isinstance(value, Rating):
return self.normalize(tuple(value))
elif isinstance(value, list):
try:
if isinstance(value[0][0], Rating):
# flatten transformed ratings
return list(sum(value, ()))
except (TypeError, IndexError):
pass
return super(almost, self).normalize(value)
@classmethod
def wrap(cls, f, *args, **kwargs):
return lambda *a, **k: cls(f(*a, **k), *args, **kwargs)
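# Shorthand wrappers whose return values are wrapped in `almost`, so the
# assertions below can compare ratings and match qualities approximately.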
_rate = almost.wrap(rate)
_rate_1vs1 = almost.wrap(rate_1vs1)
_quality = almost.wrap(quality)
_quality_1vs1 = almost.wrap(quality_1vs1)
# usage
def test_compatibility_with_another_rating_systems():
"""All rating system modules should implement ``rate_1vs1`` and
``quality_1vs1`` to provide shortcuts for 1 vs 1 simple competition games.
"""
r1, r2 = Rating(30, 3), Rating(20, 2)
assert quality_1vs1(r1, r2) == quality([(r1,), (r2,)])
rated = rate([(r1,), (r2,)])
assert rate_1vs1(r1, r2) == (rated[0][0], rated[1][0])
rated = rate([(r1,), (r2,)], [0, 0])
assert rate_1vs1(r1, r2, drawn=True) == (rated[0][0], rated[1][0])
def test_compare_ratings():
assert Rating(1, 2) == Rating(1, 2)
assert Rating(1, 2) != Rating(1, 3)
assert Rating(2, 2) > Rating(1, 2)
assert Rating(3, 2) >= Rating(1, 2)
assert Rating(0, 2) < Rating(1, 2)
assert Rating(-1, 2) <= Rating(1, 2)
def test_rating_to_number():
assert int(Rating(1, 2)) == 1
assert float(Rating(1.1, 2)) == 1.1
assert complex(Rating(1.2, 2)) == 1.2 + 0j
try:
assert long(Rating(1, 2)) == long(1)
except NameError:
# Python 3 doesn't have `long` anymore
pass
def test_unsorted_groups():
t1, t2, t3 = generate_teams([1, 1, 1])
rated = rate([t1, t2, t3], [2, 1, 0])
assert almost(rated) == \
[(18.325, 6.656), (25.000, 6.208), (31.675, 6.656)]
def test_custom_environment():
env = TrueSkill(draw_probability=.50)
t1, t2 = generate_teams([1, 1], env=env)
rated = env.rate([t1, t2])
assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
def test_setup_global_environment():
try:
setup(draw_probability=.50)
t1, t2 = generate_teams([1, 1])
rated = rate([t1, t2])
assert almost(rated) == [(30.267, 7.077), (19.733, 7.077)]
finally:
# rollback
setup()
def test_invalid_rating_groups():
env = TrueSkill()
with raises(ValueError):
env.validate_rating_groups([])
with raises(ValueError):
env.validate_rating_groups([()])
# need multiple groups not just one
with raises(ValueError):
env.validate_rating_groups([(Rating(),)])
# empty group is not allowed
with raises(ValueError):
env.validate_rating_groups([(Rating(),), ()])
# all groups should be same structure
with raises(TypeError):
env.validate_rating_groups([(Rating(),), {0: Rating()}])
def test_deprecated_methods():
env = TrueSkill()
r1, r2, r3 = Rating(), Rating(), Rating()
deprecated_call(t.transform_ratings, [(r1,), (r2,), (r3,)])
deprecated_call(t.match_quality, [(r1,), (r2,), (r3,)])
deprecated_call(env.Rating)
deprecated_call(env.transform_ratings, [(r1,), (r2,), (r3,)])
deprecated_call(env.match_quality, [(r1,), (r2,), (r3,)])
deprecated_call(env.rate_1vs1, r1, r2)
deprecated_call(env.quality_1vs1, r1, r2)
deprecated_call(lambda: Rating().exposure)
dyn = TrueSkill(draw_probability=t.dynamic_draw_probability)
deprecated_call(dyn.rate, [(r1,), (r2,)])
def test_deprecated_individual_rating_groups():
r1, r2, r3 = Rating(50, 1), Rating(10, 5), Rating(15, 5)
with raises(TypeError):
deprecated_call(rate, [r1, r2, r3])
with raises(TypeError):
deprecated_call(quality, [r1, r2, r3])
assert t.transform_ratings([r1, r2, r3]) == rate([(r1,), (r2,), (r3,)])
assert t.match_quality([r1, r2, r3]) == quality([(r1,), (r2,), (r3,)])
deprecated_call(t.transform_ratings, [r1, r2, r3])
deprecated_call(t.match_quality, [r1, r2, r3])
def test_rating_tuples():
r1, r2, r3 = Rating(), Rating(), Rating()
rated = rate([(r1, r2), (r3,)])
assert len(rated) == 2
assert isinstance(rated[0], tuple)
assert isinstance(rated[1], tuple)
assert len(rated[0]) == 2
assert len(rated[1]) == 1
assert isinstance(rated[0][0], Rating)
def test_rating_dicts():
class Player(object):
def __init__(self, name, rating, team):
self.name = name
self.rating = rating
self.team = team
p1 = Player('Player A', Rating(), 0)
p2 = Player('Player B', Rating(), 0)
p3 = Player('Player C', Rating(), 1)
rated = rate([{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}])
assert len(rated) == 2
assert isinstance(rated[0], dict)
assert isinstance(rated[1], dict)
assert len(rated[0]) == 2
assert len(rated[1]) == 1
assert p1 in rated[0]
assert p2 in rated[0]
assert p3 in rated[1]
assert p1 not in rated[1]
assert p2 not in rated[1]
assert p3 not in rated[0]
assert isinstance(rated[0][p1], Rating)
p1.rating = rated[p1.team][p1]
p2.rating = rated[p2.team][p2]
p3.rating = rated[p3.team][p3]
def test_dont_use_0_for_min_delta():
with raises(ValueError):
rate([(Rating(),), (Rating(),)], min_delta=0)
def test_list_instead_of_tuple():
r1, r2 = Rating(), Rating()
assert rate([[r1], [r2]]) == rate([(r1,), (r2,)])
assert quality([[r1], [r2]]) == quality([(r1,), (r2,)])
def test_backend():
env = TrueSkill(backend=(NotImplemented, NotImplemented, NotImplemented))
with raises(TypeError):
env.rate_1vs1(Rating(), Rating())
with raises(ValueError):
# '__not_defined__' backend is not defined
TrueSkill(backend='__not_defined__')
# algorithm
def generate_teams(sizes, env=None):
rating_cls = Rating if env is None else env.create_rating
rating_groups = []
for size in sizes:
ratings = []
for x in range(size):
ratings.append(rating_cls())
rating_groups.append(tuple(ratings))
return rating_groups
def generate_individual(size, env=None):
return generate_teams([1] * size, env=env)
@various_backends
def test_n_vs_n():
# 1 vs 1
t1, t2 = generate_teams([1, 1])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == [(29.396, 7.171), (20.604, 7.171)]
assert _rate([t1, t2], [0, 0]) == [(25.000, 6.458), (25.000, 6.458)]
# 2 vs 2
t1, t2 = generate_teams([2, 2])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == \
[(28.108, 7.774), (28.108, 7.774), (21.892, 7.774), (21.892, 7.774)]
assert _rate([t1, t2], [0, 0]) == \
[(25.000, 7.455), (25.000, 7.455), (25.000, 7.455), (25.000, 7.455)]
# 4 vs 4
t1, t2 = generate_teams([4, 4])
assert _quality([t1, t2]) == 0.447
assert _rate([t1, t2]) == \
[(27.198, 8.059), (27.198, 8.059), (27.198, 8.059), (27.198, 8.059),
(22.802, 8.059), (22.802, 8.059), (22.802, 8.059), (22.802, 8.059)]
@various_backends
def test_1_vs_n():
t1, = generate_teams([1])
# 1 vs 2
t2, = generate_teams([2])
assert _quality([t1, t2]) == 0.135
assert _rate([t1, t2]) == \
[(33.730, 7.317), (16.270, 7.317), (16.270, 7.317)]
assert _rate([t1, t2], [0, 0]) == \
[(31.660, 7.138), (18.340, 7.138), (18.340, 7.138)]
# 1 vs 3
t2, = generate_teams([3])
assert _quality([t1, t2]) == 0.012
assert _rate([t1, t2]) == \
[(36.337, 7.527), (13.663, 7.527), (13.663, 7.527), (13.663, 7.527)]
assert almost(rate([t1, t2], [0, 0]), 2) == \
[(34.990, 7.455), (15.010, 7.455), (15.010, 7.455), (15.010, 7.455)]
# 1 vs 7
t2, = generate_teams([7])
assert _quality([t1, t2]) == 0
assert _rate([t1, t2]) == \
[(40.582, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917),
(9.418, 7.917), (9.418, 7.917), (9.418, 7.917), (9.418, 7.917)]
@various_backends
def test_individual():
# 3 players
players = generate_individual(3)
assert _quality(players) == 0.200
assert _rate(players) == \
[(31.675, 6.656), (25.000, 6.208), (18.325, 6.656)]
assert _rate(players, [0] * 3) == \
[(25.000, 5.698), (25.000, 5.695), (25.000, 5.698)]
# 4 players
players = generate_individual(4)
assert _quality(players) == 0.089
assert _rate(players) == \
[(33.207, 6.348), (27.401, 5.787), (22.599, 5.787), (16.793, 6.348)]
# 5 players
players = generate_individual(5)
assert _quality(players) == 0.040
assert _rate(players) == \
[(34.363, 6.136), (29.058, 5.536), (25.000, 5.420), (20.942, 5.536),
(15.637, 6.136)]
# 8 players
players = generate_individual(8)
assert _quality(players) == 0.004
assert _rate(players, [0] * 8) == \
[(25.000, 4.592), (25.000, 4.583), (25.000, 4.576), (25.000, 4.573),
(25.000, 4.573), (25.000, 4.576), (25.000, 4.583), (25.000, 4.592)]
# 16 players
players = generate_individual(16)
assert _rate(players) == \
[(40.539, 5.276), (36.810, 4.711), (34.347, 4.524), (32.336, 4.433),
(30.550, 4.380), (28.893, 4.349), (27.310, 4.330), (25.766, 4.322),
(24.234, 4.322), (22.690, 4.330), (21.107, 4.349), (19.450, 4.380),
(17.664, 4.433), (15.653, 4.524), (13.190, 4.711), (9.461, 5.276)]
@various_backends
def test_multiple_teams():
# 2 vs 4 vs 2
t1 = (Rating(40, 4), Rating(45, 3))
t2 = (Rating(20, 7), Rating(19, 6), Rating(30, 9), Rating(10, 4))
t3 = (Rating(50, 5), Rating(30, 2))
assert _quality([t1, t2, t3]) == 0.367
assert _rate([t1, t2, t3], [0, 1, 1]) == \
[(40.877, 3.840), (45.493, 2.934), (19.609, 6.396), (18.712, 5.625),
(29.353, 7.673), (9.872, 3.891), (48.830, 4.590), (29.813, 1.976)]
# 1 vs 2 vs 1
t1 = (Rating(),)
t2 = (Rating(), Rating())
t3 = (Rating(),)
assert _quality([t1, t2, t3]) == 0.047
@various_backends
def test_upset():
# 1 vs 1
t1, t2 = (Rating(),), (Rating(50, 12.5),)
assert _quality([t1, t2]) == 0.110
assert _rate([t1, t2], [0, 0]) == [(31.662, 7.137), (35.010, 7.910)]
# 2 vs 2
t1 = (Rating(20, 8), Rating(25, 6))
t2 = (Rating(35, 7), Rating(40, 5))
assert _quality([t1, t2]) == 0.084
assert _rate([t1, t2]) == \
[(29.698, 7.008), (30.455, 5.594), (27.575, 6.346), (36.211, 4.768)]
# 3 vs 2
t1 = (Rating(28, 7), Rating(27, 6), Rating(26, 5))
t2 = (Rating(30, 4), Rating(31, 3))
assert _quality([t1, t2]) == 0.254
assert _rate([t1, t2], [0, 1]) == \
[(28.658, 6.770), (27.484, 5.856), (26.336, 4.917), (29.785, 3.958),
(30.879, 2.983)]
assert _rate([t1, t2], [1, 0]) == \
[(21.840, 6.314), (22.474, 5.575), (22.857, 4.757), (32.012, 3.877),
(32.132, 2.949)]
# 8 players
players = [(Rating(10, 8),), (Rating(15, 7),), (Rating(20, 6),),
(Rating(25, 5),), (Rating(30, 4),), (Rating(35, 3),),
(Rating(40, 2),), (Rating(45, 1),)]
assert _quality(players) == 0.000
assert _rate(players) == \
[(35.135, 4.506), (32.585, 4.037), (31.329, 3.756), (30.984, 3.453),
(31.751, 3.064), (34.051, 2.541), (38.263, 1.849), (44.118, 0.983)]
@various_backends
def test_partial_play():
t1, t2 = (Rating(),), (Rating(), Rating())
# each results from C# Skills:
assert rate([t1, t2], weights=[(1,), (1, 1)]) == rate([t1, t2])
assert _rate([t1, t2], weights=[(1,), (1, 1)]) == \
[(33.730, 7.317), (16.270, 7.317), (16.270, 7.317)]
assert _rate([t1, t2], weights=[(0.5,), (0.5, 0.5)]) == \
[(33.939, 7.312), (16.061, 7.312), (16.061, 7.312)]
assert _rate([t1, t2], weights=[(1,), (0, 1)]) == \
[(29.440, 7.166), (25.000, 8.333), (20.560, 7.166)]
assert _rate([t1, t2], weights=[(1,), (0.5, 1)]) == \
[(32.417, 7.056), (21.291, 8.033), (17.583, 7.056)]
# match quality of partial play
t1, t2, t3 = (Rating(),), (Rating(), Rating()), (Rating(),)
assert _quality([t1, t2, t3], [(1,), (0.25, 0.75), (1,)]) == 0.2
assert _quality([t1, t2, t3], [(1,), (0.8, 0.9), (1,)]) == 0.0809
@various_backends
def test_partial_play_with_weights_dict():
t1, t2 = (Rating(),), (Rating(), Rating())
assert rate([t1, t2], weights={(0, 0): 0.5, (1, 0): 0.5, (1, 1): 0.5}) == \
rate([t1, t2], weights=[[0.5], [0.5, 0.5]])
assert rate([t1, t2], weights={(1, 0): 0}) == \
rate([t1, t2], weights=[[1], [0, 1]])
assert rate([t1, t2], weights={(1, 0): 0.5}) == \
rate([t1, t2], weights=[[1], [0.5, 1]])
@various_backends
def test_microsoft_research_example():
# http://research.microsoft.com/en-us/projects/trueskill/details.aspx
alice, bob, chris, darren, eve, fabien, george, hillary = \
Rating(), Rating(), Rating(), Rating(), \
Rating(), Rating(), Rating(), Rating()
_rated = rate([{'alice': alice}, {'bob': bob}, {'chris': chris},
{'darren': darren}, {'eve': eve}, {'fabien': fabien},
{'george': george}, {'hillary': hillary}])
rated = {}
list(map(rated.update, _rated))
assert almost(rated['alice']) == (36.771, 5.749)
assert almost(rated['bob']) == (32.242, 5.133)
assert almost(rated['chris']) == (29.074, 4.943)
assert almost(rated['darren']) == (26.322, 4.874)
assert almost(rated['eve']) == (23.678, 4.874)
assert almost(rated['fabien']) == (20.926, 4.943)
assert almost(rated['george']) == (17.758, 5.133)
assert almost(rated['hillary']) == (13.229, 5.749)
@various_backends
def test_dynamic_draw_probability():
from trueskillhelpers import calc_dynamic_draw_probability as calc
def assert_predictable_draw_probability(r1, r2, drawn=False):
dyn = TrueSkill(draw_probability=t.dynamic_draw_probability)
sta = TrueSkill(draw_probability=calc((r1,), (r2,), dyn))
assert dyn.rate_1vs1(r1, r2, drawn) == sta.rate_1vs1(r1, r2, drawn)
assert_predictable_draw_probability(Rating(100), Rating(10))
assert_predictable_draw_probability(Rating(10), Rating(100))
assert_predictable_draw_probability(Rating(10), Rating(100), drawn=True)
assert_predictable_draw_probability(Rating(25), Rating(25))
assert_predictable_draw_probability(Rating(25), Rating(25), drawn=True)
assert_predictable_draw_probability(Rating(-25), Rating(125))
assert_predictable_draw_probability(Rating(125), Rating(-25))
assert_predictable_draw_probability(Rating(-25), Rating(125), drawn=True)
assert_predictable_draw_probability(Rating(25, 10), Rating(25, 0.1))
# functions
@various_backends
def test_exposure():
env = TrueSkill()
assert env.expose(env.create_rating()) == 0
env = TrueSkill(1000, 200)
assert env.expose(env.create_rating()) == 0
# mathematics
def test_valid_gaussian():
from trueskill.mathematics import Gaussian
with raises(TypeError): # sigma argument is needed
Gaussian(0)
with raises(ValueError): # sigma**2 should be greater than 0
Gaussian(0, 0)
def test_valid_matrix():
from trueskill.mathematics import Matrix
with raises(TypeError): # src must be a list or dict or callable
Matrix(None)
with raises(ValueError): # src must be a rectangular array of numbers
Matrix([])
with raises(ValueError): # src must be a rectangular array of numbers
Matrix([[1, 2, 3], [4, 5]])
with raises(TypeError):
# A callable src must return an interable which generates a tuple
# containing coordinate and value
Matrix(lambda: None)
def test_matrix_from_dict():
from trueskill.mathematics import Matrix
mat = Matrix({(0, 0): 1, (4, 9): 1})
assert mat.height == 5
assert mat.width == 10
assert mat[0][0] == 1
assert mat[0][1] == 0
assert mat[4][9] == 1
assert mat[4][8] == 0
def test_matrix_from_item_generator():
from trueskill.mathematics import Matrix
def gen_matrix(height, width):
yield (0, 0), 1
yield (height - 1, width - 1), 1
mat = Matrix(gen_matrix, 5, 10)
assert mat.height == 5
assert mat.width == 10
assert mat[0][0] == 1
assert mat[0][1] == 0
assert mat[4][9] == 1
assert mat[4][8] == 0
with raises(TypeError):
# A callable src must call set_height and set_width if the size is
# non-deterministic
Matrix(gen_matrix)
def gen_and_set_size_matrix(set_height, set_width):
set_height(5)
set_width(10)
return [((0, 0), 1), ((4, 9), 1)]
mat = Matrix(gen_and_set_size_matrix)
assert mat.height == 5
assert mat.width == 10
assert mat[0][0] == 1
assert mat[0][1] == 0
assert mat[4][9] == 1
assert mat[4][8] == 0
def test_matrix_operations():
from trueskill.mathematics import Matrix
assert Matrix([[1, 2], [3, 4]]).inverse() == \
Matrix([[-2.0, 1.0], [1.5, -0.5]])
assert Matrix([[1, 2], [3, 4]]).determinant() == -2
assert Matrix([[1, 2], [3, 4]]).adjugate() == Matrix([[4, -2], [-3, 1]])
with raises(ValueError): # Bad size
assert Matrix([[1, 2], [3, 4]]) * Matrix([[5, 6]])
assert Matrix([[1, 2], [3, 4]]) * Matrix([[5, 6, 7], [8, 9, 10]]) == \
Matrix([[21, 24, 27], [47, 54, 61]])
with raises(ValueError): # Must be same size
Matrix([[1, 2], [3, 4]]) + Matrix([[5, 6, 7], [8, 9, 10]])
assert Matrix([[1, 2], [3, 4]]) + Matrix([[5, 6], [7, 8]]) == \
Matrix([[6, 8], [10, 12]])
# reported bugs
@various_backends
def test_issue3():
"""The `issue #3`_, opened by @youknowone.
    These inputs led to ZeroDivisionError before 0.1.4. Other TrueSkill
    implementations also cannot calculate this case.
.. _issue #3: https://github.com/sublee/trueskill/issues/3
"""
# @konikos's case 1
t1 = (Rating(42.234, 3.728), Rating(43.290, 3.842))
t2 = (Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500),
Rating(16.667, 0.500), Rating(16.667, 0.500), Rating(16.667, 0.500))
rate([t1, t2], [6, 5])
# @konikos's case 2
t1 = (Rating(25.000, 0.500), Rating(25.000, 0.500), Rating(25.000, 0.500),
Rating(25.000, 0.500), Rating(33.333, 0.500), Rating(33.333, 0.500),
Rating(33.333, 0.500), Rating(33.333, 0.500), Rating(41.667, 0.500),
Rating(41.667, 0.500), Rating(41.667, 0.500), Rating(41.667, 0.500))
t2 = (Rating(42.234, 3.728), Rating(43.291, 3.842))
rate([t1, t2], [0, 28])
@various_backends(['scipy'])
def test_issue4():
"""The `issue #4`_, opened by @sublee.
    numpy.float64 handles floating-point errors in a different way. For
    example, it can just warn with RuntimeWarning on an n/0 problem instead of
    raising ZeroDivisionError.
.. _issue #4: https://github.com/sublee/trueskill/issues/4
"""
import numpy
r1, r2 = Rating(105.247, 0.439), Rating(27.030, 0.901)
    # make numpy raise FloatingPointError instead of warning with
    # RuntimeWarning
old_settings = numpy.seterr(divide='raise')
try:
rate([(r1,), (r2,)])
finally:
numpy.seterr(**old_settings)
@various_backends([None, 'scipy'])
def test_issue5(backend):
"""The `issue #5`_, opened by @warner121.
    This error occurs when a winner has a much lower rating than a loser.
    Basically Python cannot calculate the correct result, but mpmath_ can. I
    added a ``backend`` option to the :class:`TrueSkill` class. If it is set
    to 'mpmath' then the problem goes away.
    The result of the TrueSkill calculator by Microsoft is N(-273.092, 2.683)
    and N(-75.830, 2.080); C# Skills by Moserware gives N(NaN, 2.6826) and
    N(NaN, 2.0798). I chose Microsoft's result as the expectation for the test
    suite.
.. _issue #5: https://github.com/sublee/trueskill/issues/5
.. _mpmath: http://mpmath.googlecode.com/
"""
assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0
with raises(FloatingPointError):
rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190))
assert _quality_1vs1(Rating(), Rating(1000)) == 0
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000))
@various_backends(['mpmath'])
def test_issue5_with_mpmath():
_rate_1vs1 = almost.wrap(rate_1vs1, 0)
assert _quality_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == 0
assert _rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190)) == \
[(-273.361, 2.683), (-75.683, 2.080)]
assert _quality_1vs1(Rating(), Rating(1000)) == 0
assert _rate_1vs1(Rating(), Rating(1000)) == \
[(415.298, 6.455), (609.702, 6.455)]
@various_backends(['mpmath'])
def test_issue5_with_more_extreme():
"""If the input is more extreme, 'mpmath' backend also made an exception.
But we can avoid the problem with higher precision.
"""
import mpmath
try:
dps = mpmath.mp.dps
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000000))
mpmath.mp.dps = 50
assert almost(rate_1vs1(Rating(), Rating(1000000)), prec=-1) == \
[(400016.896, 6.455), (600008.104, 6.455)]
with raises(FloatingPointError):
rate_1vs1(Rating(), Rating(1000000000000))
mpmath.mp.dps = 100
assert almost(rate_1vs1(Rating(), Rating(1000000000000)), prec=-7) == \
[(400001600117.693, 6.455), (599998399907.307, 6.455)]
finally:
mpmath.mp.dps = dps
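# A hedged, illustrative sketch (not an actual test): the ``backend`` option
# mentioned in the issue #5 docstring can also be chosen directly on the
# environment object, without the test decorators used above.
def _example_mpmath_backend():
    env = TrueSkill(backend='mpmath')
    return env.rate_1vs1(Rating(-323.263, 2.965), Rating(-48.441, 2.190))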
def test_issue9_weights_dict_with_object_keys():
"""The `issue #9`_, opened by @.
.. _issue #9: https://github.com/sublee/trueskill/issues/9
"""
class Player(object):
def __init__(self, rating, team):
self.rating = rating
self.team = team
p1 = Player(Rating(), 0)
p2 = Player(Rating(), 0)
p3 = Player(Rating(), 1)
teams = [{p1: p1.rating, p2: p2.rating}, {p3: p3.rating}]
rated = rate(teams, weights={(0, p1): 1, (0, p2): 0.5, (1, p3): 1})
assert rated[0][p1].mu > rated[0][p2].mu
assert rated[0][p1].sigma < rated[0][p2].sigma
assert rated[0][p1].sigma == rated[1][p3].sigma
|
the-stack_0_2450 | import itertools
import json
import os
import random
import numpy as np
from gym import spaces
from jsonmerge import Merger
from utils import constants
from utils.constants import *
class PommermanJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, Item):
return obj.value
elif isinstance(obj, Action):
return obj.value
elif isinstance(obj, np.int64):
return int(obj)
elif hasattr(obj, 'to_json'):
return obj.to_json()
elif isinstance(obj, spaces.Discrete):
return obj.n
elif isinstance(obj, spaces.Tuple):
return [space.n for space in obj.spaces]
return json.JSONEncoder.default(self, obj)
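# Illustrative usage sketch: the encoder plugs into the standard json API, so
# numpy arrays, Item/Action enums and gym spaces serialise without extra code.
def _encoder_example():
    payload = {'board': np.zeros((2, 2)), 'action_space': spaces.Discrete(6)}
    return json.dumps(payload, cls=PommermanJSONEncoder)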
def make_board(size, num_rigid=0, num_wood=0):
"""Make the random but symmetric board.
The numbers refer to the Item enum in constants. This is:
0 - passage
1 - rigid wall
2 - wood wall
3 - bomb
4 - flames
5 - fog
6 - extra bomb item
7 - extra firepower item
8 - kick
9 - skull
10 - 13: agents
Args:
      size: The dimension of the board, i.e. the board is size x size.
num_rigid: The number of rigid walls on the board. This should be even.
num_wood: Similar to above but for wood walls.
Returns:
board: The resulting random board.
"""
def lay_wall(value, num_left, coordinates, board):
x, y = random.sample(coordinates, 1)[0]
coordinates.remove((x, y))
coordinates.remove((y, x))
board[x, y] = value
board[y, x] = value
num_left -= 2
return num_left
def make(size, num_rigid, num_wood):
# Initialize everything as a passage.
board = np.ones(
(size, size)).astype(np.uint8) * constants.Item.Passage.value
# Gather all the possible coordinates to use for walls.
coordinates = set([
(x, y) for x, y in \
itertools.product(range(size), range(size)) \
if x != y])
# Set the players down. Exclude them from coordinates.
# Agent0 is in top left. Agent1 is in bottom left.
# Agent2 is in bottom right. Agent 3 is in top right.
board[1, 1] = constants.Item.Agent0.value
board[size - 2, 1] = constants.Item.Agent1.value
board[size - 2, size - 2] = constants.Item.Agent2.value
board[1, size - 2] = constants.Item.Agent3.value
agents = [(1, 1), (size - 2, 1), (1, size - 2), (size - 2, size - 2)]
for position in agents:
if position in coordinates:
coordinates.remove(position)
# Exclude breathing room on either side of the agents.
for i in range(2, 4):
coordinates.remove((1, i))
coordinates.remove((i, 1))
coordinates.remove((1, size - i - 1))
coordinates.remove((size - i - 1, 1))
coordinates.remove((size - 2, size - i - 1))
coordinates.remove((size - i - 1, size - 2))
coordinates.remove((i, size - 2))
coordinates.remove((size - 2, i))
# Lay down wooden walls providing guaranteed passage to other agents.
wood = constants.Item.Wood.value
for i in range(4, size - 4):
board[1, i] = wood
board[size - i - 1, 1] = wood
board[size - 2, size - i - 1] = wood
board[size - i - 1, size - 2] = wood
coordinates.remove((1, i))
coordinates.remove((size - i - 1, 1))
coordinates.remove((size - 2, size - i - 1))
coordinates.remove((size - i - 1, size - 2))
num_wood -= 4
# Lay down the rigid walls.
while num_rigid > 0:
num_rigid = lay_wall(constants.Item.Rigid.value, num_rigid,
coordinates, board)
# Lay down the wooden walls.
while num_wood > 0:
num_wood = lay_wall(constants.Item.Wood.value, num_wood,
coordinates, board)
return board, agents
assert (num_rigid % 2 == 0)
assert (num_wood % 2 == 0)
board, agents = make(size, num_rigid, num_wood)
# Make sure it's possible to reach most of the passages.
while len(inaccessible_passages(board, agents)) > 4:
board, agents = make(size, num_rigid, num_wood)
return board
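# Illustrative usage sketch: the wall and item counts below are assumptions
# chosen to satisfy the even-number asserts, not values taken from any game
# configuration shipped with this file.
def _board_example():
    board = make_board(size=11, num_rigid=36, num_wood=36)
    item_positions = make_items(board, num_items=20)
    return board, item_positions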
def make_items(board, num_items):
item_positions = {}
while num_items > 0:
row = random.randint(0, len(board) - 1)
col = random.randint(0, len(board[0]) - 1)
if board[row, col] != constants.Item.Wood.value:
continue
if (row, col) in item_positions:
continue
item_positions[(row, col)] = random.choice([
constants.Item.ExtraBomb, constants.Item.IncrRange,
constants.Item.Kick
]).value
num_items -= 1
return item_positions
def inaccessible_passages(board, agent_positions):
"""Return inaccessible passages on this board."""
seen = set()
agent_position = agent_positions.pop()
passage_positions = np.where(board == constants.Item.Passage.value)
positions = list(zip(passage_positions[0], passage_positions[1]))
Q = [agent_position]
while Q:
row, col = Q.pop()
for (i, j) in [(1, 0), (-1, 0), (0, 1), (0, -1)]:
next_position = (row + i, col + j)
if next_position in seen:
continue
if not position_on_board(board, next_position):
continue
if position_is_rigid(board, next_position):
continue
if next_position in positions:
positions.pop(positions.index(next_position))
if not len(positions):
return []
seen.add(next_position)
Q.append(next_position)
return positions
def is_valid_direction(board, position, direction, invalid_values=None):
row, col = position
if invalid_values is None:
invalid_values = [item.value for item in \
[constants.Item.Rigid, constants.Item.Wood]]
if constants.Action(direction) == constants.Action.Stop:
return True
if constants.Action(direction) == constants.Action.Up:
return row - 1 >= 0 and board[row - 1][col] not in invalid_values
if constants.Action(direction) == constants.Action.Down:
        return row + 1 < len(board) and board[row + 1][col] not in invalid_values
if constants.Action(direction) == constants.Action.Left:
return col - 1 >= 0 and board[row][col - 1] not in invalid_values
if constants.Action(direction) == constants.Action.Right:
return col + 1 < len(board[0]) and \
board[row][col + 1] not in invalid_values
raise constants.InvalidAction("We did not receive a valid direction: ",
direction)
def _position_is_item(board, position, item):
return board[position] == item.value
def position_is_flames(board, position):
return _position_is_item(board, position, constants.Item.Flames)
def position_is_bomb(bombs, position):
"""Check if a given position is a bomb.
We don't check the board because that is an unreliable source. An agent
may be obscuring the bomb on the board.
"""
for bomb in bombs:
if position == bomb.position:
return True
return False
def position_is_powerup(board, position):
powerups = [
constants.Item.ExtraBomb, constants.Item.IncrRange, constants.Item.Kick
]
item_values = [item.value for item in powerups]
return board[position] in item_values
def position_is_wall(board, position):
return position_is_rigid(board, position) or \
position_is_wood(board, position)
def position_is_passage(board, position):
return _position_is_item(board, position, constants.Item.Passage)
def position_is_rigid(board, position):
return _position_is_item(board, position, constants.Item.Rigid)
def position_is_wood(board, position):
return _position_is_item(board, position, constants.Item.Wood)
def position_is_agent(board, position):
return board[position] in [
constants.Item.Agent0.value, constants.Item.Agent1.value,
constants.Item.Agent2.value, constants.Item.Agent3.value
]
def position_is_enemy(board, position, enemies):
return constants.Item(board[position]) in enemies
# TODO: Fix this so that it includes the teammate.
def position_is_passable(board, position, enemies):
return all([
any([
position_is_agent(board, position),
position_is_powerup(board, position),
position_is_passage(board, position)
]), not position_is_enemy(board, position, enemies)
])
def position_is_fog(board, position):
return _position_is_item(board, position, constants.Item.Fog)
def agent_value(id_):
return getattr(constants.Item, 'Agent%d' % id_).value
def position_in_items(board, position, items):
return any([_position_is_item(board, position, item) for item in items])
def position_on_board(board, position):
x, y = position
return all([len(board) > x, len(board[0]) > y, x >= 0, y >= 0])
def get_direction(position, next_position):
"""Get the direction such that position --> next_position.
We assume that they are adjacent.
"""
x, y = position
nx, ny = next_position
if x == nx:
if y < ny:
return constants.Action.Right
else:
return constants.Action.Left
elif y == ny:
if x < nx:
return constants.Action.Down
else:
return constants.Action.Up
raise constants.InvalidAction(
"We did not receive a valid position transition.")
def get_next_position(position, direction):
x, y = position
if direction == constants.Action.Right:
return (x, y + 1)
elif direction == constants.Action.Left:
return (x, y - 1)
elif direction == constants.Action.Down:
return (x + 1, y)
elif direction == constants.Action.Up:
return (x - 1, y)
elif direction == constants.Action.Stop:
return (x, y)
raise constants.InvalidAction("We did not receive a valid direction.")
def make_np_float(feature):
return np.array(feature).astype(np.float32)
def join_json_state(record_json_dir, agents, finished_at, config):
jsonSchema = {
"properties": {
"state": {
"mergeStrategy": "append"
}
}
}
jsonTemplate = {
"agents": agents,
"finished_at": finished_at,
"config": config,
"state": []
}
merger = Merger(jsonSchema)
base = merger.merge({}, jsonTemplate)
for root, dirs, files in os.walk(record_json_dir):
for name in files:
path = os.path.join(record_json_dir, name)
if name.endswith('.json') and "game_state" not in name:
with open(path) as data_file:
data = json.load(data_file)
head = {"state": [data]}
base = merger.merge(base, head)
with open(os.path.join(record_json_dir, 'game_state.json'), 'w') as f:
f.write(json.dumps(base, sort_keys=True, indent=4))
for root, dirs, files in os.walk(record_json_dir):
for name in files:
if "game_state" not in name:
os.remove(os.path.join(record_json_dir, name))
|
the-stack_0_2452 | # Copyright (c) 2015, 2016, 2017, 2018, 2019, 2020, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import glob
import math
import textwrap
from apps import apps
class HplCpuAppConf(apps.AppConf):
@staticmethod
def name():
return 'hpl_cpu'
def __init__(self, num_nodes, mach, perc_dram_per_node=0.9, cores_per_node=None):
'''
        num_nodes: Number of MPI ranks (1 node per rank) -- 1, 2, 4, 8 or 16.
perc_dram_per_node: Ratio of the total node DRAM that should be used for the
HPL matrix (assuming DP).
80-90% is a good amount to maximize efficiency.
Default is 0.9.
cores_per_node: Number of Xeon cores that each MPI process can offload to via OMP.
                        Total number of physical cores will be selected if this is not set
                        (default=None).
'''
dram_for_app = num_nodes * mach.total_node_memory_bytes() * perc_dram_per_node
if cores_per_node is None:
cores_per_node = mach.num_core()
benchmark_dir = os.path.dirname(os.path.abspath(__file__))
self.exe_path = os.path.join(benchmark_dir, 'hpl-2.3/bin/Linux_Intel64/xhpl')
self.NBs=384 # This is the recommended size for Intel Scalable Xeon family.
process_grid_ratios = {
1: {'P': 1, 'Q': 1},
2: {'P': 1, 'Q': 2},
4: {'P': 2, 'Q': 2},
8: {'P': 2, 'Q': 4},
16: {'P': 4, 'Q': 4}
}
if num_nodes not in process_grid_ratios:
raise RuntimeError("Number of nodes {num_nodes} is not defined for HPL.".format(num_nodes=num_nodes))
self.P = process_grid_ratios[num_nodes]['P']
self.Q = process_grid_ratios[num_nodes]['Q']
self.N = int(round(math.sqrt(dram_for_app / 8)))
self._cpu_per_rank = cores_per_node
sys.stdout.write('DRAM reserved for APP: {dram_for_app:0.2f}GB\n'.format(dram_for_app=dram_for_app/2**30))
sys.stdout.write('Cores for app: {cores_per_node}\n'.format(cores_per_node=cores_per_node))
sys.stdout.write('N={N}\n'.format(N=self.N))
def get_bash_setup_commands(self):
input_file = textwrap.dedent('''
HPLinpack benchmark input file
Innovative Computing Laboratory, University of Tennessee
HPL.out output file name (if any)
6 device out (6=stdout,7=stderr,file)
1 # of problems sizes (N)
{N} Ns
1 # of NBs
{NBs} NBs
0 PMAP process mapping (0=Row-,1=Column-major)
1 # of process grids (P x Q)
{P} Ps
{Q} Qs
16.0 threshold
1 # of panel fact
1 PFACTs (0=left, 1=Crout, 2=Right)1
1 # of recursive stopping criterium
4 NBMINs (>= 1)
1 # of panels in recursion
2 NDIVs
1 # of recursive panel fact.
1 RFACTs (0=left, 1=Crout, 2=Right)
1 # of broadcast
0 BCASTs (0=1rg,1=1rM,2=2rg,3=2rM,4=Lng,5=LnM)
1 # of lookahead depth
0 DEPTHs (>=0)
2 SWAP (0=bin-exch,1=long,2=mix)
64 swapping threshold
0 L1 in (0=transposed,1=no-transposed) form
0 U in (0=transposed,1=no-transposed) form
1 Equilibration (0=no,1=yes)
8 memory alignment in double (> 0)
EOF
'''.format(N=self.N, NBs=self.NBs, P=self.P, Q=self.Q))
setup_commands = 'export MKL_NUM_THREADS={cpu_per_rank}\n'.format(cpu_per_rank=self._cpu_per_rank)
setup_commands += 'cat > ./HPL.dat << EOF {input_file}\n'.format(input_file=input_file)
return setup_commands
def get_rank_per_node(self):
return 1
def get_cpu_per_rank(self):
return self._cpu_per_rank
def get_bash_exec_path(self):
return self.exe_path
def get_bash_exec_args(self):
return ''
def get_custom_geopm_args(self):
# See README.md for an explanation of why
# HPL cannot start in process control mode.
# Also hyperthreading does not benefit HPL and
# it is turned off.
return ['--geopm-ctl=application',
'--geopm-hyperthreads-disable']
def parse_fom(self, log_path):
result = None
key = 'WR00'
with open(log_path) as fid:
for line in fid.readlines():
if key in line:
result = float(line.split(' ')[-1])
break
return result
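# Illustrative sketch only: a stand-in machine object is enough to preview the
# generated HPL.dat, because the class only needs total_node_memory_bytes()
# and num_core() from it. The hardware numbers below are assumptions, not
# recommendations.
class _FakeMachine:
    def total_node_memory_bytes(self):
        return 192 * 2**30  # assume 192 GB of DRAM per node
    def num_core(self):
        return 48  # assume 48 physical cores per node
def _hpl_conf_example():
    app_conf = HplCpuAppConf(num_nodes=4, mach=_FakeMachine(), perc_dram_per_node=0.85)
    return app_conf.get_bash_setup_commands()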
|
the-stack_0_2453 | __description__ = \
"""
Class for generating simulated epistasis maps with options for various
distributions of values.
"""
__author__ = "Zach Sailer"
from functools import wraps
from epistasis.mapping import EpistasisMap
from numpy import random
class DistributionSimulation(EpistasisMap):
"""
Just like an epistasis map, but with extra methods for setting epistatic
coefficients
"""
def __init__(self, gpm, df=None, sites=None, values=None, uncertainties=None):
super().__init__(df=df, sites=sites, values=values, uncertainties=uncertainties)
self._gpm = gpm
@property
def avail_distributions(self):
return random.__all__
def set_order_from_distribution(self, orders, dist="normal", **kwargs):
"""
Sets epistatic coefficients to values drawn from a statistical
distribution.
        Distributions are found in NumPy's `random` module. Kwargs are passed
directly to these methods
"""
# Get distribution
try:
method = getattr(random, dist)
except AttributeError:
err = "Distribution now found. Check the `avail_distribution` \n"
err += "attribute for available distributions.\n"
raise ValueError(err)
idx = self.data.orders.isin(orders)
self.data.loc[idx, "values"] = method(
size=sum(idx),
**kwargs
)
self._gpm.build()
@wraps(EpistasisMap.set_values)
def set_values(self, values, filter=None):
super().set_values(values, filter=filter)
self._gpm.build()
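# Minimal usage sketch (hedged): ``gpm`` is assumed to be the genotype-phenotype
# map object this simulation wraps, and the keyword arguments are forwarded
# untouched to the chosen numpy.random distribution.
#
#   sim = DistributionSimulation(gpm, sites=sites, values=values)
#   sim.set_order_from_distribution(orders=[1, 2], dist="normal", loc=0, scale=1)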
|
the-stack_0_2454 | """A transformer for gen3 project,reads genetrails_variants bcc, writes to DEFAULT_OUTPUT_DIR."""
import hashlib
import os
import json
from gen3_etl.utils.ioutils import reader
from defaults import DEFAULT_OUTPUT_DIR, DEFAULT_EXPERIMENT_CODE, DEFAULT_PROJECT_ID, default_parser, emitter, obscure_dates
from gen3_etl.utils.schema import generate, template
LOOKUP_PATHS = """
source/bcc/genetrails_classification.json
source/bcc/genetrails_copy_number_result_type.json
source/bcc/genetrails_protein_variant_type.json
source/bcc/genetrails_result_significance.json
source/bcc/genetrails_result_type.json
source/bcc/genetrails_run_status.json
source/bcc/genetrails_transcript_priority.json
source/bcc/genetrails_variant_type.json
source/bcc/chromosome.json
source/bcc/assay_categories.json
source/bcc/assay_version.json
source/bcc/gene.json
source/bcc/genome_build.json
""".strip().split()
def transform(item_paths, output_dir, experiment_code, compresslevel=0, callback=None):
"""Read bcc labkey json and writes gen3 json."""
genetrails_emitter = emitter('wes_result', output_dir=output_dir)
with open('output/reference/gene_lookup.tsv') as f:
gene_lookup = {k: v for k,v in (line.split() for line in f) }
for p in item_paths:
source = os.path.splitext(os.path.basename(p))[0]
for line in reader(p):
line['source'] = source
if callback:
line = callback(line)
submitter_id = line.get('participantid', line.get('ParticipantID', None))
aliquot_id = '{}-sample-aliquot'.format(submitter_id)
genetrails_variant = {
'type': 'wes_result',
'project_id': DEFAULT_PROJECT_ID,
'aliquot': {'submitter_id': aliquot_id},
'submitter_id': line['lsid']}
if 'gene_symbol' in line and line['gene_symbol'].lower() in gene_lookup:
line['gene'] = {'submitter_id': gene_lookup[line['gene_symbol'].lower()], 'project_id': 'smmart-reference'}
genetrails_variant.update(line)
genetrails_emitter.write(genetrails_variant)
genetrails_emitter.close()
def lookups():
look_ups = {}
for p in LOOKUP_PATHS:
c = p.replace('source/bcc/','').replace('genetrails_','').replace('.json','')
look_ups[c] = {}
print(p, c)
for line in reader(p):
name = line['display_name']
val = [line[k] for k in line if not k.startswith('_') and k.endswith('_id')][0]
look_ups[c][val] = name
return look_ups
LOOKUPS = lookups()
def my_callback(line):
"""Remove fields that start with _, fix key names with embedded /, fix id lookups """
for k in [k for k in line if k.startswith('_')]:
del line[k]
for k in [k for k in line if '/' in k]:
line[k.split('/')[1]] = line[k]
del line[k]
for k in [k for k in line if k.endswith('_id')]:
if k in ['project_id', 'submitter_id']:
continue
lup = k.replace('_id', '')
if line[k]:
try:
line[lup] = LOOKUPS[lup][line[k]]
except Exception as e:
print(lup, k, line[k])
print('******')
print(LOOKUPS[lup])
print('******')
raise e
del line[k]
if 'chromosome' in line:
line['chromosome'] = str(line['chromosome'].replace('chr',''))
if 'gene' in line:
line['gene_symbol'] = line['gene']
del line['gene']
return line
def my_schema_callback(schema):
"""Remove fields that start with _, fix key names with embedded /, fix id lookups """
for k in [k for k in schema['properties'] if k.startswith('_')]:
del schema['properties'][k]
for k in [k for k in schema['properties'] if '/' in k]:
schema['properties'][k.split('/')[1]] = schema['properties'][k]
del schema['properties'][k]
for k in [k for k in schema['properties'] if k.endswith('_id')]:
if k in ['project_id', 'submitter_id']:
continue
schema['properties'][k.replace('_id', '')] = {'type': ['string', "'null'"]} # schema['properties'][k]
del schema['properties'][k]
    # mark the schema as a bcc extension and add the aliquot link
schema['category'] = 'bcc extention'
schema['properties']['aliquot'] = {'$ref': '_definitions.yaml#/to_one'}
return schema
if __name__ == "__main__":
item_paths = ['source/bcc/WESResults.json']
args = default_parser(DEFAULT_OUTPUT_DIR, DEFAULT_EXPERIMENT_CODE, DEFAULT_PROJECT_ID).parse_args()
transform(item_paths, output_dir=args.output_dir, experiment_code=args.experiment_code, callback=my_callback)
item_paths = ['output/bcc/wes_result.json']
link = {'name':'aliquot', 'backref':'wes_result', 'label':'derived_from', 'target_type':'aliquot', 'multiplicity': 'many_to_one', 'required': False }
schema_path = generate(item_paths,'wes_result', output_dir='output/bcc', links=[link], callback=my_schema_callback)
assert os.path.isfile(schema_path), 'should have an schema file {}'.format(schema_path)
print(schema_path)
|
the-stack_0_2458 | import numba
import numpy as np
from scipy.sparse import csr_matrix
from .base import BasePointer, GraphBlasContainer
from .context import handle_panic, return_error
from .exceptions import GrB_Info
class MatrixPtr(BasePointer):
def set_matrix(self, matrix):
self.instance = matrix
class Matrix(GraphBlasContainer):
def __init__(self, matrix):
assert isinstance(matrix, csr_matrix)
self.matrix = matrix
@classmethod
def new_from_dtype(cls, dtype, nrows, ncols):
matrix = csr_matrix((nrows, ncols), dtype=dtype)
return cls(matrix)
@classmethod
def new_from_existing(cls, other):
matrix = csr_matrix(other)
return cls(matrix)
@classmethod
def get_pointer(cls):
return MatrixPtr()
@handle_panic
def Matrix_new(A: MatrixPtr, dtype: type, nrows: int, ncols: int):
if nrows <= 0:
return_error(GrB_Info.GrB_INVALID_VALUE, "nrows must be > 0")
if ncols <= 0:
return_error(GrB_Info.GrB_INVALID_VALUE, "ncols must be > 0")
matrix = Matrix.new_from_dtype(dtype, nrows, ncols)
A.set_matrix(matrix)
return GrB_Info.GrB_SUCCESS
@handle_panic
def Matrix_dup(C: MatrixPtr, A: Matrix):
matrix = Matrix.new_from_existing(A)
C.set_matrix(matrix)
return GrB_Info.GrB_SUCCESS
@handle_panic
def Matrix_resize(C: Matrix, nrows: int, ncols: int):
if nrows <= 0:
return_error(GrB_Info.GrB_INVALID_VALUE, "nrows must be > 0")
if ncols <= 0:
return_error(GrB_Info.GrB_INVALID_VALUE, "ncols must be > 0")
C.matrix.resize((nrows, ncols))
return GrB_Info.GrB_SUCCESS
# TODO: this is just the essential code; it needs to handle descriptors, masks, accumulators, etc
@handle_panic
def mxm(C, A, B, semiring):
cr, cc = C.shape
ar, ac = A.shape
br, bc = B.shape
if cr != ar:
return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "C.nrows != A.nrows")
if cc != bc:
return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "C.ncols != B.ncols")
if ac != br:
return_error(GrB_Info.GrB_DIMENSION_MISMATCH, "A.nrows != B.ncols")
b = B.tocsc()
d, i, ip = _sparse_matmul(
A.data,
A.indices,
A.indptr,
b.data,
b.indices,
b.indptr,
semiring.plus.op,
semiring.times,
semiring.plus.identity,
C.dtype,
)
C.data = d
C.indices = i
C.indptr = ip
return GrB_Info.GrB_SUCCESS
@numba.njit
def _sparse_matmul(
a_data,
a_indices,
a_indptr,
b_data,
b_indices,
b_indptr,
plus,
times,
identity,
dtype,
):
# Final array size is unknown, so we give ourselves room and then adjust on the fly
tmp_output_size = a_data.size * 2
data = np.empty((tmp_output_size,), dtype=dtype)
indices = np.empty((tmp_output_size,), dtype=a_indices.dtype)
indptr = np.empty((a_indptr.size,), dtype=a_indptr.dtype)
output_counter = 0
for iptr in range(a_indptr.size - 1):
indptr[iptr] = output_counter
for jptr in range(b_indptr.size - 1):
a_counter = a_indptr[iptr]
a_stop = a_indptr[iptr + 1]
b_counter = b_indptr[jptr]
b_stop = b_indptr[jptr + 1]
val = identity
nonempty = False
while a_counter < a_stop and b_counter < b_stop:
a_k = a_indices[a_counter]
b_k = b_indices[b_counter]
if a_k == b_k:
val = plus(val, times(a_data[a_counter], b_data[b_counter]))
nonempty = True
a_counter += 1
b_counter += 1
elif a_k < b_k:
a_counter += 1
else:
b_counter += 1
if nonempty:
if output_counter >= tmp_output_size:
# We filled up the allocated space; copy existing data to a larger array
tmp_output_size *= 2
new_data = np.empty((tmp_output_size,), dtype=data.dtype)
new_indices = np.empty((tmp_output_size,), dtype=indices.dtype)
new_data[:output_counter] = data[:output_counter]
new_indices[:output_counter] = indices[:output_counter]
data = new_data
indices = new_indices
data[output_counter] = val
indices[output_counter] = jptr
output_counter += 1
# Add final entry to indptr (should indicate nnz in the output)
nnz = output_counter
indptr[iptr + 1] = nnz
# Trim output arrays
data = data[:nnz]
indices = indices[:nnz]
return (data, indices, indptr)
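# Hedged sketch of driving the kernel directly: the semiring objects consumed
# by mxm are assumed to wrap njit-compiled callables like the two below, and
# the matrices here are illustrative only.
@numba.njit
def _example_plus(x, y):
    return x + y
@numba.njit
def _example_times(x, y):
    return x * y
def _matmul_example():
    a = csr_matrix(np.array([[1.0, 0.0], [2.0, 3.0]]))
    b = csr_matrix(np.array([[0.0, 4.0], [5.0, 6.0]])).tocsc()
    data, indices, indptr = _sparse_matmul(
        a.data, a.indices, a.indptr,
        b.data, b.indices, b.indptr,
        _example_plus, _example_times, 0.0, np.float64,
    )
    # Expected dense result: [[0, 4], [15, 26]]
    return csr_matrix((data, indices, indptr), shape=(2, 2)).toarray()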
|
the-stack_0_2459 | import json
data_files = {
'colors.json',
'default_key_mappings.txt',
'unicode_names.json'
}
with open('dist-js/skulpt-designer-files.js', 'w') as output_file:
for filename in data_files:
with open(f'designer/data/{filename}') as data_file:
data = data_file.read()
line = f"Sk.builtinFiles['files']['src/lib/designer/data/{filename}']={json.dumps(data)};\n"
output_file.write(line) |
the-stack_0_2460 | import logging
import os
import re
from collections import namedtuple
from tigertag import Pluggable
from tigertag.util import str2bool
logger = logging.getLogger(__name__)
FileInfo = namedtuple('FileInfo', 'name path hash temp ext_id')
class Scanner(Pluggable):
RESERVED_PROPS = ['NAME', 'ENABLED']
def __init__(self, name, enabled):
self.name = name
self.enabled = enabled
self.props = {}
self.listeners = [] # ScannerListeners
def scan(self):
raise NotImplementedError('The {} scanner has not implemented the scan method.'.format(self.name))
class ScannerListener:
def on_file(self, scanner: Scanner, file_info: FileInfo):
pass
class ScannerManager:
def __init__(self):
self.scanners = {}
self.listeners = [] # ScannerListener array
def add(self, scanner):
self.scanners[scanner.name] = scanner
def scan(self):
if len(self.scanners) == 0:
logger.warning('No scanners configured. Please check your configuration')
for scanner_name, scanner in self.scanners.items():
if scanner.enabled:
scanner.listeners = []
for scanner_listener in self.listeners:
scanner.listeners.append(scanner_listener)
scanner.scan()
class ScannerManagerBuilder:
def __init__(self, scanner_manager_klass):
self.scanner_manager_klass = scanner_manager_klass
def build(self):
raise NotImplementedError
class EnvironmentScannerManagerBuilder(ScannerManagerBuilder):
def __init__(self, scanner_manager_klass):
super().__init__(scanner_manager_klass)
def get_class(self, klass_name):
parts = klass_name.split('.')
module = ".".join(parts[:-1])
m = __import__(module)
for comp in parts[1:]:
m = getattr(m, comp)
return m
def build(self):
sm = self.scanner_manager_klass()
scanner_detect = re.compile('^SCANNER_(?P<scanner>[A-Z0-9]*)_NAME')
# Find and create the scanners
for env_name, env_value in os.environ.items():
match = scanner_detect.match(env_name)
if match is not None:
logger.debug('Configuring scanner {}:{}'.format(env_name, env_value))
scanner_env_name = match.group('scanner')
scanner_klass_name = env_value
enabled = False
                enabled_value = os.environ.get('SCANNER_{}_ENABLED'.format(scanner_env_name))
                if enabled_value is not None:
                    enabled = str2bool(enabled_value)
scanner_klass = self.get_class(scanner_klass_name)
scanner = scanner_klass(scanner_env_name, enabled)
# Collect all the scanner properties
prop_detect = re.compile('^SCANNER_{}_(?P<prop>[A-Z0-9_]*)'.format(scanner_env_name))
for env_prop_name, env_prop_value in os.environ.items():
prop_match = prop_detect.match(env_prop_name)
if prop_match is not None:
prop_name = prop_match.group('prop')
                        if prop_name not in Scanner.RESERVED_PROPS:
scanner.props[prop_name] = env_prop_value
sm.add(scanner)
return sm
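# Illustrative environment layout consumed by EnvironmentScannerManagerBuilder;
# the class path below is a placeholder for any importable Scanner subclass,
# not a module shipped with this package.
#
#   SCANNER_LOCAL_NAME=myproject.scanners.FileSystemScanner
#   SCANNER_LOCAL_ENABLED=True
#   SCANNER_LOCAL_PATH=/data/photos
#
#   scanner_manager = EnvironmentScannerManagerBuilder(ScannerManager).build()
#   scanner_manager.scan()  # runs the "LOCAL" scanner with props {'PATH': '/data/photos'}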
|
the-stack_0_2463 | from . import ClientConstants as CC
from . import ClientDefaults
from . import ClientNetworkingContexts
from . import ClientNetworkingDomain
from . import ClientNetworkingJobs
from . import ClientParsing
from . import ClientThreading
from . import HydrusConstants as HC
from . import HydrusGlobals as HG
from . import HydrusData
from . import HydrusExceptions
from . import HydrusSerialisable
import itertools
import os
import json
import requests
import re
import threading
import time
import urllib.parse
VALIDITY_VALID = 0
VALIDITY_UNTESTED = 1
VALIDITY_INVALID = 2
validity_str_lookup = {}
validity_str_lookup[ VALIDITY_VALID ] = 'valid'
validity_str_lookup[ VALIDITY_UNTESTED ] = 'untested'
validity_str_lookup[ VALIDITY_INVALID ] = 'invalid'
LOGIN_ACCESS_TYPE_EVERYTHING = 0
LOGIN_ACCESS_TYPE_NSFW = 1
LOGIN_ACCESS_TYPE_SPECIAL = 2
LOGIN_ACCESS_TYPE_USER_PREFS_ONLY = 3
login_access_type_str_lookup = {}
login_access_type_str_lookup[ LOGIN_ACCESS_TYPE_EVERYTHING ] = 'Everything'
login_access_type_str_lookup[ LOGIN_ACCESS_TYPE_NSFW ] = 'NSFW'
login_access_type_str_lookup[ LOGIN_ACCESS_TYPE_SPECIAL ] = 'Special'
login_access_type_str_lookup[ LOGIN_ACCESS_TYPE_USER_PREFS_ONLY ] = 'User prefs'
login_access_type_default_description_lookup = {}
login_access_type_default_description_lookup[ LOGIN_ACCESS_TYPE_EVERYTHING ] = 'Login required to access any content.'
login_access_type_default_description_lookup[ LOGIN_ACCESS_TYPE_NSFW ] = 'Login required to access NSFW content.'
login_access_type_default_description_lookup[ LOGIN_ACCESS_TYPE_SPECIAL ] = 'Login required to access special content.'
login_access_type_default_description_lookup[ LOGIN_ACCESS_TYPE_USER_PREFS_ONLY ] = 'Login only required to access user preferences.'
PIXIV_NETWORK_CONTEXT = ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, 'pixiv.net' )
HENTAI_FOUNDRY_NETWORK_CONTEXT = ClientNetworkingContexts.NetworkContext( CC.NETWORK_CONTEXT_DOMAIN, 'hentai-foundry.com' )
class NetworkLoginManager( HydrusSerialisable.SerialisableBase ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_LOGIN_MANAGER
SERIALISABLE_NAME = 'Login Manager'
SERIALISABLE_VERSION = 1
SESSION_TIMEOUT = 60 * 45
def __init__( self ):
HydrusSerialisable.SerialisableBase.__init__( self )
# needs _dirty and setdirty and be on that serialisation check and so on
self.engine = None
self._dirty = False
self._lock = threading.Lock()
self._login_scripts = HydrusSerialisable.SerialisableList()
self._domains_to_login_info = {}
self._login_script_keys_to_login_scripts = {}
self._login_script_names_to_login_scripts = {}
self._hydrus_login_script = LoginScriptHydrus()
self._error_names = set()
def _GetBestLoginScript( self, login_domain ):
self._login_scripts.sort( key = lambda ls: len( ls.GetCredentialDefinitions() ) )
for login_script in self._login_scripts:
if login_domain in login_script.GetExampleDomains():
return login_script
return None
def _GetLoginDomainStatus( self, network_context ):
login_domain = None
login_expected = False
login_possible = True
login_error_text = ''
domain = network_context.context_data
potential_login_domains = ClientNetworkingDomain.ConvertDomainIntoAllApplicableDomains( domain, discard_www = False )
for potential_login_domain in potential_login_domains:
if potential_login_domain in self._domains_to_login_info:
login_domain = potential_login_domain
( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) = self._domains_to_login_info[ login_domain ]
if active or login_access_type == LOGIN_ACCESS_TYPE_EVERYTHING:
login_expected = True
if not active:
login_possible = False
login_error_text = 'Not active - ' + login_access_text
elif validity == VALIDITY_INVALID:
login_possible = False
login_error_text = validity_error_text
elif not HydrusData.TimeHasPassed( no_work_until ):
login_possible = False
login_error_text = no_work_until_reason
break
return ( login_domain, login_expected, login_possible, login_error_text )
def _GetLoginScriptAndCredentials( self, login_domain ):
if login_domain in self._domains_to_login_info:
( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) = self._domains_to_login_info[ login_domain ]
( login_script_key, login_script_name ) = login_script_key_and_name
if login_script_key in self._login_script_keys_to_login_scripts:
login_script = self._login_script_keys_to_login_scripts[ login_script_key ]
elif login_script_name in self._login_script_names_to_login_scripts:
login_script = self._login_script_names_to_login_scripts[ login_script_name ]
login_script_key_and_name = login_script.GetLoginScriptKeyAndName()
self._SetDirty()
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
else:
validity = VALIDITY_INVALID
validity_error_text = 'Could not find the login script for "' + login_domain + '"!'
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
self._SetDirty()
raise HydrusExceptions.ValidationException( validity_error_text )
try:
login_script.CheckCanLogin( credentials )
except HydrusExceptions.ValidationException as e:
validity = VALIDITY_INVALID
validity_error_text = str( e )
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
self._SetDirty()
raise
if validity == VALIDITY_UNTESTED and validity_error_text != '':
                # clear the stale 'restart the dialog to test validity' error text in cases where the entry is valid
validity_error_text = ''
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
return ( login_script, credentials )
else:
raise HydrusExceptions.ValidationException( 'Could not find any login entry for "' + login_domain + '"!' )
def _GetSerialisableInfo( self ):
serialisable_login_scripts = self._login_scripts.GetSerialisableTuple()
serialisable_domains_to_login_info = {}
for ( login_domain, ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) ) in list(self._domains_to_login_info.items()):
( login_script_key, login_script_name ) = login_script_key_and_name
serialisable_login_script_key_and_name = ( login_script_key.hex(), login_script_name )
serialisable_domains_to_login_info[ login_domain ] = ( serialisable_login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
return ( serialisable_login_scripts, serialisable_domains_to_login_info )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( serialisable_login_scripts, serialisable_domains_to_login_info ) = serialisable_info
self._login_scripts = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_login_scripts )
self._domains_to_login_info = {}
for ( login_domain, ( serialisable_login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) ) in list(serialisable_domains_to_login_info.items()):
( serialisable_login_script_key, login_script_name ) = serialisable_login_script_key_and_name
login_script_key_and_name = ( bytes.fromhex( serialisable_login_script_key ), login_script_name )
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
def _RecalcCache( self ):
self._login_script_keys_to_login_scripts = { login_script.GetLoginScriptKey() : login_script for login_script in self._login_scripts }
self._login_script_names_to_login_scripts = { login_script.GetName() : login_script for login_script in self._login_scripts }
self._RevalidateCache()
def _RevalidateCache( self ):
for login_domain in list(self._domains_to_login_info.keys()):
try:
self._GetLoginScriptAndCredentials( login_domain )
except HydrusExceptions.ValidationException:
pass
def _SetDirty( self ):
self._dirty = True
def AlreadyHaveExactlyThisLoginScript( self, new_login_script ):
with self._lock:
# absent irrelevant variables, do we have the exact same object already in?
login_script_key_and_name = new_login_script.GetLoginScriptKeyAndName()
dupe_login_scripts = [ login_script.Duplicate() for login_script in self._login_scripts ]
for dupe_login_script in dupe_login_scripts:
dupe_login_script.SetLoginScriptKeyAndName( login_script_key_and_name )
if dupe_login_script.DumpToString() == new_login_script.DumpToString():
return True
return False
def AutoAddLoginScripts( self, login_scripts ):
with self._lock:
next_login_scripts = list( self._login_scripts )
for login_script in login_scripts:
login_script.RegenerateLoginScriptKey()
next_login_scripts.extend( login_scripts )
self.SetLoginScripts( next_login_scripts )
def CheckCanLogin( self, network_context ):
with self._lock:
if network_context.context_type == CC.NETWORK_CONTEXT_DOMAIN:
( login_domain, login_expected, login_possible, login_error_text ) = self._GetLoginDomainStatus( network_context )
if login_domain is None or not login_expected:
raise HydrusExceptions.ValidationException( 'The domain ' + login_domain + ' has no active login script--has it just been turned off?' )
elif not login_possible:
raise HydrusExceptions.ValidationException( 'The domain ' + login_domain + ' cannot log in: ' + login_error_text )
elif network_context.context_type == CC.NETWORK_CONTEXT_HYDRUS:
service_key = network_context.context_data
services_manager = self.engine.controller.services_manager
if not services_manager.ServiceExists( service_key ):
raise HydrusExceptions.ValidationException( 'Service does not exist!' )
service = services_manager.GetService( service_key )
try:
service.CheckFunctional( including_bandwidth = False, including_account = False )
except Exception as e:
message = 'Service has had a recent error or is otherwise not functional! Specific error was:'
message += os.linesep * 2
message += str( e )
message += os.linesep * 2
message += 'You might like to try refreshing its account in \'review services\'.'
raise HydrusExceptions.ValidationException( message )
def DelayLoginScript( self, login_domain, login_script_key, reason ):
with self._lock:
if login_domain not in self._domains_to_login_info:
return
( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) = self._domains_to_login_info[ login_domain ]
if login_script_key != login_script_key_and_name[0]:
return
no_work_until = HydrusData.GetNow() + 3600 * 4
no_work_until_reason = reason
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
self._SetDirty()
def DeleteLoginDomain( self, login_domain ):
with self._lock:
if login_domain in self._domains_to_login_info:
del self._domains_to_login_info[ login_domain ]
self._RecalcCache()
self._SetDirty()
def DeleteLoginScripts( self, login_script_names ):
with self._lock:
login_scripts = [ login_script for login_script in self._login_scripts if login_script.GetName() not in login_script_names ]
self.SetLoginScripts( login_scripts )
def GenerateLoginProcess( self, network_context ):
with self._lock:
if network_context.context_type == CC.NETWORK_CONTEXT_DOMAIN:
( login_domain, login_expected, login_possible, login_error_text ) = self._GetLoginDomainStatus( network_context )
if login_domain is None or not login_expected:
raise HydrusExceptions.ValidationException( 'The domain ' + login_domain + ' has no active login script--has it just been turned off?' )
elif not login_possible:
raise HydrusExceptions.ValidationException( 'The domain ' + login_domain + ' cannot log in: ' + login_error_text )
else:
login_network_context = ClientNetworkingContexts.NetworkContext( context_type = CC.NETWORK_CONTEXT_DOMAIN, context_data = login_domain )
( login_script, credentials ) = self._GetLoginScriptAndCredentials( login_domain )
login_process = LoginProcessDomain( self.engine, login_network_context, login_script, credentials )
return login_process
elif network_context.context_type == CC.NETWORK_CONTEXT_HYDRUS:
login_process = LoginProcessHydrus( self.engine, network_context, self._hydrus_login_script )
return login_process
def GenerateLoginProcessForDomain( self, login_domain ):
network_context = ClientNetworkingContexts.NetworkContext.STATICGenerateForDomain( login_domain )
return self.GenerateLoginProcess( network_context )
def GetDomainsToLoginInfo( self ):
with self._lock:
self._RevalidateCache()
return dict( self._domains_to_login_info )
def GetLoginScripts( self ):
with self._lock:
return list( self._login_scripts )
def Initialise( self ):
self._RecalcCache()
def InvalidateLoginScript( self, login_domain, login_script_key, reason ):
with self._lock:
if login_domain not in self._domains_to_login_info:
return
( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) = self._domains_to_login_info[ login_domain ]
if login_script_key != login_script_key_and_name[0]:
return
validity = VALIDITY_INVALID
validity_error_text = reason
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
HydrusData.ShowText( 'The login for "' + login_domain + '" failed! It will not be reattempted until the problem is fixed. The failure reason was:' + os.linesep * 2 + validity_error_text )
self._SetDirty()
def IsDirty( self ):
with self._lock:
return self._dirty
def NeedsLogin( self, network_context ):
with self._lock:
if network_context.context_type == CC.NETWORK_CONTEXT_DOMAIN:
( login_domain, login_expected, login_possible, login_error_text ) = self._GetLoginDomainStatus( network_context )
if login_domain is None or not login_expected:
return False # no login required, no problem
else:
try:
( login_script, credentials ) = self._GetLoginScriptAndCredentials( login_domain )
except HydrusExceptions.ValidationException:
# couldn't find the script or something. assume we need a login to move errors forward to checkcanlogin trigger phase
return True
login_network_context = ClientNetworkingContexts.NetworkContext( context_type = CC.NETWORK_CONTEXT_DOMAIN, context_data = login_domain )
return not login_script.IsLoggedIn( self.engine, login_network_context )
elif network_context.context_type == CC.NETWORK_CONTEXT_HYDRUS:
return not self._hydrus_login_script.IsLoggedIn( self.engine, network_context )
def OverwriteDefaultLoginScripts( self, login_script_names ):
with self._lock:
from . import ClientDefaults
default_login_scripts = ClientDefaults.GetDefaultLoginScripts()
for login_script in default_login_scripts:
login_script.RegenerateLoginScriptKey()
existing_login_scripts = list( self._login_scripts )
new_login_scripts = [ login_script for login_script in existing_login_scripts if login_script.GetName() not in login_script_names ]
new_login_scripts.extend( [ login_script for login_script in default_login_scripts if login_script.GetName() in login_script_names ] )
self.SetLoginScripts( new_login_scripts )
def SetClean( self ):
with self._lock:
self._dirty = False
def SetCredentialsAndActivate( self, login_domain, new_credentials ):
with self._lock:
if login_domain not in self._domains_to_login_info:
return
( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) = self._domains_to_login_info[ login_domain ]
credentials = new_credentials
active = True
validity = VALIDITY_UNTESTED
validity_error_text = ''
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
self._SetDirty()
def SetDomainsToLoginInfo( self, domains_to_login_info ):
with self._lock:
self._domains_to_login_info = dict( domains_to_login_info )
self._RecalcCache()
self._SetDirty()
def SetLoginScripts( self, login_scripts ):
with self._lock:
self._login_scripts = HydrusSerialisable.SerialisableList( login_scripts )
# start with simple stuff first
self._login_scripts.sort( key = lambda ls: len( ls.GetCredentialDefinitions() ) )
for login_script in self._login_scripts:
login_script_key_and_name = login_script.GetLoginScriptKeyAndName()
example_domains_info = login_script.GetExampleDomainsInfo()
for ( login_domain, login_access_type, login_access_text ) in example_domains_info:
if '.' in login_domain:
# looks good, so let's see if we can update/add some info
if login_domain in self._domains_to_login_info:
( old_login_script_key_and_name, credentials, old_login_access_type, old_login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) = self._domains_to_login_info[ login_domain ]
if old_login_script_key_and_name[1] == login_script_key_and_name[1]:
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
else:
credentials = {}
# if there is nothing to enter, turn it on by default, like HF click-through
active = len( login_script.GetCredentialDefinitions() ) == 0
validity = VALIDITY_UNTESTED
validity_error_text = ''
no_work_until = 0
no_work_until_reason = ''
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
self._RecalcCache()
self._SetDirty()
def ValidateLoginScript( self, login_domain, login_script_key ):
with self._lock:
if login_domain not in self._domains_to_login_info:
return
( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) = self._domains_to_login_info[ login_domain ]
if login_script_key != login_script_key_and_name[0]:
return
validity = VALIDITY_VALID
validity_error_text = ''
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
self._SetDirty()
def TryToLinkMissingLoginScripts( self, login_domains ):
with self._lock:
for login_domain in login_domains:
try:
( existing_login_script, existing_credentials ) = self._GetLoginScriptAndCredentials( login_domain )
continue # already seems to have a good login script, so nothing to fix
except HydrusExceptions.ValidationException:
pass
( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason ) = self._domains_to_login_info[ login_domain ]
login_script = self._GetBestLoginScript( login_domain )
if login_script is None:
continue
login_script_key_and_name = login_script.GetLoginScriptKeyAndName()
self._domains_to_login_info[ login_domain ] = ( login_script_key_and_name, credentials, login_access_type, login_access_text, active, validity, validity_error_text, no_work_until, no_work_until_reason )
self._SetDirty()
def LoginTumblrGDPR( self ):
# t-thanks, EU
# this is cribbed from poking around here https://github.com/johanneszab/TumblThree/commit/3563d6cebf1a467151d6b8d6eee9806ddd6e6364
network_job = ClientNetworkingJobs.NetworkJob( 'GET', 'http://www.tumblr.com/' )
network_job.SetForLogin( True )
self.engine.AddJob( network_job )
network_job.WaitUntilDone()
html = network_job.GetContentText()
formula = ClientParsing.ParseFormulaHTML( tag_rules = [ ClientParsing.ParseRuleHTML( rule_type = ClientParsing.HTML_RULE_TYPE_DESCENDING, tag_name = 'meta', tag_attributes = { 'id' : 'tumblr_form_key' } ) ], content_to_fetch = ClientParsing.HTML_CONTENT_ATTRIBUTE, attribute_to_fetch = "content" )
results = formula.Parse( {}, html )
if len( results ) != 1:
raise HydrusExceptions.ParseException( 'Could not figure out the tumblr form key for the GDPR click-through.' )
tumblr_form_key = results[0]
#
body = '{\"eu_resident\":true,\"gdpr_is_acceptable_age\":true,\"gdpr_consent_core\":true,\"gdpr_consent_first_party_ads\":true,\"gdpr_consent_third_party_ads\":true,\"gdpr_consent_search_history\":true,\"redirect_to\":\"\"}'
referral_url = 'https://www.tumblr.com/privacy/consent?redirect='
network_job = ClientNetworkingJobs.NetworkJob( 'POST', 'https://www.tumblr.com/svc/privacy/consent', body = body, referral_url = referral_url )
network_job.SetForLogin( True )
network_job.AddAdditionalHeader( 'Accept', 'application/json, text/javascript, */*; q=0.01')
network_job.AddAdditionalHeader( 'Content-Type', 'application/json' )
network_job.AddAdditionalHeader( 'X-Requested-With', 'XMLHttpRequest' )
network_job.AddAdditionalHeader( 'X-tumblr-form-key', tumblr_form_key )
self.engine.AddJob( network_job )
network_job.WaitUntilDone()
# test cookies here or something
HydrusData.ShowText( 'Looks like tumblr GDPR click-through worked! You should be good for a year, at which point we should have an automatic solution for this!' )
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_NETWORK_LOGIN_MANAGER ] = NetworkLoginManager
CREDENTIAL_TYPE_TEXT = 0
CREDENTIAL_TYPE_PASS = 1
credential_type_str_lookup = {}
credential_type_str_lookup[ CREDENTIAL_TYPE_TEXT ] = 'normal'
credential_type_str_lookup[ CREDENTIAL_TYPE_PASS ] = 'hidden (password)'
class LoginCredentialDefinition( HydrusSerialisable.SerialisableBaseNamed ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_LOGIN_CREDENTIAL_DEFINITION
SERIALISABLE_NAME = 'Login Credential Definition'
SERIALISABLE_VERSION = 1
def __init__( self, name = 'username', credential_type = CREDENTIAL_TYPE_TEXT, string_match = None ):
if string_match is None:
string_match = ClientParsing.StringMatch()
HydrusSerialisable.SerialisableBaseNamed.__init__( self, name )
self._credential_type = credential_type
self._string_match = string_match
def _GetSerialisableInfo( self ):
serialisable_string_match = self._string_match.GetSerialisableTuple()
return ( self._credential_type, serialisable_string_match )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( self._credential_type, serialisable_string_match ) = serialisable_info
self._string_match = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_string_match )
def GetStringMatch( self ):
return self._string_match
def GetType( self ):
return self._credential_type
def SetStringMatch( self, string_match ):
self._string_match = string_match
def SetType( self, credential_type ):
self._credential_type = credential_type
def ShouldHide( self ):
return self._credential_type == CREDENTIAL_TYPE_PASS
def Test( self, text ):
if self._string_match is not None:
try:
self._string_match.Test( text )
except HydrusExceptions.StringMatchException as e:
raise HydrusExceptions.ValidationException( 'Could not validate "' + self._name + '" credential: ' + str( e ) )
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_LOGIN_CREDENTIAL_DEFINITION ] = LoginCredentialDefinition
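# Illustrative sketch: a typical username/password pair of credential
# definitions as a login script might declare them; the names and types here
# are examples, not taken from any bundled script.
def _example_credential_definitions():
    username = LoginCredentialDefinition( name = 'username', credential_type = CREDENTIAL_TYPE_TEXT )
    password = LoginCredentialDefinition( name = 'password', credential_type = CREDENTIAL_TYPE_PASS )
    return [ username, password ]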
class LoginProcess( object ):
def __init__( self, engine, network_context, login_script ):
self.engine = engine
self.network_context = network_context
self.login_script = login_script
self._done = False
def _Start( self ):
raise NotImplementedError()
def IsDone( self ):
return self._done
def Start( self ):
try:
self._Start()
finally:
self._done = True
class LoginProcessDomain( LoginProcess ):
def __init__( self, engine, network_context, login_script, credentials ):
LoginProcess.__init__( self, engine, network_context, login_script )
self.credentials = credentials
def _Start( self ):
login_domain = self.network_context.context_data
job_key = ClientThreading.JobKey( cancellable = True )
job_key.SetVariable( 'popup_title', 'Logging in ' + login_domain )
HG.client_controller.pub( 'message', job_key )
HydrusData.Print( 'Starting login for ' + login_domain )
result = self.login_script.Start( self.engine, self.network_context, self.credentials, job_key = job_key )
HydrusData.Print( 'Finished login for ' + self.network_context.context_data + '. Result was: ' + result )
job_key.SetVariable( 'popup_text_1', result )
job_key.Finish()
job_key.Delete( 4 )
class LoginProcessHydrus( LoginProcess ):
def _Start( self ):
self.login_script.Start( self.engine, self.network_context )
class LoginScriptHydrus( object ):
def _IsLoggedIn( self, engine, network_context ):
session = engine.session_manager.GetSession( network_context )
cookies = session.cookies
cookies.clear_expired_cookies()
return 'session_key' in cookies
def IsLoggedIn( self, engine, network_context ):
return self._IsLoggedIn( engine, network_context )
def Start( self, engine, network_context ):
service_key = network_context.context_data
service = engine.controller.services_manager.GetService( service_key )
base_url = service.GetBaseURL()
url = base_url + 'session_key'
access_key = service.GetCredentials().GetAccessKey()
network_job = ClientNetworkingJobs.NetworkJobHydrus( service_key, 'GET', url )
network_job.SetForLogin( True )
network_job.AddAdditionalHeader( 'Hydrus-Key', access_key.hex() )
engine.AddJob( network_job )
try:
network_job.WaitUntilDone()
if self._IsLoggedIn( engine, network_context ):
HydrusData.Print( 'Successfully logged into ' + service.GetName() + '.' )
else:
service.DelayFutureRequests( 'Could not log in for unknown reason.' )
except Exception as e:
e_string = str( e )
service.DelayFutureRequests( e_string )
class LoginScriptDomain( HydrusSerialisable.SerialisableBaseNamed ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_LOGIN_SCRIPT_DOMAIN
SERIALISABLE_NAME = 'Login Script - Domain'
SERIALISABLE_VERSION = 2
def __init__( self, name = 'login script', login_script_key = None, required_cookies_info = None, credential_definitions = None, login_steps = None, example_domains_info = None ):
if required_cookies_info is None:
required_cookies_info = {}
required_cookies_info = HydrusSerialisable.SerialisableDictionary( required_cookies_info )
if credential_definitions is None:
credential_definitions = []
credential_definitions = HydrusSerialisable.SerialisableList( credential_definitions )
if login_steps is None:
login_steps = []
login_steps = HydrusSerialisable.SerialisableList( login_steps )
if example_domains_info is None:
example_domains_info = []
HydrusSerialisable.SerialisableBaseNamed.__init__( self, name )
self._login_script_key = HydrusData.GenerateKey()
self._required_cookies_info = required_cookies_info # string match : string match
self._credential_definitions = credential_definitions
self._login_steps = login_steps
self._example_domains_info = example_domains_info # domain | login_access_type | login_access_text
def _GetSerialisableInfo( self ):
serialisable_login_script_key = self._login_script_key.hex()
serialisable_required_cookies = self._required_cookies_info.GetSerialisableTuple()
serialisable_credential_definitions = self._credential_definitions.GetSerialisableTuple()
serialisable_login_steps = self._login_steps.GetSerialisableTuple()
return ( serialisable_login_script_key, serialisable_required_cookies, serialisable_credential_definitions, serialisable_login_steps, self._example_domains_info )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( serialisable_login_script_key, serialisable_required_cookies, serialisable_credential_definitions, serialisable_login_steps, self._example_domains_info ) = serialisable_info
self._login_script_key = bytes.fromhex( serialisable_login_script_key )
self._required_cookies_info = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_required_cookies )
self._credential_definitions = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_credential_definitions )
self._login_steps = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_login_steps )
        # convert lists to tuples for listctrl data hashing
self._example_domains_info = [ tuple( l ) for l in self._example_domains_info ]
def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
if version == 1:
( serialisable_login_script_key, serialisable_required_cookies, serialisable_credential_definitions, serialisable_login_steps, example_domains_info ) = old_serialisable_info
old_required_cookies_info = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_required_cookies )
new_required_cookies_info = HydrusSerialisable.SerialisableDictionary()
for ( name, value_string_match ) in list(old_required_cookies_info.items()):
key_string_match = ClientParsing.StringMatch( match_type = ClientParsing.STRING_MATCH_FIXED, match_value = name, example_string = name )
new_required_cookies_info[ key_string_match ] = value_string_match
serialisable_required_cookies = new_required_cookies_info.GetSerialisableTuple()
new_serialisable_info = ( serialisable_login_script_key, serialisable_required_cookies, serialisable_credential_definitions, serialisable_login_steps, example_domains_info )
return ( 2, new_serialisable_info )
def _IsLoggedIn( self, engine, network_context, validation_check = False ):
session = engine.session_manager.GetSession( network_context )
cookies = session.cookies
cookies.clear_expired_cookies()
search_domain = network_context.context_data
for ( cookie_name_string_match, value_string_match ) in list(self._required_cookies_info.items()):
try:
cookie = ClientNetworkingDomain.GetCookie( cookies, search_domain, cookie_name_string_match )
except HydrusExceptions.DataMissing as e:
if validation_check:
raise HydrusExceptions.ValidationException( 'Missing cookie "' + cookie_name_string_match.ToString() + '"!' )
return False
cookie_text = cookie.value
try:
value_string_match.Test( cookie_text )
except HydrusExceptions.StringMatchException as e:
if validation_check:
raise HydrusExceptions.ValidationException( 'Cookie "' + cookie_name_string_match.ToString() + '" failed: ' + str( e ) + '!' )
return False
return True
def CheckCanLogin( self, given_credentials ):
self.CheckIsValid()
given_cred_names = set( given_credentials.keys() )
required_cred_names = { name for name in itertools.chain.from_iterable( ( step.GetRequiredCredentials() for step in self._login_steps ) ) }
missing_givens = required_cred_names.difference( given_cred_names )
if len( missing_givens ) > 0:
missing_givens = list( missing_givens )
missing_givens.sort()
raise HydrusExceptions.ValidationException( 'Missing required credentials: ' + ', '.join( missing_givens ) )
#
cred_names_to_definitions = { credential_definition.GetName() : credential_definition for credential_definition in self._credential_definitions }
for ( pretty_name, text ) in given_credentials.items():
if pretty_name not in cred_names_to_definitions:
continue
credential_definition = cred_names_to_definitions[ pretty_name ]
credential_definition.Test( text )
def CheckIsValid( self ):
defined_cred_names = { credential_definition.GetName() for credential_definition in self._credential_definitions }
required_cred_names = { name for name in itertools.chain.from_iterable( ( step.GetRequiredCredentials() for step in self._login_steps ) ) }
missing_definitions = required_cred_names.difference( defined_cred_names )
if len( missing_definitions ) > 0:
missing_definitions = list( missing_definitions )
missing_definitions.sort()
raise HydrusExceptions.ValidationException( 'Missing required credential definitions: ' + ', '.join( missing_definitions ) )
#
temp_vars = set()
for login_step in self._login_steps:
( required_vars, set_vars ) = login_step.GetRequiredAndSetTempVariables()
missing_vars = required_vars.difference( temp_vars )
if len( missing_vars ) > 0:
missing_vars = list( missing_vars )
missing_vars.sort()
raise HydrusExceptions.ValidationException( 'Missing temp variables for login step "' + login_step.GetName() + '": ' + ', '.join( missing_vars ) )
temp_vars.update( set_vars )
def GetCredentialDefinitions( self ):
return self._credential_definitions
def GetExampleDomains( self ):
return [ domain for ( domain, login_access_type, login_access_text ) in self._example_domains_info ]
def GetExampleDomainsInfo( self ):
return self._example_domains_info
def GetExampleDomainInfo( self, given_domain ):
for ( domain, login_access_type, login_access_text ) in self._example_domains_info:
if domain == given_domain:
return ( login_access_type, login_access_text )
raise HydrusExceptions.DataMissing( 'Could not find that domain!' )
def GetRequiredCookiesInfo( self ):
return self._required_cookies_info
def GetLoginExpiry( self, engine, network_context ):
session = engine.session_manager.GetSession( network_context )
cookies = session.cookies
cookies.clear_expired_cookies()
search_domain = network_context.context_data
session_cookies = False
expiry_timestamps = []
for cookie_name_string_match in list(self._required_cookies_info.keys()):
try:
cookie = ClientNetworkingDomain.GetCookie( cookies, search_domain, cookie_name_string_match )
except HydrusExceptions.DataMissing as e:
return None
expiry = cookie.expires
if expiry is None:
session_cookies = True
else:
expiry_timestamps.append( expiry )
if session_cookies:
return None
else:
return min( expiry_timestamps )
def GetLoginScriptKey( self ):
return self._login_script_key
def GetLoginScriptKeyAndName( self ):
return ( self._login_script_key, self._name )
def GetLoginSteps( self ):
return self._login_steps
def GetRequiredCredentials( self ):
required_creds = []
for login_step in self._login_steps:
required_creds.extend( login_step.GetRequiredCredentials() ) # name with an order
return required_creds
def GetSafeSummary( self ):
return 'Login Script "' + self._name + '" - ' + ', '.join( self.GetExampleDomains() )
def IsLoggedIn( self, engine, network_context ):
return self._IsLoggedIn( engine, network_context )
def RegenerateLoginScriptKey( self ):
self._login_script_key = HydrusData.GenerateKey()
def SetLoginScriptKey( self, login_script_key ):
self._login_script_key = login_script_key
def SetLoginScriptKeyAndName( self, login_script_key_and_name ):
( login_script_key, name ) = login_script_key_and_name
self._login_script_key = login_script_key
self._name = name
def Start( self, engine, network_context, given_credentials, network_job_presentation_context_factory = None, test_result_callable = None, job_key = None ):
# don't mess with the domain--assume that we are given precisely the right domain
login_domain = network_context.context_data
temp_variables = {}
last_url_used = None
for login_step in self._login_steps:
if job_key is not None:
if job_key.IsCancelled():
message = 'User cancelled the login process.'
engine.login_manager.DelayLoginScript( login_domain, self._login_script_key, message )
return message
job_key.SetVariable( 'popup_text_1', login_step.GetName() )
try:
last_url_used = login_step.Start( engine, login_domain, given_credentials, temp_variables, referral_url = last_url_used, network_job_presentation_context_factory = network_job_presentation_context_factory, test_result_callable = test_result_callable )
except HydrusExceptions.ValidationException as e:
if test_result_callable is not None:
HydrusData.ShowException( e )
message = str( e )
engine.login_manager.InvalidateLoginScript( login_domain, self._login_script_key, message )
return 'Verification error: ' + message
except HydrusExceptions.NetworkException as e:
if test_result_callable is not None:
HydrusData.ShowException( e )
message = str( e )
engine.login_manager.DelayLoginScript( login_domain, self._login_script_key, message )
return 'Network error: ' + message
except Exception as e:
if test_result_callable is not None:
HydrusData.ShowException( e )
message = str( e )
engine.login_manager.InvalidateLoginScript( login_domain, self._login_script_key, message )
return 'Unusual error: ' + message
time.sleep( 2 )
try:
self._IsLoggedIn( engine, network_context, validation_check = True )
except Exception as e:
if test_result_callable is not None:
HydrusData.ShowException( e )
message = str( e )
engine.login_manager.InvalidateLoginScript( login_domain, self._login_script_key, message )
return 'Final cookie check failed: ' + message
engine.login_manager.ValidateLoginScript( login_domain, self._login_script_key )
return 'Login OK!'
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_LOGIN_SCRIPT_DOMAIN ] = LoginScriptDomain
LOGIN_PARAMETER_TYPE_PARAMETER = 0
LOGIN_PARAMETER_TYPE_COOKIE = 1
LOGIN_PARAMETER_TYPE_HEADER = 2
class LoginStep( HydrusSerialisable.SerialisableBaseNamed ):
SERIALISABLE_TYPE = HydrusSerialisable.SERIALISABLE_TYPE_LOGIN_STEP
SERIALISABLE_NAME = 'Login Step'
SERIALISABLE_VERSION = 2
def __init__( self, name = 'hit home page to establish session', scheme = 'https', method = 'GET', subdomain = None, path = '/' ):
HydrusSerialisable.SerialisableBaseNamed.__init__( self, name )
self._scheme = scheme
self._method = method
self._subdomain = subdomain
self._path = path
self._CleanseSubdomainAndPath()
self._required_credentials = {} # pretty_name : arg name
self._static_args = {} # arg name : string
self._temp_args = {} # temp arg name : arg name
self._required_cookies_info = HydrusSerialisable.SerialisableDictionary() # string match : string match
self._content_parsers = HydrusSerialisable.SerialisableList()
def _CleanseSubdomainAndPath( self ):
if self._subdomain is not None:
            self._subdomain = re.sub( r'[^a-z\.]+', '', self._subdomain )
if not self._path.startswith( '/' ):
self._path = '/' + self._path
def _GetSerialisableInfo( self ):
serialisable_required_cookies = self._required_cookies_info.GetSerialisableTuple()
serialisable_content_parsers = self._content_parsers.GetSerialisableTuple()
return ( self._scheme, self._method, self._subdomain, self._path, self._required_credentials, self._static_args, self._temp_args, serialisable_required_cookies, serialisable_content_parsers )
def _InitialiseFromSerialisableInfo( self, serialisable_info ):
( self._scheme, self._method, self._subdomain, self._path, self._required_credentials, self._static_args, self._temp_args, serialisable_required_cookies, serialisable_content_parsers ) = serialisable_info
self._CleanseSubdomainAndPath()
self._required_cookies_info = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_required_cookies )
self._content_parsers = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_content_parsers )
def _UpdateSerialisableInfo( self, version, old_serialisable_info ):
if version == 1:
( scheme, method, subdomain, path, required_credentials, static_args, temp_args, serialisable_required_cookies, serialisable_content_parsers ) = old_serialisable_info
old_required_cookies_info = HydrusSerialisable.CreateFromSerialisableTuple( serialisable_required_cookies )
new_required_cookies_info = HydrusSerialisable.SerialisableDictionary()
for ( name, value_string_match ) in list(old_required_cookies_info.items()):
key_string_match = ClientParsing.StringMatch( match_type = ClientParsing.STRING_MATCH_FIXED, match_value = name, example_string = name )
new_required_cookies_info[ key_string_match ] = value_string_match
serialisable_required_cookies = new_required_cookies_info.GetSerialisableTuple()
new_serialisable_info = ( scheme, method, subdomain, path, required_credentials, static_args, temp_args, serialisable_required_cookies, serialisable_content_parsers )
return ( 2, new_serialisable_info )
def GetRequiredCredentials( self ):
return [ pretty_name for ( pretty_name, arg_name ) in list(self._required_credentials.items()) ]
def GetRequiredAndSetTempVariables( self ):
required_temp_variables = set( self._temp_args.keys() )
set_temp_variables = { additional_info for [ ( name, content_type, additional_info ) ] in [ content_parser.GetParsableContent() for content_parser in self._content_parsers ] }
return ( required_temp_variables, set_temp_variables )
def SetComplicatedVariables( self, required_credentials, static_args, temp_args, required_cookies_info, content_parsers ):
self._required_credentials = required_credentials
self._static_args = static_args
self._temp_args = temp_args
self._required_cookies_info = HydrusSerialisable.SerialisableDictionary( required_cookies_info )
self._content_parsers = HydrusSerialisable.SerialisableList( content_parsers )
def Start( self, engine, domain, given_credentials, temp_variables, referral_url = None, network_job_presentation_context_factory = None, test_result_callable = None ):
def session_to_cookie_strings( sess ):
cookie_strings = set()
for cookie in sess.cookies:
s = cookie.name + ': ' + cookie.value + ' | ' + cookie.domain + ' | '
expiry = cookie.expires
if expiry is None:
expiry = -1
pretty_expiry = 'session'
else:
pretty_expiry = HydrusData.ConvertTimestampToPrettyExpires( expiry )
s += pretty_expiry
cookie_strings.add( s )
return cookie_strings
url = 'Did not make a url.'
test_result_body = None
downloaded_data = 'Did not download data.'
new_temp_variables = {}
original_cookie_strings = session_to_cookie_strings( engine.session_manager.GetSessionForDomain( domain ) )
test_script_result = 'Did not start.'
try:
domain_to_hit = domain
if self._subdomain is not None:
if domain.startswith( 'www.' ):
domain = domain[4:]
domain_to_hit = self._subdomain + '.' + domain
query_dict = {}
query_dict.update( self._static_args )
for ( pretty_name, arg_name ) in list(self._required_credentials.items()):
query_dict[ arg_name ] = given_credentials[ pretty_name ]
for ( temp_name, arg_name ) in list(self._temp_args.items()):
if temp_name not in temp_variables:
raise HydrusExceptions.ValidationException( 'The temporary variable \'' + temp_name + '\' was not found!' )
query_dict[ arg_name ] = temp_variables[ temp_name ]
scheme = self._scheme
netloc = domain_to_hit
path = self._path
params = ''
fragment = ''
if self._method == 'GET':
query = ClientNetworkingDomain.ConvertQueryDictToText( query_dict )
body = None
test_result_body = ''
elif self._method == 'POST':
query = ''
body = query_dict
test_result_body = ClientNetworkingDomain.ConvertQueryDictToText( query_dict )
r = urllib.parse.ParseResult( scheme, netloc, path, params, query, fragment )
url = r.geturl()
network_job = ClientNetworkingJobs.NetworkJob( self._method, url, body = body, referral_url = referral_url )
if self._method == 'POST' and referral_url is not None:
p = urllib.parse.urlparse( referral_url )
r = urllib.parse.ParseResult( p.scheme, p.netloc, '', '', '', '' )
origin = r.geturl() # https://accounts.pixiv.net
network_job.AddAdditionalHeader( 'origin', origin ) # GET/POST forms are supposed to have this for CSRF. we'll try it just with POST for now
network_job.SetForLogin( True )
engine.AddJob( network_job )
if network_job_presentation_context_factory is not None:
with network_job_presentation_context_factory( network_job ) as njpc:
network_job.WaitUntilDone()
else:
network_job.WaitUntilDone()
session = network_job.GetSession()
cookies = session.cookies
for ( cookie_name_string_match, string_match ) in list(self._required_cookies_info.items()):
try:
cookie = ClientNetworkingDomain.GetCookie( cookies, domain, cookie_name_string_match )
except HydrusExceptions.DataMissing as e:
raise HydrusExceptions.ValidationException( 'Missing cookie "' + cookie_name_string_match.ToString() + '" on step "' + self._name + '"!' )
cookie_text = cookie.value
try:
string_match.Test( cookie_text )
except HydrusExceptions.StringMatchException as e:
raise HydrusExceptions.ValidationException( 'Cookie "' + cookie_name_string_match.ToString() + '" failed on step "' + self._name + '": ' + str( e ) + '!' )
downloaded_text = network_job.GetContentText()
parsing_context = {}
parsing_context[ 'url' ] = url
for content_parser in self._content_parsers:
try:
parse_results = content_parser.Parse( parsing_context, downloaded_text )
except HydrusExceptions.VetoException as e:
raise HydrusExceptions.ValidationException( str( e ) )
result = ClientParsing.GetVariableFromParseResults( parse_results )
if result is not None:
( temp_name, value ) = result
new_temp_variables[ temp_name ] = value
temp_variables.update( new_temp_variables )
test_script_result = 'OK!'
return url
except Exception as e:
test_script_result = str( e )
raise
finally:
if test_result_callable is not None:
current_cookie_strings = session_to_cookie_strings( engine.session_manager.GetSessionForDomain( domain ) )
new_cookie_strings = tuple( current_cookie_strings.difference( original_cookie_strings ) )
new_temp_strings = tuple( ( key + ': ' + value for ( key, value ) in list(new_temp_variables.items()) ) )
test_result = ( self._name, url, test_result_body, downloaded_data, new_temp_strings, new_cookie_strings, test_script_result )
test_result_callable( test_result )
def ToTuple( self ):
return ( self._scheme, self._method, self._subdomain, self._path, self._required_credentials, self._static_args, self._temp_args, self._required_cookies_info, self._content_parsers )
HydrusSerialisable.SERIALISABLE_TYPES_TO_OBJECT_TYPES[ HydrusSerialisable.SERIALISABLE_TYPE_LOGIN_STEP ] = LoginStep
|
the-stack_0_2464 | import numpy as np
from numpy import random
SF = 0xF0
EF = 0x0F
VAL_MIN = 0
VAL_MAX = 255
DU_LEN_MAX = 1024
NUM_OF_VECTORS = 10
VECTOR_HEADER = 'test_vectors.hpp'
VECTOR_SRC = 'test_vectors.cpp'
VECTOR_PY = 'test_vectors.py'
vectors = []
for i in range(NUM_OF_VECTORS):
du = random.randint(VAL_MIN, VAL_MAX, size=random.randint(1, DU_LEN_MAX))
vectors.append(du.tolist())
with open(VECTOR_SRC, 'w') as fw:
fw.write('#include <cstdint>\n')
fw.write('#include <vector>\n\n')
fw.write('#include "{}"\n\n'.format(VECTOR_HEADER))
vectors_txt = 'std::vector<const uint8_t *> vectors = {'
vectors_sizes_txt = 'std::vector<size_t> vectors_sizes = {'
for i, v in enumerate(vectors):
fw.write('const uint8_t v' + str(i) + '[] = {\n')
tmp = ''
for j, val in enumerate(v):
tmp += '0x{0:02X},'.format(val)
if (0 == ((j + 1) % 8)):
tmp += '\n '
else:
tmp += ' '
tmp = tmp.strip(' ,')
fw.write(' ' + tmp)
fw.write('\n};\n\n')
vectors_txt += 'v' + str(i) + ' ,'
vectors_sizes_txt += str(len(v)) + ' ,'
vectors_txt = vectors_txt.strip(',') + '};\n'
fw.write(vectors_txt)
vectors_sizes_txt = vectors_sizes_txt.strip(',') + '};\n'
fw.write(vectors_sizes_txt)
with open(VECTOR_HEADER, 'w') as fw:
fw.write('#ifndef _TEST_VECTORS_HPP_\n')
fw.write('#define _TEST_VECTORS_HPP_\n\n')
fw.write('#include <cstdint>\n\n')
fw.write('#include <vector>\n\n')
fw.write('extern std::vector<const uint8_t *> vectors;\n')
fw.write('extern std::vector<size_t> vectors_sizes;\n\n')
fw.write('#endif // _TEST_VECTORS_HPP_\n')
with open(VECTOR_PY, 'w') as fw:
fw.write('vectors = []\n\n')
for i, v in enumerate(vectors):
fw.write('# Vector[{}]: {} bytes\n'.format(i, len(v)))
fw.write('vectors.append(bytes([\n')
tmp = ''
for j, val in enumerate(v):
tmp += '0x{0:02X},'.format(val)
if (0 == ((j + 1) % 8)):
tmp += '\n '
else:
tmp += ' '
tmp = tmp.strip(' ,')
fw.write(' ' + tmp + '\n')
fw.write(']))\n\n')
|
the-stack_0_2465 | import hashlib
import logging
import random
from django.conf import settings
from django.contrib.auth.models import Group
from uniauth.processors import (BaseProcessor,
NameIdBuilder)
from . unical_attributes_generator import UnicalAttributeGenerator
logger = logging.getLogger(__name__)
if 'ldap_peoples' in settings.INSTALLED_APPS:
from ldap_peoples.models import LdapAcademiaUser
if 'multildap' in settings.INSTALLED_APPS:
from multildap.client import LdapClient
class GroupProcessor(BaseProcessor):
"""
Example implementation of access control for users:
- superusers are allowed
- staff is allowed
    - otherwise, they must belong to a certain group
"""
group = "ExampleGroup"
def has_access(self, user): # pragma: no cover
return user.is_superuser or \
user.is_staff or \
user.groups.filter(name=self.group).exists()
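# Illustrative sketch, not part of the original module: a deployment would typically
# subclass GroupProcessor and point it at its own Django group. The group name below
# is an assumption used only for illustration.
class ResearchersGroupProcessor(GroupProcessor):
    """Allows superusers, staff, and members of the 'Researchers' group."""
    group = "Researchers"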
class LdapAcademiaProcessor(BaseProcessor):
""" Processor class used to retrieve attribute from LDAP server
and user nameID (userID) with standard formats
"""
def get_identity(self, user):
if isinstance(user, str):
username = user
else:
username = user.username
return LdapAcademiaUser.objects.filter(uid=username).first()
def create_identity(self, user, sp={}):
""" Generate an identity dictionary of the user based on the
given mapping of desired user attributes by the SP
"""
default_mapping = {'username': 'username'}
sp_mapping = sp['config'].get('attribute_mapping',
default_mapping)
# get ldap user
lu = self.get_identity(user)
#logging.info("{} doesn't have a valid computed ePPN in LDAP, please fix it!".format(user.username))
results = self.process_attributes(user, sp_mapping)
if not lu:
return results
results = self.process_attributes(lu, sp_mapping)
# add custom/legacy attribute made by processing
results = self.extra_attr_processing(results, sp_mapping)
# if targetedID is available give it to sp
if self.eduPersonTargetedID:
results['eduPersonTargetedID'] = [self.eduPersonTargetedID]
return results
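# Illustrative sketch, not part of the original module: create_identity() above reads
# sp['config']['attribute_mapping'] and falls back to {'username': 'username'}. The
# helper below only shows one plausible shape for that entry; the real mapping comes
# from the SP configuration, and the attribute names used here are assumptions.
def _example_sp_entry():
    return {
        'config': {
            'attribute_mapping': {
                'username': 'uid',
                'email': 'mail',
            }
        }
    }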
class LdapUnicalAcademiaProcessor(LdapAcademiaProcessor):
"""
    The same as its parent class, but with custom attribute processing
    for legacy support of non-standard SPs
"""
def extra_attr_processing(self, results, sp_mapping):
return UnicalAttributeGenerator.process(results, sp_mapping)
class LdapUnicalMultiAcademiaProcessor(LdapUnicalAcademiaProcessor):
"""
Uses pyMultiLDAP to gather an uid from multiple sources.
It will stop on the first occurrence.
"""
def get_identity(self, user):
if hasattr(self, 'request') and hasattr(self.request, 'session'):
if self.request.session.get('identity_attributes'):
return type('', (object,), self.request.session['identity_attributes'])()
if isinstance(user, str):
username = user
else:
username = user.username
# otherwise do another query ...
identity = None
for lc in settings.LDAP_CONNECTIONS: # pragma: no coverage
ldapfilter = '(uid={})'.format(username)
logging.debug("Processor {} searches for {} in {}".format(self.__class__,
username,
lc))
identity = lc.get(search=ldapfilter, format='object')
if identity:
return identity
|
the-stack_0_2468 |
import random
import pytest
from conftest import get_api_data
from assemblyline.common import forge
from assemblyline.odm.random_data import create_users, wipe_users, create_heuristics, wipe_heuristics
@pytest.fixture(scope="module")
def datastore(datastore_connection):
try:
create_users(datastore_connection)
create_heuristics(datastore_connection)
yield datastore_connection
finally:
wipe_users(datastore_connection)
wipe_heuristics(datastore_connection)
def test_get_heuristics(datastore, login_session):
_, session, host = login_session
heuristic = random.choice(datastore.heuristic.search("id:*", rows=100, as_obj=False)['items'])
resp = get_api_data(session, f"{host}/api/v4/heuristics/{heuristic['heur_id']}/")
assert resp['classification'] == heuristic['classification']
assert resp['description'] == heuristic['description']
assert resp['filetype'] == heuristic['filetype']
assert resp['heur_id'] == heuristic['heur_id']
assert resp['name'] == heuristic['name']
def test_heuristic_stats(datastore, login_session):
_, session, host = login_session
cache = forge.get_statistics_cache()
cache.delete()
resp = get_api_data(session, f"{host}/api/v4/heuristics/stats/")
assert len(resp) == 0
stats = datastore.calculate_heuristic_stats()
cache.set('heuristics', stats)
heuristic_count = datastore.heuristic.search("id:*", rows=0)['total']
resp = get_api_data(session, f"{host}/api/v4/heuristics/stats/")
assert len(resp) == heuristic_count
for sig_stat in resp:
assert sorted(list(sig_stat.keys())) == ['avg', 'classification', 'count', 'heur_id', 'max', 'min', 'name']
|
the-stack_0_2470 | import os
from ats.attributedict import AttributeDict
statuses = AttributeDict()
_StatusCodesAbr = dict(
CREATED = "INIT",
INVALID = "INVD",
PASSED = "PASS",
FAILED = "FAIL",
SKIPPED = "SKIP",
RUNNING = 'EXEC',
FILTERED = 'FILT',
TIMEDOUT = 'TIME',
BATCHED = "BACH",
HALTED = "HALT",
EXPECTED = "EXPT",
LSFERROR = "LSFE",
)
class _StatusCode:
def __init__(self, name):
self.name = name
self.abr = _StatusCodesAbr[name]
def __str__(self):
return self.abr
def __eq__(self, other):
if isinstance(other, _StatusCode):
return self.name == other.name
elif isinstance(other, str):
return self.name == other or self.abr == other
else:
return False
def __ne__(self, other):
return self.name != other.name
def __repr__(self):
return "StatusCode(%s)" % repr(self.name)
def StatusCode(name):
"Return a status code so that they compare with 'is'. "
try:
return statuses[name]
except KeyError:
new = _StatusCode(name)
statuses[name] = new
return new
CREATED = StatusCode("CREATED")
INVALID = StatusCode("INVALID")
PASSED = StatusCode("PASSED")
FAILED = StatusCode("FAILED")
SKIPPED = StatusCode("SKIPPED")
RUNNING = StatusCode("RUNNING")
FILTERED = StatusCode("FILTERED")
TIMEDOUT = StatusCode("TIMEDOUT")
BATCHED = StatusCode("BATCHED")
HALTED = StatusCode("HALTED")
EXPECTED = StatusCode("EXPECTED")
LSFERROR = StatusCode("LSFERROR")
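# Small illustration, not part of the original module: StatusCode() interns codes in
# `statuses`, so repeated lookups return the same object and the constants above can
# be compared with `is`, with each other, or with a plain string (name or abbreviation).
def _status_code_example():
    assert StatusCode("PASSED") is PASSED
    assert PASSED == "PASSED" and PASSED == "PASS"
    return str(PASSED)  # the abbreviation, 'PASS'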
class AtsError (Exception):
"Exception class for Ats."
def __init__ (self, msg):
Exception.__init__ (self, msg)
def expandpath (path):
"Return a normalized, variable and ~-expanded version of path"
path = str(path)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
return path
def abspath(path):
"Return an absolute, expanded path."
return os.path.abspath(expandpath(path))
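# Small illustration, not part of the original module: expandpath() applies ~ expansion,
# environment-variable expansion and normpath in that order, and abspath() additionally
# anchors the result to the current directory. The commented result is only indicative.
def _path_helpers_example():
    expanded = expandpath('~/ats/$USER/../results')  # e.g. '/home/alice/ats/results'
    return abspath(expanded)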
_debug = 0
def debug(value=None):
"Return the debug flag; if value given, set it."
global _debug
if value is None:
return _debug
else:
_debug = int(value)
def is_valid_file (path):
"Does path represent a valid file?"
path = abspath(path)
return os.path.isfile(path)
def is_valid_executable (path):
"Does path represent a valid executable?"
path = abspath(path)
return is_valid_file(path) and os.access(path, os.X_OK)
if __name__ == "__main__":
print(locals())
|
the-stack_0_2471 | from distutils.version import LooseVersion
import os
import json
import pytest
import numpy as np
import pandas as pd
from sklearn import datasets
import xgboost as xgb
import matplotlib as mpl
import yaml
import mlflow
import mlflow.xgboost
from mlflow.models import Model
from mlflow.models.utils import _read_example
from mlflow.utils.autologging import BatchMetricsLogger
from unittest.mock import patch
mpl.use("Agg")
def get_latest_run():
client = mlflow.tracking.MlflowClient()
return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)
def get_model_conf(artifact_uri, model_subpath="model"):
model_conf_path = os.path.join(artifact_uri, model_subpath, "MLmodel")
return Model.load(model_conf_path)
@pytest.fixture(scope="session")
def bst_params():
return {
"objective": "multi:softprob",
"num_class": 3,
"eval_metric": "mlogloss",
}
@pytest.fixture(scope="session")
def dtrain():
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
return xgb.DMatrix(X, y)
@pytest.mark.large
def test_xgb_autolog_ends_auto_created_run(bst_params, dtrain):
mlflow.xgboost.autolog()
xgb.train(bst_params, dtrain)
assert mlflow.active_run() is None
@pytest.mark.large
def test_xgb_autolog_persists_manually_created_run(bst_params, dtrain):
mlflow.xgboost.autolog()
with mlflow.start_run() as run:
xgb.train(bst_params, dtrain)
assert mlflow.active_run()
assert mlflow.active_run().info.run_id == run.info.run_id
@pytest.mark.large
def test_xgb_autolog_logs_default_params(bst_params, dtrain):
mlflow.xgboost.autolog()
xgb.train(bst_params, dtrain)
run = get_latest_run()
params = run.data.params
expected_params = {
"num_boost_round": 10,
# In xgboost >= 1.3.0, the default value for `maximize` in `xgboost.train` is None:
# https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.train
# In < 1.3.0, it's False:
# https://xgboost.readthedocs.io/en/release_1.2.0/python/python_api.html#xgboost.train
"maximize": None if LooseVersion(xgb.__version__) >= LooseVersion("1.3.0") else False,
"early_stopping_rounds": None,
"verbose_eval": True,
}
expected_params.update(bst_params)
for key, val in expected_params.items():
assert key in params
assert params[key] == str(val)
unlogged_params = [
"dtrain",
"evals",
"obj",
"feval",
"evals_result",
"xgb_model",
"callbacks",
"learning_rates",
]
for param in unlogged_params:
assert param not in params
@pytest.mark.large
def test_xgb_autolog_logs_specified_params(bst_params, dtrain):
mlflow.xgboost.autolog()
expected_params = {
"num_boost_round": 20,
"early_stopping_rounds": 5,
"verbose_eval": False,
}
xgb.train(bst_params, dtrain, evals=[(dtrain, "train")], **expected_params)
run = get_latest_run()
params = run.data.params
expected_params.update(bst_params)
for key, val in expected_params.items():
assert key in params
assert params[key] == str(val)
unlogged_params = [
"dtrain",
"evals",
"obj",
"feval",
"evals_result",
"xgb_model",
"callbacks",
"learning_rates",
]
for param in unlogged_params:
assert param not in params
@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_validation_data(bst_params, dtrain):
mlflow.xgboost.autolog()
evals_result = {}
xgb.train(
bst_params, dtrain, num_boost_round=20, evals=[(dtrain, "train")], evals_result=evals_result
)
run = get_latest_run()
data = run.data
metric_key = "train-mlogloss"
client = mlflow.tracking.MlflowClient()
metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
assert metric_key in data.metrics
assert len(metric_history) == 20
assert metric_history == evals_result["train"]["mlogloss"]
@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_multi_validation_data(bst_params, dtrain):
mlflow.xgboost.autolog()
evals_result = {}
evals = [(dtrain, "train"), (dtrain, "valid")]
xgb.train(bst_params, dtrain, num_boost_round=20, evals=evals, evals_result=evals_result)
run = get_latest_run()
data = run.data
client = mlflow.tracking.MlflowClient()
for eval_name in [e[1] for e in evals]:
metric_key = "{}-mlogloss".format(eval_name)
metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
assert metric_key in data.metrics
assert len(metric_history) == 20
assert metric_history == evals_result[eval_name]["mlogloss"]
@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_multi_metrics(bst_params, dtrain):
mlflow.xgboost.autolog()
evals_result = {}
params = {**bst_params, "eval_metric": ["merror", "mlogloss"]}
xgb.train(
params, dtrain, num_boost_round=20, evals=[(dtrain, "train")], evals_result=evals_result
)
run = get_latest_run()
data = run.data
client = mlflow.tracking.MlflowClient()
for metric_name in params["eval_metric"]:
metric_key = "train-{}".format(metric_name)
metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
assert metric_key in data.metrics
assert len(metric_history) == 20
assert metric_history == evals_result["train"][metric_name]
@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_multi_validation_data_and_metrics(bst_params, dtrain):
mlflow.xgboost.autolog()
evals_result = {}
params = {**bst_params, "eval_metric": ["merror", "mlogloss"]}
evals = [(dtrain, "train"), (dtrain, "valid")]
xgb.train(params, dtrain, num_boost_round=20, evals=evals, evals_result=evals_result)
run = get_latest_run()
data = run.data
client = mlflow.tracking.MlflowClient()
for eval_name in [e[1] for e in evals]:
for metric_name in params["eval_metric"]:
metric_key = "{}-{}".format(eval_name, metric_name)
metric_history = [
x.value for x in client.get_metric_history(run.info.run_id, metric_key)
]
assert metric_key in data.metrics
assert len(metric_history) == 20
assert metric_history == evals_result[eval_name][metric_name]
@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_early_stopping(bst_params, dtrain):
mlflow.xgboost.autolog()
evals_result = {}
params = {**bst_params, "eval_metric": ["merror", "mlogloss"]}
evals = [(dtrain, "train"), (dtrain, "valid")]
model = xgb.train(
params,
dtrain,
num_boost_round=20,
early_stopping_rounds=5,
evals=evals,
evals_result=evals_result,
)
run = get_latest_run()
data = run.data
assert "best_iteration" in data.metrics
assert int(data.metrics["best_iteration"]) == model.best_iteration
assert "stopped_iteration" in data.metrics
assert int(data.metrics["stopped_iteration"]) == len(evals_result["train"]["merror"]) - 1
client = mlflow.tracking.MlflowClient()
for eval_name in [e[1] for e in evals]:
for metric_name in params["eval_metric"]:
metric_key = "{}-{}".format(eval_name, metric_name)
metric_history = [
x.value for x in client.get_metric_history(run.info.run_id, metric_key)
]
assert metric_key in data.metrics
assert len(metric_history) == 20 + 1
best_metrics = evals_result[eval_name][metric_name][model.best_iteration]
assert metric_history == evals_result[eval_name][metric_name] + [best_metrics]
@pytest.mark.large
def test_xgb_autolog_batch_metrics_logger_logs_expected_metrics(bst_params, dtrain):
patched_metrics_data = []
# Mock patching BatchMetricsLogger.record_metrics()
# to ensure that expected metrics are being logged.
original = BatchMetricsLogger.record_metrics
with patch(
"mlflow.utils.autologging.BatchMetricsLogger.record_metrics", autospec=True
) as record_metrics_mock:
def record_metrics_side_effect(self, metrics, step=None):
patched_metrics_data.extend(metrics.items())
original(self, metrics, step)
record_metrics_mock.side_effect = record_metrics_side_effect
mlflow.xgboost.autolog()
evals_result = {}
params = {**bst_params, "eval_metric": ["merror", "mlogloss"]}
evals = [(dtrain, "train"), (dtrain, "valid")]
model = xgb.train(
params,
dtrain,
num_boost_round=20,
early_stopping_rounds=5,
evals=evals,
evals_result=evals_result,
)
patched_metrics_data = dict(patched_metrics_data)
run = get_latest_run()
original_metrics = run.data.metrics
for metric_name in original_metrics:
assert metric_name in patched_metrics_data
assert original_metrics[metric_name] == patched_metrics_data[metric_name]
assert int(patched_metrics_data["best_iteration"]) == model.best_iteration
assert int(original_metrics["best_iteration"]) == model.best_iteration
@pytest.mark.large
def test_xgb_autolog_logs_feature_importance(bst_params, dtrain):
mlflow.xgboost.autolog()
model = xgb.train(bst_params, dtrain)
run = get_latest_run()
run_id = run.info.run_id
artifacts_dir = run.info.artifact_uri.replace("file://", "")
client = mlflow.tracking.MlflowClient()
artifacts = [x.path for x in client.list_artifacts(run_id)]
importance_type = "weight"
plot_name = "feature_importance_{}.png".format(importance_type)
assert plot_name in artifacts
json_name = "feature_importance_{}.json".format(importance_type)
assert json_name in artifacts
json_path = os.path.join(artifacts_dir, json_name)
with open(json_path, "r") as f:
loaded_imp = json.load(f)
assert loaded_imp == model.get_score(importance_type=importance_type)
@pytest.mark.large
def test_xgb_autolog_logs_specified_feature_importance(bst_params, dtrain):
importance_types = ["weight", "total_gain"]
mlflow.xgboost.autolog(importance_types=importance_types)
model = xgb.train(bst_params, dtrain)
run = get_latest_run()
run_id = run.info.run_id
artifacts_dir = run.info.artifact_uri.replace("file://", "")
client = mlflow.tracking.MlflowClient()
artifacts = [x.path for x in client.list_artifacts(run_id)]
for imp_type in importance_types:
plot_name = "feature_importance_{}.png".format(imp_type)
assert plot_name in artifacts
json_name = "feature_importance_{}.json".format(imp_type)
assert json_name in artifacts
json_path = os.path.join(artifacts_dir, json_name)
with open(json_path, "r") as f:
loaded_imp = json.load(f)
assert loaded_imp == model.get_score(importance_type=imp_type)
@pytest.mark.large
def test_no_figure_is_opened_after_logging(bst_params, dtrain):
mlflow.xgboost.autolog()
xgb.train(bst_params, dtrain)
assert mpl.pyplot.get_fignums() == []
@pytest.mark.large
def test_xgb_autolog_loads_model_from_artifact(bst_params, dtrain):
mlflow.xgboost.autolog()
model = xgb.train(bst_params, dtrain)
run = get_latest_run()
run_id = run.info.run_id
loaded_model = mlflow.xgboost.load_model("runs:/{}/model".format(run_id))
np.testing.assert_array_almost_equal(model.predict(dtrain), loaded_model.predict(dtrain))
@pytest.mark.large
def test_xgb_autolog_does_not_throw_if_importance_values_not_supported(dtrain):
# the gblinear booster does not support calling get_score on it,
# where get_score is used to create the importance values plot.
bst_params = {"objective": "multi:softprob", "num_class": 3, "booster": "gblinear"}
mlflow.xgboost.autolog()
# we make sure here that we do not throw while attempting to plot
# importance values on a model with a linear booster.
model = xgb.train(bst_params, dtrain)
with pytest.raises(Exception):
model.get_score(importance_type="weight")
@pytest.mark.large
def test_xgb_autolog_gets_input_example(bst_params):
mlflow.xgboost.autolog(log_input_examples=True)
# we cannot use dtrain fixture, as the dataset must be constructed
# after the call to autolog() in order to get the input example
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
dataset = xgb.DMatrix(X, y)
xgb.train(bst_params, dataset)
run = get_latest_run()
model_path = os.path.join(run.info.artifact_uri, "model")
model_conf = Model.load(os.path.join(model_path, "MLmodel"))
input_example = _read_example(model_conf, model_path)
assert input_example.equals(X[:5])
pyfunc_model = mlflow.pyfunc.load_model(os.path.join(run.info.artifact_uri, "model"))
# make sure reloading the input_example and predicting on it does not error
pyfunc_model.predict(input_example)
@pytest.mark.large
def test_xgb_autolog_infers_model_signature_correctly(bst_params):
mlflow.xgboost.autolog(log_model_signatures=True)
# we cannot use dtrain fixture, as the dataset must be constructed
# after the call to autolog() in order to get the input example
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
dataset = xgb.DMatrix(X, y)
xgb.train(bst_params, dataset)
run = get_latest_run()
run_id = run.info.run_id
artifacts_dir = run.info.artifact_uri.replace("file://", "")
client = mlflow.tracking.MlflowClient()
artifacts = [x.path for x in client.list_artifacts(run_id, "model")]
ml_model_filename = "MLmodel"
assert str(os.path.join("model", ml_model_filename)) in artifacts
ml_model_path = os.path.join(artifacts_dir, "model", ml_model_filename)
data = None
with open(ml_model_path, "r") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
assert data is not None
assert "signature" in data
signature = data["signature"]
assert signature is not None
assert "inputs" in signature
assert json.loads(signature["inputs"]) == [
{"name": "sepal length (cm)", "type": "double"},
{"name": "sepal width (cm)", "type": "double"},
]
assert "outputs" in signature
assert json.loads(signature["outputs"]) == [
{"type": "tensor", "tensor-spec": {"dtype": "float32", "shape": [-1, 3]}},
]
@pytest.mark.large
def test_xgb_autolog_does_not_throw_if_importance_values_are_empty(bst_params, tmpdir):
tmp_csv = tmpdir.join("data.csv")
tmp_csv.write("1,0.3,1.2\n")
tmp_csv.write("0,2.4,5.2\n")
tmp_csv.write("1,0.3,-1.2\n")
mlflow.xgboost.autolog()
dataset = xgb.DMatrix(tmp_csv.strpath + "?format=csv&label_column=0")
# we make sure here that we do not throw while attempting to plot
# importance values on a dataset that returns no importance values.
model = xgb.train(bst_params, dataset)
assert model.get_score(importance_type="weight") == {}
@pytest.mark.large
def test_xgb_autolog_continues_logging_even_if_signature_inference_fails(bst_params, tmpdir):
tmp_csv = tmpdir.join("data.csv")
tmp_csv.write("1,0.3,1.2\n")
tmp_csv.write("0,2.4,5.2\n")
tmp_csv.write("1,0.3,-1.2\n")
mlflow.xgboost.autolog(importance_types=[], log_model_signatures=True)
# signature and input example inference should fail here since the dataset is given
# as a file path
dataset = xgb.DMatrix(tmp_csv.strpath + "?format=csv&label_column=0")
xgb.train(bst_params, dataset)
run = get_latest_run()
run_id = run.info.run_id
artifacts_dir = run.info.artifact_uri.replace("file://", "")
client = mlflow.tracking.MlflowClient()
artifacts = [x.path for x in client.list_artifacts(run_id, "model")]
ml_model_filename = "MLmodel"
assert os.path.join("model", ml_model_filename) in artifacts
ml_model_path = os.path.join(artifacts_dir, "model", ml_model_filename)
data = None
with open(ml_model_path, "r") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
assert data is not None
assert "run_id" in data
assert "signature" not in data
@pytest.mark.large
def test_xgb_autolog_does_not_break_dmatrix_serialization(bst_params, tmpdir):
mlflow.xgboost.autolog()
# we cannot use dtrain fixture, as the dataset must be constructed
# after the call to autolog() in order to test the serialization
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
dataset = xgb.DMatrix(X, y)
xgb.train(bst_params, dataset)
save_path = tmpdir.join("dataset_serialization_test").strpath
dataset.save_binary(save_path) # serialization should not throw
xgb.DMatrix(save_path) # deserialization also should not throw
@pytest.mark.large
@pytest.mark.parametrize("log_input_examples", [True, False])
@pytest.mark.parametrize("log_model_signatures", [True, False])
def test_xgb_autolog_configuration_options(bst_params, log_input_examples, log_model_signatures):
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
with mlflow.start_run() as run:
mlflow.xgboost.autolog(
log_input_examples=log_input_examples, log_model_signatures=log_model_signatures
)
dataset = xgb.DMatrix(X, y)
xgb.train(bst_params, dataset)
model_conf = get_model_conf(run.info.artifact_uri)
assert ("saved_input_example_info" in model_conf.to_dict()) == log_input_examples
assert ("signature" in model_conf.to_dict()) == log_model_signatures
@pytest.mark.large
@pytest.mark.parametrize("log_models", [True, False])
def test_xgb_autolog_log_models_configuration(bst_params, log_models):
iris = datasets.load_iris()
X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
y = iris.target
with mlflow.start_run() as run:
mlflow.xgboost.autolog(log_models=log_models)
dataset = xgb.DMatrix(X, y)
xgb.train(bst_params, dataset)
run_id = run.info.run_id
client = mlflow.tracking.MlflowClient()
artifacts = [f.path for f in client.list_artifacts(run_id)]
assert ("model" in artifacts) == log_models
def test_xgb_autolog_does_not_break_dmatrix_instantiation_with_data_none():
"""
This test verifies that `xgboost.DMatrix(None)` doesn't fail after patching.
XGBoost internally calls `xgboost.DMatrix(None)` to create a blank `DMatrix` object.
Example: https://github.com/dmlc/xgboost/blob/v1.2.1/python-package/xgboost/core.py#L701
"""
mlflow.xgboost.autolog()
xgb.DMatrix(None)
|
the-stack_0_2472 | import os
from dodo_commands.framework import ramda as R
from dodo_commands.framework.config_io import ConfigIO
class Layers:
def __init__(self):
self.config_io = ConfigIO()
self.root_layer_path = None
self.root_layer = None
self.layer_by_target_path = {}
self.selected_layer_by_path = {}
self.metadata_by_layer_name = None
def get_ordered_layer_paths(self):
root_layer_path = self.config_io.glob([self.root_layer_path])[0]
x = R.concat(
self.selected_layer_by_path.keys(), self.layer_by_target_path.keys()
)
x = R.uniq(x)
x = sorted(x, key=os.path.basename)
x = self.config_io.glob(x)
x = R.filter(lambda x: x != root_layer_path)(x)
x = R.concat([root_layer_path], x)
return x
@staticmethod
def get(ctr):
return ctr.layers
def init_layers(self, root_layer_path):
self.root_layer_path = root_layer_path
return self
|
the-stack_0_2473 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# -*- coding: utf-8 -*-
"""
# @Time : 2019/5/25
# @Author : Jiaqi&Zecheng
# @File : train.py
# @Software: PyCharm
"""
import time
import traceback
import os
import torch
import torch.optim as optim
import tqdm
import copy
from src import args as arg
from src import utils
from src.models.model import IRNet
from src.rule import semQL
def train(args):
"""
:param args:
:return:
"""
grammar = semQL.Grammar()
sql_data, table_data, val_sql_data,\
val_table_data= utils.load_dataset(args.dataset, use_small=args.toy)
model = IRNet(args, grammar)
if args.cuda: model.cuda()
# now get the optimizer
optimizer_cls = eval('torch.optim.%s' % args.optimizer)
optimizer = optimizer_cls(model.parameters(), lr=args.lr)
print('Enable Learning Rate Scheduler: ', args.lr_scheduler)
if args.lr_scheduler:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[21, 41], gamma=args.lr_scheduler_gammar)
else:
scheduler = None
print('Loss epoch threshold: %d' % args.loss_epoch_threshold)
print('Sketch loss coefficient: %f' % args.sketch_loss_coefficient)
if args.load_model:
print('load pretrained model from %s'% (args.load_model))
pretrained_model = torch.load(args.load_model,
map_location=lambda storage, loc: storage)
pretrained_modeled = copy.deepcopy(pretrained_model)
for k in pretrained_model.keys():
if k not in model.state_dict().keys():
del pretrained_modeled[k]
model.load_state_dict(pretrained_modeled)
model.word_emb = utils.load_word_emb(args.glove_embed_path)
# begin train
model_save_path = utils.init_log_checkpoint_path(args)
utils.save_args(args, os.path.join(model_save_path, 'config.json'))
best_dev_acc = .0
try:
with open(os.path.join(model_save_path, 'epoch.log'), 'w') as epoch_fd:
for epoch in tqdm.tqdm(range(args.epoch)):
if args.lr_scheduler:
scheduler.step()
epoch_begin = time.time()
loss = utils.epoch_train(model, optimizer, args.batch_size, sql_data, table_data, args,
loss_epoch_threshold=args.loss_epoch_threshold,
sketch_loss_coefficient=args.sketch_loss_coefficient)
epoch_end = time.time()
json_datas = utils.epoch_acc(model, args.batch_size, val_sql_data, val_table_data,
beam_size=args.beam_size)
acc = utils.eval_acc(json_datas, val_sql_data)
if acc > best_dev_acc:
utils.save_checkpoint(model, os.path.join(model_save_path, 'best_model.model'))
best_dev_acc = acc
utils.save_checkpoint(model, os.path.join(model_save_path, '{%s}_{%s}.model') % (epoch, acc))
log_str = 'Epoch: %d, Loss: %f, Sketch Acc: %f, Acc: %f, time: %f\n' % (
epoch + 1, loss, acc, acc, epoch_end - epoch_begin)
tqdm.tqdm.write(log_str)
epoch_fd.write(log_str)
epoch_fd.flush()
except Exception as e:
# Save model
utils.save_checkpoint(model, os.path.join(model_save_path, 'end_model.model'))
print(e)
tb = traceback.format_exc()
print(tb)
else:
utils.save_checkpoint(model, os.path.join(model_save_path, 'end_model.model'))
json_datas = utils.epoch_acc(model, args.batch_size, val_sql_data, val_table_data,
beam_size=args.beam_size)
acc = utils.eval_acc(json_datas, val_sql_data)
print("Sketch Acc: %f, Acc: %f, Beam Acc: %f" % (acc, acc, acc,))
if __name__ == '__main__':
arg_parser = arg.init_arg_parser()
args = arg.init_config(arg_parser)
print(args)
train(args) |
the-stack_0_2476 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contains common helpers for working with Android manifests."""
import hashlib
import os
import re
import shlex
import xml.dom.minidom as minidom
from util import build_utils
from xml.etree import ElementTree
ANDROID_NAMESPACE = 'http://schemas.android.com/apk/res/android'
TOOLS_NAMESPACE = 'http://schemas.android.com/tools'
DIST_NAMESPACE = 'http://schemas.android.com/apk/distribution'
EMPTY_ANDROID_MANIFEST_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', 'AndroidManifest.xml'))
# When normalizing for expectation matching, wrap these tags when they are long
# or else they become very hard to read.
_WRAP_CANDIDATES = (
'<manifest',
'<application',
'<activity',
'<provider',
'<receiver',
'<service',
)
# Don't wrap lines shorter than this.
_WRAP_LINE_LENGTH = 100
_xml_namespace_initialized = False
def _RegisterElementTreeNamespaces():
global _xml_namespace_initialized
if _xml_namespace_initialized:
return
_xml_namespace_initialized = True
ElementTree.register_namespace('android', ANDROID_NAMESPACE)
ElementTree.register_namespace('tools', TOOLS_NAMESPACE)
ElementTree.register_namespace('dist', DIST_NAMESPACE)
def ParseManifest(path):
"""Parses an AndroidManifest.xml using ElementTree.
Registers required namespaces, creates application node if missing, adds any
missing namespaces for 'android', 'tools' and 'dist'.
Returns tuple of:
doc: Root xml document.
manifest_node: the <manifest> node.
app_node: the <application> node.
"""
_RegisterElementTreeNamespaces()
doc = ElementTree.parse(path)
# ElementTree.find does not work if the required tag is the root.
if doc.getroot().tag == 'manifest':
manifest_node = doc.getroot()
else:
manifest_node = doc.find('manifest')
app_node = doc.find('application')
if app_node is None:
app_node = ElementTree.SubElement(manifest_node, 'application')
return doc, manifest_node, app_node
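# Hedged usage sketch, not part of the original helpers: a typical round trip through
# ParseManifest()/SaveManifest(); the manifest path is a placeholder.
def _ParseAndSaveExample(path='AndroidManifest.xml'):
  doc, manifest_node, app_node = ParseManifest(path)
  package = GetPackage(manifest_node)  # May be None if the package attribute is absent.
  # ... adjust manifest_node / app_node with the ElementTree API as needed ...
  SaveManifest(doc, path)
  return package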
def SaveManifest(doc, path):
with build_utils.AtomicOutput(path) as f:
f.write(ElementTree.tostring(doc.getroot(), encoding='UTF-8'))
def GetPackage(manifest_node):
return manifest_node.get('package')
def AssertUsesSdk(manifest_node,
min_sdk_version=None,
target_sdk_version=None,
max_sdk_version=None,
fail_if_not_exist=False):
"""Asserts values of attributes of <uses-sdk> element.
Unless |fail_if_not_exist| is true, will only assert if both the passed value
is not None and the value of attribute exist. If |fail_if_not_exist| is true
will fail if passed value is not None but attribute does not exist.
"""
uses_sdk_node = manifest_node.find('./uses-sdk')
if uses_sdk_node is None:
return
for prefix, sdk_version in (('min', min_sdk_version), ('target',
target_sdk_version),
('max', max_sdk_version)):
value = uses_sdk_node.get('{%s}%sSdkVersion' % (ANDROID_NAMESPACE, prefix))
if fail_if_not_exist and not value and sdk_version:
assert False, (
'%sSdkVersion in Android manifest does not exist but we expect %s' %
(prefix, sdk_version))
if not value or not sdk_version:
continue
assert value == sdk_version, (
'%sSdkVersion in Android manifest is %s but we expect %s' %
(prefix, value, sdk_version))
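# Illustration, not part of the original helpers: AssertUsesSdk() only compares a value
# when both the expectation and the corresponding android:*SdkVersion attribute exist,
# unless fail_if_not_exist turns a missing attribute into a failure.
def _AssertUsesSdkExample(manifest_node):
  # Passes silently when <uses-sdk> or android:minSdkVersion is missing.
  AssertUsesSdk(manifest_node, min_sdk_version='21')
  # Fails if android:targetSdkVersion is expected but absent from the manifest.
  AssertUsesSdk(manifest_node, target_sdk_version='30', fail_if_not_exist=True)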
def AssertPackage(manifest_node, package):
"""Asserts that manifest package has desired value.
Will only assert if both |package| is not None and the package is set in the
manifest.
"""
package_value = GetPackage(manifest_node)
if package_value is None or package is None:
return
assert package_value == package, (
'Package in Android manifest is %s but we expect %s' % (package_value,
package))
def _SortAndStripElementTree(root):
def sort_key(node):
ret = ElementTree.tostring(node)
# ElementTree.tostring inserts namespace attributes for any that are needed
# for the node or any of its descendants. Remove them so as to prevent a
# change to a child that adds/removes a namespace usage from changing sort
# order.
return re.sub(r' xmlns:.*?".*?"', '', ret.decode('utf8'))
def helper(node):
for child in node:
if child.text and child.text.isspace():
child.text = None
helper(child)
node[:] = sorted(node, key=sort_key)
def rename_attrs(node, from_name, to_name):
value = node.attrib.get(from_name)
if value is not None:
node.attrib[to_name] = value
del node.attrib[from_name]
for child in node:
rename_attrs(child, from_name, to_name)
# Sort alphabetically with two exceptions:
# 1) Put <application> node last (since it's giant).
# 2) Pretend android:name appears before other attributes.
app_node = root.find('application')
app_node.tag = 'zz'
rename_attrs(root, '{%s}name' % ANDROID_NAMESPACE, '__name__')
helper(root)
rename_attrs(root, '__name__', '{%s}name' % ANDROID_NAMESPACE)
app_node.tag = 'application'
def _SplitElement(line):
"""Parses a one-line xml node into ('<tag', ['a="b"', ...]], '/>')."""
# Shlex splits nicely, but removes quotes. Need to put them back.
def restore_quotes(value):
return value.replace('=', '="', 1) + '"'
# Simplify restore_quotes by separating />.
assert line.endswith('>'), line
end_tag = '>'
if line.endswith('/>'):
end_tag = '/>'
line = line[:-len(end_tag)]
# Use shlex to avoid having to re-encode ", etc.
parts = shlex.split(line)
start_tag = parts[0]
attrs = parts[1:]
return start_tag, [restore_quotes(x) for x in attrs], end_tag
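# Illustration, not part of the original helpers: _SplitElement() on a pretty-printed
# one-line node (the attribute values are made up).
def _SplitElementExample():
  line = '<activity android:name="Foo" android:exported="false"/>'
  start_tag, attrs, end_tag = _SplitElement(line)
  # -> ('<activity', ['android:name="Foo"', 'android:exported="false"'], '/>')
  return start_tag, attrs, end_tag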
def _CreateNodeHash(lines):
"""Computes a hash (md5) for the first XML node found in |lines|.
Args:
lines: List of strings containing pretty-printed XML.
Returns:
    Truncated md5 hex digest (8 characters) of the node, including children.
"""
target_indent = lines[0].find('<')
tag_closed = False
for i, l in enumerate(lines[1:]):
cur_indent = l.find('<')
if cur_indent != -1 and cur_indent <= target_indent:
tag_lines = lines[:i + 1]
break
elif not tag_closed and 'android:name="' in l:
# To reduce noise of node tags changing, use android:name as the
# basis the hash since they usually unique.
tag_lines = [l]
break
tag_closed = tag_closed or '>' in l
else:
assert False, 'Did not find end of node:\n' + '\n'.join(lines)
# Insecure and truncated hash as it only needs to be unique vs. its neighbors.
return hashlib.md5(('\n'.join(tag_lines)).encode('utf8')).hexdigest()[:8]
def _IsSelfClosing(lines):
"""Given pretty-printed xml, returns whether first node is self-closing."""
for l in lines:
idx = l.find('>')
if idx != -1:
return l[idx - 1] == '/'
assert False, 'Did not find end of tag:\n' + '\n'.join(lines)
def _AddDiffTags(lines):
# When multiple identical tags appear sequentially, XML diffs can look like:
# + </tag>
# + <tag>
# rather than:
# + <tag>
# + </tag>
# To reduce confusion, add hashes to tags.
# This also ensures changed tags show up with outer <tag> elements rather than
# showing only changed attributes.
hash_stack = []
for i, l in enumerate(lines):
stripped = l.lstrip()
# Ignore non-indented tags and lines that are not the start/end of a node.
if l[0] != ' ' or stripped[0] != '<':
continue
# Ignore self-closing nodes that fit on one line.
if l[-2:] == '/>':
continue
# Ignore <application> since diff tag changes with basically any change.
if stripped.lstrip('</').startswith('application'):
continue
# Check for the closing tag (</foo>).
if stripped[1] != '/':
cur_hash = _CreateNodeHash(lines[i:])
if not _IsSelfClosing(lines[i:]):
hash_stack.append(cur_hash)
else:
cur_hash = hash_stack.pop()
lines[i] += ' # DIFF-ANCHOR: {}'.format(cur_hash)
assert not hash_stack, 'hash_stack was not empty:\n' + '\n'.join(hash_stack)
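# Sketch of the effect of _AddDiffTags(): the open and close tags of a multi-line node
# receive the same "# DIFF-ANCHOR: <hash>" suffix so diffs keep the pair together, e.g.
# (hash value illustrative):
#     <activity  # DIFF-ANCHOR: 1afc39da
#         android:name="Foo">
#     </activity>  # DIFF-ANCHOR: 1afc39da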
def NormalizeManifest(manifest_contents):
_RegisterElementTreeNamespaces()
# This also strips comments and sorts node attributes alphabetically.
root = ElementTree.fromstring(manifest_contents)
package = GetPackage(root)
app_node = root.find('application')
if app_node is not None:
# android:debuggable is added when !is_official_build. Strip it out to avoid
# expectation diffs caused by not adding is_official_build. Play store
# blocks uploading apps with it set, so there's no risk of it slipping in.
debuggable_name = '{%s}debuggable' % ANDROID_NAMESPACE
if debuggable_name in app_node.attrib:
del app_node.attrib[debuggable_name]
# Trichrome's static library version number is updated daily. To avoid
# frequent manifest check failures, we remove the exact version number
# during normalization.
for node in app_node.getchildren():
if (node.tag in ['uses-static-library', 'static-library']
and '{%s}version' % ANDROID_NAMESPACE in node.keys()
and '{%s}name' % ANDROID_NAMESPACE in node.keys()):
node.set('{%s}version' % ANDROID_NAMESPACE, '$VERSION_NUMBER')
# We also remove the exact package name (except the one at the root level)
# to avoid noise during manifest comparison.
def blur_package_name(node):
for key in node.keys():
node.set(key, node.get(key).replace(package, '$PACKAGE'))
for child in node.getchildren():
blur_package_name(child)
# We only blur the package names of non-root nodes because they generate a lot
# of diffs when doing manifest checks for upstream targets. We still want to
# have 1 piece of package name not blurred just in case the package name is
# mistakenly changed.
for child in root.getchildren():
blur_package_name(child)
_SortAndStripElementTree(root)
# Fix up whitespace/indentation.
dom = minidom.parseString(ElementTree.tostring(root))
out_lines = []
for l in dom.toprettyxml(indent=' ').splitlines():
if not l or l.isspace():
continue
if len(l) > _WRAP_LINE_LENGTH and any(x in l for x in _WRAP_CANDIDATES):
indent = ' ' * l.find('<')
start_tag, attrs, end_tag = _SplitElement(l)
out_lines.append('{}{}'.format(indent, start_tag))
for attribute in attrs:
out_lines.append('{} {}'.format(indent, attribute))
out_lines[-1] += '>'
# Heuristic: Do not allow multi-line tags to be self-closing since these
# can generally be allowed to have nested elements. When diffing, it adds
# noise if the base file is self-closing and the non-base file is not
# self-closing.
if end_tag == '/>':
out_lines.append('{}{}>'.format(indent, start_tag.replace('<', '</')))
else:
out_lines.append(l)
# Make output more diff-friendly.
_AddDiffTags(out_lines)
return '\n'.join(out_lines) + '\n'
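# Usage sketch (illustrative; the file path is a placeholder). NormalizeManifest
# takes the raw AndroidManifest.xml text and returns the normalized,
# diff-friendly form produced above:
#
#   with open('AndroidManifest.xml') as f:
#     normalized = NormalizeManifest(f.read())
#   print(normalized)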
|
the-stack_0_2477 | """A Python Wrapper for accessing the ZeroTier API."""
import asyncio
import logging
import aiohttp
import async_timeout
from . import exceptions
_LOGGER = logging.getLogger(__name__)
WRITABLE_NETWORK = [
'name',
'private',
'enableBroadcast',
'v4AssignMode',
'v6AssignMode',
'mtu',
    'multicastLimit',
'routes',
'ipAssignmentPools',
'rules',
'capabilities',
'tags',
'remoteTraceTarget',
'remoteTraceLevel',
]
WRITABLE_MEMBER = ['authorized', 'activeBridge', 'ipAssignments']
class ZeroTier(object):
"""A class for handling the data retrieval."""
def __init__(self, api_token, loop, session, host='localhost', port=9993):
"""Initialize the connection."""
self._loop = loop
self._session = session
self.headers = {'X-ZT1-Auth': api_token}
self.data = None
self.url = '{}:{}'.format(host, port)
async def get_data(self, endpoint):
"""Retrieve the data."""
try:
with async_timeout.timeout(5, loop=self._loop):
response = await self._session.get(
'http://{}/{}'.format(self.url, endpoint),
headers=self.headers)
_LOGGER.debug("Response status: %s", response.status)
self.data = await response.json()
_LOGGER.debug(self.data)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Can not load data from ZeroTier controller")
raise exceptions.ZeroTierConnectionError()
    async def set_value(self, key, variable, endpoint):
        """Send a POST request to a controller."""
payload = {key: variable}
print(payload)
try:
with async_timeout.timeout(5, loop=self._loop):
response = await self._session.post(
'http://{}/{}'.format(self.url, endpoint),
headers=self.headers, data=payload)
_LOGGER.debug("Response status: %s", response.status)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Can not update entry of ZeroTier controller")
raise exceptions.ZeroTierConnectionError()
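# Usage sketch (illustrative only; the API token is a placeholder and a
# ZeroTier controller is assumed to be listening on localhost:9993):
#
#   async def main():
#       loop = asyncio.get_event_loop()
#       async with aiohttp.ClientSession() as session:
#           zt = ZeroTier('replace-with-api-token', loop, session)
#           await zt.get_data('status')
#           print(zt.data)
#
#   asyncio.get_event_loop().run_until_complete(main())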
|
the-stack_0_2478 | #!/usr/bin/env python
import json
from random import randint
import momoko
import tornado.ioloop
import tornado.web
from tornado import gen
import tornado.options
from tornado.options import options
import tornado.httpserver
from commons import JsonHandler, JsonHelloWorldHandler, PlaintextHelloWorldHandler, BaseHandler
tornado.options.define('port', default=8888, type=int, help="Server port")
tornado.options.define('postgres', default="localhost",
type=str, help="PostgreSQL host")
tornado.options.define('backlog', default=8192, type=int,
help="Server backlog")
class SingleQueryHandler(JsonHandler):
SQL = "SELECT id, randomNumber FROM World WHERE id=%s"
@gen.coroutine
def get(self):
random_id = randint(1, 10000)
cursor = yield db.execute(self.SQL, (random_id,))
row = cursor.fetchone()
response = json.dumps({self.ID: row[0], self.RANDOM_NUMBER: row[1]})
self.finish(response)
class MultipleQueriesHandler(JsonHandler):
SQL = "SELECT id, randomNumber FROM World WHERE id=%s"
@gen.coroutine
def get(self):
queries = self.get_argument(self.QUERIES, "1")
try:
queries = int(queries.strip())
except ValueError:
queries = 1
queries = min(max(1, queries), 500)
worlds = []
cursors = yield [db.execute(self.SQL, (randint(1, 10000),)) for _ in xrange(queries)]
for cursor in cursors:
row = cursor.fetchone()
worlds.append({self.ID: row[0], self.RANDOM_NUMBER: row[1]})
response = json.dumps(worlds)
self.finish(response)
application = tornado.web.Application([
(r"/json", JsonHelloWorldHandler),
(r"/plaintext", PlaintextHelloWorldHandler),
(r"/db", SingleQueryHandler),
(r"/queries", MultipleQueriesHandler)
],
template_path="templates"
)
application.ui_modules = {}
if __name__ == "__main__":
tornado.options.parse_command_line()
server = tornado.httpserver.HTTPServer(application)
server.bind(options.port, backlog=options.backlog)
server.start(0)
ioloop = tornado.ioloop.IOLoop.instance()
dsn = "user=benchmarkdbuser password=benchmarkdbpass dbname=hello_world host=%s" % options.postgres
db = momoko.Pool(dsn, size=100, max_size=200)
ioloop.run_sync(db.connect)
ioloop.start()
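    # Example invocation (illustrative; the script name, host and port are
    # assumptions):
    #
    #   python app.py --port=8888 --postgres=127.0.0.1
    #
    # and then, from another shell:
    #
    #   curl http://127.0.0.1:8888/db
    #   curl "http://127.0.0.1:8888/queries?queries=20"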
|
the-stack_0_2480 | import logging
import os
import random
import sys
import time
from threading import Thread
from termcolor import cprint
from core import OpenLeecher
from kbhit import KBHit
# Core class
# Handles the core, can be threaded
# Args : None
class Core(Thread):
def __init__(self):
Thread.__init__(self, target=self.run)
self.core = OpenLeecher()
def run(self):
self.core.run()
ol = Core()
# Show Help
# Display help in console
# Args : None
def show_help():
cprint("OpenLeecher v" + ol.core.VERSION, 'blue', attrs=['bold'])
cprint("Made by lwsk", 'blue')
cprint("https://openleecher.tk/\n", 'blue')
cprint("Command line arguments:", attrs=['bold'])
    cprint("\t--gui : launch the graphical user interface")
cprint("\t-b 0 : set behavior")
cprint("\t-g IPGenerator : set generator(s)")
cprint("\t-s WEBScanner FTPScanner : set scanner(s)")
cprint("\t-t 8 : set thread limit at 8")
cprint("\t-h : display help")
cprint("\nBehaviors", attrs=['bold'])
cprint("\t0 : Random")
cprint("\t1 : Intelligent")
cprint("\t2 : Complete")
cprint("\nAvailable generator(s):", attrs=['bold'])
for g in ol.core.generator.g:
cprint("\t" + g.__class__.__name__)
cprint("\nAvailable scanner(s):", attrs=['bold'])
for s in ol.core.scanner.s:
cprint("\t" + s.__class__.__name__)
# Reset UI
# Clear console screen and re-draw UI
# Args : None
def reset_UI():
os.system('cls' if os.name == 'nt' else 'clear')
print(""".-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-.
| |
| OpenLeecher v"""+ol.core.VERSION+""" |
| |
| Generators : |""")
for g in ol.core.generator.g:
if g.active is True:
print("""
| """+str(g.__class__.__name__).strip()+""" |""")
print("""
! !
: :
: Scanners : :""")
for s in ol.core.scanner.s:
if s.active is True:
print("""
. """+str(s.__class__.__name__).strip()+""" .""")
print("""
: :
: :
! !
| Controls : |
| 'o' Raise maximum thread limit |
| 'l' Lower maximum thread limit |
| space Pause/Resume generator |
| 'q' / esc Quit |
| |
`-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-'
""")
print("\n")
def run_core():
running = True
reset_UI()
kb = KBHit()
while running:
if kb.kbhit():
c = kb.getch()
if len(c) > 0:
                if c == 'q' or ord(c) == 27:  # esc
running = False
elif c == 'o':
ol.core.maxthreads += 1
elif c == 'l':
if ol.core.maxthreads > 0:
ol.core.maxthreads -= 1
                elif ord(c) == 32:  # space
ol.core.paused = not ol.core.paused
#else:
#sys.stdout.write('\r' + 'Keycode not registered ' + str(ord(c)) + '\n')
pre = ""
if ol.core.paused is True:
pre += '[PAUSED] '
sys.stdout.write("\r" + pre + str(len(ol.core.threads)) + "/" + str(ol.core.maxthreads) + " threads running" + ('.' * random.randint(1, 3)) + " ")
# Notifications
if len(ol.core.scanner.results) > 0:
x = ol.core.scanner.results.pop(0)
sys.stdout.write("\r" + "Found " + str(x.p) + " @ " + str(x.v) + '\n')
time.sleep(0.04)
sys.stdout.write("\r" + "Quitting OpenLeecher..." + "\n")
logging.info("Quitting OpenLeecher...")
sys.exit(0)
def start():
if os.name == 'nt':
import gui
gui.launch()
else:
for i in range(1, len(sys.argv)):
a = sys.argv[i]
if str(a) == '-h':
show_help()
run = False
logging.info("Quitting OpenLeecher...")
sys.exit(0)
elif str(a) == '--gui':
import gui
gui.launch()
elif str(a) == '-t':
try:
ol.core.maxthreads = int(sys.argv[i + 1])
except:
run = False
cprint("Error !", 'red', attrs=['bold']),
print("Thread limit argument (-t) value is invalid")
elif str(a) == '-b':
try:
ol.core.behavior = int(sys.argv[i + 1])
except:
run = False
cprint("Error !", 'red', attrs=['bold']),
                    print("Behavior argument (-b) value is invalid")
elif str(a) == '-g':
j = 1
try:
while str(sys.argv[i + j]).startswith('-') is False:
if ol.core.generator.activate(str(sys.argv[i + j])) is False:
cprint("Error !", 'red', attrs=['bold']),
print("Generator " + str(sys.argv[i + j]) + " cannot be loaded.")
logging.error("Error : Generator " + str(sys.argv[i + j]) + " cannot be loaded.")
break
j += 1
except:
pass
elif str(a) == '-s':
j = 1
try:
while str(sys.argv[i + j]).startswith('-') is False:
if ol.core.scanner.activate(str(sys.argv[i + j])) is False:
cprint("Error !", 'red', attrs=['bold']),
print("Scanner " + str(sys.argv[i + j]) + " cannot be loaded.")
logging.error("Error : Scanner " + str(sys.argv[i + j]) + " cannot be loaded.")
break
j += 1
except:
pass
if ol.core.can_run() is True:
ol.start()
run_core()
else:
cprint("Error !", 'red', attrs=['bold'])
print("There must be at least one generator and one scanner active.")
logging.error("Error : There must be at least one generator and one scanner active.")
logging.warning("Cannot start session.")
logging.info("Quitting OpenLeecher...")
sys.exit(0)
if __name__ == '__main__':
start()
    sys.exit(0)
|
the-stack_0_2482 | import numpy as np
import cv2
class UISketch:
def __init__(self, img_size, img_path, scale, accu=True, nc=3):
self.img_size = img_size
self.scale = scale
self.nc = nc
        if img_path != "":
self.img = cv2.imread(img_path)
self.mask = cv2.imread(img_path,cv2.IMREAD_GRAYSCALE)
self.mask = np.expand_dims(self.mask,axis=2)
else:
self.img = np.zeros((img_size, img_size, self.nc), np.uint8)
self.mask = np.zeros((img_size, img_size, 1), np.uint8)
if self.nc == 1: # [hack]
self.width = 2
else:
self.width = 1
def update(self, points, color):
num_pnts = len(points)
c = 255 - int(color.red())
if c > 0:
c = 255
for i in range(0, num_pnts - 1):
pnt1 = (int(points[i].x()/self.scale), int(points[i].y()/self.scale))
pnt2 = (int(points[i + 1].x()/self.scale), int(points[i + 1].y()/self.scale))
if self.nc == 3:
cv2.line(self.img, pnt1, pnt2, (c,c,c), self.width)
else:
cv2.line(self.img, pnt1, pnt2, c, self.width)
cv2.line(self.mask, pnt1, pnt2, 255, self.width)
def update_width(self, d, color):
        self.width = min(20, max(1, self.width + d))
return self.width
def get_constraints(self):
return self.img, self.mask
def reset(self):
self.img = np.zeros((self.img_size, self.img_size, self.nc), np.uint8)
self.mask = np.zeros((self.img_size, self.img_size, 1), np.uint8)
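# Usage sketch (illustrative; not part of the original widget code). update()
# expects Qt-style point objects exposing .x()/.y() and a color exposing
# .red(), e.g. PyQt5's QPoint and QColor; that dependency is an assumption here:
#
#   from PyQt5.QtCore import QPoint
#   from PyQt5.QtGui import QColor
#
#   sketch = UISketch(img_size=256, img_path="", scale=1.0)
#   sketch.update([QPoint(10, 10), QPoint(60, 80)], QColor(0, 0, 0))
#   img, mask = sketch.get_constraints()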
|
the-stack_0_2483 | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse_lazy
from django.forms import formset_factory
from django.views.generic import (
CreateView, DetailView,
FormView,
ListView,
TemplateView,
UpdateView,
RedirectView,
)
from django.conf import settings
from django.db.models import Count
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.auth.decorators import login_required
import json
from .forms import (
CommentCreateForm,
DocumentUploadForm, LogbookCreateForm,
MarkForm,
BaseMarkFormSet,
StudentGroupForm,
StudentGroupJoinForm,
)
from .mixins import (
StudentGroupContextMixin, UserHasGroupAccessMixin, UserIsStudentMixin,
UserIsTeacherMixin)
from .models import Batch, Comment, Document, Logbook, StudentGroup, Notification
from ..registration.models import User
class GroupCreateJoinView(
LoginRequiredMixin, UserIsStudentMixin, TemplateView):
http_method_names = ['get']
template_name = 'thesis/group_create_join.html'
class GroupCreateView(LoginRequiredMixin, UserIsStudentMixin, CreateView):
model = StudentGroup
form_class = StudentGroupForm
success_url = reverse_lazy('thesis:document_list')
template_name = 'thesis/group_create.html'
http_method_names = ['get', 'post']
def get_form(self, form_class=None):
if form_class is None:
form_class = self.get_form_class()
return form_class(user=self.request.user, **self.get_form_kwargs())
def form_valid(self, form):
self.object = studentgroup = form.save()
user = self.request.user
user.studentgroup = studentgroup
user.save()
messages.success(
self.request,
'Created Group Successfully!',
extra_tags='is-success')
return HttpResponseRedirect(self.get_success_url())
class GroupJoinView(LoginRequiredMixin, UserIsStudentMixin, FormView):
model = StudentGroup
form_class = StudentGroupJoinForm
success_url = reverse_lazy('thesis:document_list')
template_name = 'thesis/group_join.html'
http_method_names = ['get', 'post']
def form_valid(self, form):
md5hash = form.cleaned_data.get('md5hash')
studentgroup = get_object_or_404(StudentGroup, md5hash=md5hash)
if studentgroup.status != 'Pending':
messages.error(
self.request,
"The Group has already been approved by admin. You can not join this group.",
extra_tags='is-danger',
)
return HttpResponseRedirect('/group/join/')
batch = studentgroup.batch
students_count = studentgroup.students.all().count()
if students_count >= batch.max_students_per_group:
messages.error(
self.request,
'The Group has already reached maximum capacity',
extra_tags='is-danger'
)
return HttpResponseRedirect('/group/join/')
user = self.request.user
user.studentgroup = studentgroup
user.save()
messages.success(
self.request,
'You joined the Group successfully!',
extra_tags='is-success')
return HttpResponseRedirect(self.get_success_url())
class DocumentListView(
LoginRequiredMixin, UserHasGroupAccessMixin,
StudentGroupContextMixin, ListView):
template_name = 'thesis/document_list.html'
http_method_names = ['get']
context_object_name = 'proposal_documents'
def filter_by_document_type(self, document_type):
return self.studentgroup.documents.filter(
document_type=document_type,
).order_by(
'-is_accepted', '-upload_time',
)
def get_queryset(self):
return self.filter_by_document_type(Document.DocumentType.PROPOSAL.value)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['comments'] = self.studentgroup.comments.order_by(
'-created_at')
context['pre_defense_documents'] = self.filter_by_document_type(
Document.DocumentType.PRE_DEFENSE.value)
context['defense_documents'] = self.filter_by_document_type(
Document.DocumentType.DEFENSE.value)
context['logbooks'] = self.studentgroup.logbooks.all().order_by('-time')
return context
def get(self, request, *args, **kwargs):
notifications = Notification.objects.filter(
user=request.user,
studentgroup=self.studentgroup,
is_viewed=False,
)
for notification in notifications:
notification.is_viewed = True
Notification.objects.bulk_update(notifications, ['is_viewed'])
response = super().get(request, *args, **kwargs)
return response
class DocumentUploadView(
LoginRequiredMixin, UserIsStudentMixin, UserHasGroupAccessMixin,
StudentGroupContextMixin, CreateView):
model = Document
template_name = 'thesis/document_upload.html'
form_class = DocumentUploadForm
success_url = reverse_lazy('thesis:document_list')
http_method_names = ['get', 'post']
def form_valid(self, form):
self.object = document = form.save(commit=False)
document.studentgroup = self.studentgroup
document.save()
messages.success(
self.request,
'Document Uploaded successfully!',
extra_tags='is-success')
return HttpResponseRedirect(self.get_success_url())
class LogbookCreateView(
LoginRequiredMixin, UserIsStudentMixin, UserHasGroupAccessMixin,
StudentGroupContextMixin, CreateView):
model = Logbook
template_name = 'thesis/logbook_upload.html'
form_class = LogbookCreateForm
success_url = reverse_lazy('thesis:document_list')
http_method_names = ['get', 'post']
def get_form(self, *args, **kwargs):
return self.form_class(
studentgroup=self.studentgroup,
**self.get_form_kwargs(),
)
def form_valid(self, form):
messages.success(
self.request,
'Logbook Created Successfully!',
extra_tags='is-success',
)
return super().form_valid(form)
class LogbookDetailView(
LoginRequiredMixin, UserHasGroupAccessMixin, StudentGroupContextMixin,
DetailView):
model = Logbook
context_object_name = 'logbook'
template_name = 'thesis/logbook_details.html'
class LogbookApprovedToggleView(
LoginRequiredMixin,
UserIsTeacherMixin,
StudentGroupContextMixin,
RedirectView):
def get_redirect_url(self, *args, **kwargs):
logbook = get_object_or_404(
Logbook,
studentgroup=self.studentgroup,
id=self.kwargs['logbook_id'],
)
logbook.approved = not logbook.approved
logbook.save()
if logbook.approved:
messages.success(
self.request,
f'Logbook {logbook.id} has been approved',
extra_tags='is-success',
)
else:
messages.error(
self.request,
f'Logbook #{logbook.id} has been disapproved',
extra_tags='is-danger',
)
return reverse_lazy(
'thesis:group_detail',
args=(self.studentgroup.md5hash,),
)
class DocumentAcceptedToggleView(
LoginRequiredMixin,
UserIsTeacherMixin,
StudentGroupContextMixin,
RedirectView):
def get_redirect_url(self, *args, **kwargs):
document = get_object_or_404(
Document,
studentgroup=self.studentgroup,
id=self.kwargs['document_id'],
)
document.is_accepted = not document.is_accepted
document.save()
if document.is_accepted:
messages.success(
self.request, 'Document has been approved', extra_tags='is-success')
else:
messages.error(
self.request, 'Document has been disapproved', extra_tags='is-danger')
return reverse_lazy('thesis:group_detail', args=(self.studentgroup.md5hash,))
class GroupInviteView(
LoginRequiredMixin, UserIsStudentMixin, UserHasGroupAccessMixin,
StudentGroupContextMixin, TemplateView):
http_method_names = ['get']
template_name = "thesis/group_invite.html"
class BaseGroupListView(LoginRequiredMixin, UserIsTeacherMixin, ListView):
template_name = "thesis/group_list.html"
http_method_names = ['get']
context_object_name = 'groups'
def get_context_data(self, *args, object_list=None, **kwargs):
context_data = super().get_context_data(
*args, object_list=object_list, **kwargs)
batch_number = self.kwargs.get('batch_number', '')
context_data['batches'] = Batch.objects.all()
context_data['batch_number'] = int(
batch_number) if batch_number else ''
return context_data
def get_studentgroups(self, studentgroup_related_name):
user = self.request.user
queryset = getattr(user, studentgroup_related_name).filter(
approved=True).order_by('id')
batch_number = self.kwargs.get('batch_number', '')
if batch_number:
return queryset.filter(batch__number=batch_number)
return queryset
class GroupListView(BaseGroupListView):
def get_queryset(self):
return self.get_studentgroups('studentgroups')
class InternalGroupListView(BaseGroupListView):
def get_queryset(self):
return self.get_studentgroups('internal_studentgroups')
class ExternalGroupListView(BaseGroupListView):
def get_queryset(self):
return self.get_studentgroups('external_studentgroups')
class NotificationListView(LoginRequiredMixin, ListView):
template_name = "thesis/notification_list.html"
http_method_names = ['get']
context_object_name = 'notifications'
def get_queryset(self):
return Notification.objects.filter(user=self.request.user, is_viewed=False).order_by('-created_at')
class GroupUpdateView(
LoginRequiredMixin, UserIsStudentMixin, UserHasGroupAccessMixin,
UpdateView):
model = StudentGroup
template_name = "thesis/group_update.html"
http_method_names = ['get', 'post']
form_class = StudentGroupForm
success_url = reverse_lazy('thesis:document_list')
def get_form(self, form_class=None):
if form_class is None:
form_class = self.get_form_class()
return form_class(user=self.request.user, **self.get_form_kwargs())
def get_object(self, *args, **kwargs):
return self.request.user.studentgroup
def form_valid(self, form):
response = super().form_valid(form)
messages.success(
self.request,
'Group Updated Successfully!',
extra_tags='is-success')
return response
class CommentCreateView(
LoginRequiredMixin, UserHasGroupAccessMixin, StudentGroupContextMixin,
CreateView):
model = Comment
http_method_names = ['post']
form_class = CommentCreateForm
success_url = reverse_lazy('thesis:document_list')
def get_success_url(self, *args, **kwargs):
if self.request.user.is_teacher:
return reverse_lazy(
'thesis:group_detail',
kwargs={'group_code': self.studentgroup.md5hash})
return self.success_url
def form_valid(self, form):
self.object = comment = form.save(commit=False)
comment.user = self.request.user
comment.studentgroup = self.studentgroup
comment.save()
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
messages.error(
self.request,
'Comment can not be empty.',
extra_tags='is-danger'
)
return HttpResponseRedirect(self.get_success_url())
class StudentGroupApproveView(
LoginRequiredMixin, UserIsTeacherMixin, StudentGroupContextMixin,
TemplateView):
http_method_names = ['get', 'post']
template_name = 'thesis/group_approve.html'
def post(self, request, *args, **kwargs):
if self.studentgroup.approved:
self.studentgroup.approved = False
if self.studentgroup.progress == 100:
self.studentgroup.progress = 90
messages.success(
request,
'The StudentGroups Proposal has been disapproved!',
extra_tags='is-success')
else:
self.studentgroup.approved = True
self.studentgroup.progress = 100
messages.success(
request,
'The StudentGroups Proposal has been approved!',
extra_tags='is-success')
self.studentgroup.save()
return HttpResponseRedirect(
reverse_lazy(
'thesis:group_detail',
kwargs={'group_code': self.studentgroup.md5hash}))
class StudentGroupProgressUpdateView(
LoginRequiredMixin, UserIsTeacherMixin, StudentGroupContextMixin,
TemplateView):
http_method_names = ['post']
def post(self, request, *args, **kwargs):
data = json.loads(str(request.body.decode('utf-8')))
progress_value = int(data.get('progress_value'))
if progress_value > 100:
progress_value = 100
elif progress_value < 0:
progress_value = 0
self.studentgroup.progress = progress_value
self.studentgroup.save()
return JsonResponse({'progress_value': progress_value})
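# Request sketch (illustrative; the URL is a placeholder and auth/CSRF handling
# is omitted). The view above expects a JSON body and clamps the value to 0-100:
#
#   import requests
#   requests.post(
#       "https://example.com/group/<group_code>/progress/",
#       json={"progress_value": 75},
#       cookies=session_cookies)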
@login_required
def get_teachers_list_by_field_json(request, field_id):
available_teachers = User.objects.values(
'id', 'username', 'full_name',
group_count=Count('studentgroups')).filter(
fields__id=field_id,
group_count__lt=settings.MAXIMUM_GROUPS_UNDER_TEACHER
)
data = json.dumps(list(available_teachers), cls=DjangoJSONEncoder)
return JsonResponse(data, safe=False,)
@login_required
def grade_students(request, group_code):
studentgroup = get_object_or_404(StudentGroup, md5hash=group_code)
user = request.user
students = studentgroup.students.all().order_by('username')
students_count = students.count()
MarkFormSet = formset_factory(
MarkForm,
extra=students_count,
min_num=students_count,
max_num=students_count,
validate_min=True,
validate_max=True,
formset=BaseMarkFormSet,
)
formset_initial = {
"form_kwargs": {
'user': user,
'studentgroup': studentgroup,
},
"initial": [{"student_choice": student.id} for student in students],
}
if request.method == 'POST':
formset = MarkFormSet(request.POST, **formset_initial)
if formset.is_valid():
formset.save()
messages.success(
request,
'Grades have been submitted successfully.',
extra_tags='is-success')
return redirect(
reverse_lazy(
'thesis:group_detail',
args=(group_code,),
),
)
else:
return render(
request,
'thesis/create-mark.html',
context={
'studentgroup': studentgroup,
'user': user,
'formset': formset,
},
)
return render(
request,
'thesis/create-mark.html',
context={
'studentgroup': studentgroup,
'user': user,
'formset': MarkFormSet(**formset_initial)
},
)
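# POST data sketch (illustrative): formset_factory uses Django's standard
# management form, so a grade submission includes keys such as
#
#   form-TOTAL_FORMS=2, form-INITIAL_FORMS=2,
#   form-0-student_choice=<student id>, form-1-student_choice=<student id>, ...
#
# plus whatever mark fields MarkForm defines (not shown in this module).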
|
the-stack_0_2484 | # src/lyrical/ovh.py
"""Client for the lyrics.ovh REST API."""
from concurrent.futures import as_completed
from dataclasses import dataclass
from typing import List
from urllib.parse import unquote, urlparse
import click
import desert
import marshmallow
import requests
from requests.adapters import HTTPAdapter
from requests_futures.sessions import FuturesSession
from urllib3.util.retry import Retry
from . import __version__
USER_AGENT: str = f"Lyrical/{__version__} ( https://github.com/openfinch/lyrical )"
LYRICS_API_URL: str = "https://api.lyrics.ovh/v1/{artist}/{title}"
@dataclass
class LyricsCorpus:
"""LyricsCorpus resource.
Attributes:
title: Title of the track
artist: Name of the artist
lyrics: lyrics of the track
"""
title: str
artist: str
lyrics: str
lyrics_schema = desert.schema(LyricsCorpus, meta={"unknown": marshmallow.EXCLUDE})
def build_corpus(artist: str, tracklist: List) -> List[LyricsCorpus]:
"""Build a lyrics corpus.
Performs a GET request to the /recording?query=arid:{id} endpoint.
Args:
artist: The name of the artist
tracklist: A list of track names
Returns:
A LyricsCorpus resource
Raises:
ClickException: The HTTP request failed or the HTTP response
contained an invalid body.
"""
urls = []
batch_size = 10
corpus = []
try:
        # Build the lyrics.ovh request URL for each track
for track in tracklist:
urls.append(LYRICS_API_URL.format(artist=artist, title=track))
# Generate and iterate over a set of request futures
with FuturesSession(max_workers=batch_size) as session:
retries = 5
status_forcelist = [503]
retry = Retry(
total=retries,
read=retries,
connect=retries,
respect_retry_after_header=True,
backoff_factor=1,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("https://", adapter)
futures = [
session.get(url, headers={"User-Agent": USER_AGENT}) for url in urls
]
for future in as_completed(futures):
resp = future.result()
if resp.status_code == 200:
resp_json = resp.json()
url = urlparse(resp.url)
path = url.path.split("/")
if (
resp_json["lyrics"] != "Instrumental"
                        and len(resp_json["lyrics"]) > 0
):
lyric = lyrics_schema.load(
{
"artist": unquote(path[2]),
"title": unquote(path[3]),
"lyrics": resp_json["lyrics"],
}
)
corpus.append(lyric)
return corpus
except (requests.RequestException, marshmallow.ValidationError) as error:
message: str = str(error)
raise click.ClickException(message)
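# Usage sketch (illustrative; artist and track names are placeholders):
#
#   corpus = build_corpus("Daft Punk", ["One More Time", "Around the World"])
#   for entry in corpus:
#       print(entry.title, len(entry.lyrics.split()))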
|
the-stack_0_2485 | # qubit number=4
# total number=46
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=13
prog += CNOT(0,3) # number=17
prog += X(3) # number=18
prog += CNOT(0,3) # number=19
prog += CNOT(0,3) # number=15
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(3) # number=12
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=37
prog += CZ(0,3) # number=38
prog += H(3) # number=39
prog += CNOT(0,3) # number=40
prog += X(3) # number=41
prog += H(3) # number=43
prog += CZ(0,3) # number=44
prog += H(3) # number=45
prog += H(3) # number=30
prog += CZ(0,3) # number=31
prog += H(3) # number=32
prog += H(0) # number=33
prog += CZ(3,0) # number=34
prog += RX(0.33300882128051834,2) # number=36
prog += H(0) # number=35
prog += CNOT(3,0) # number=23
prog += Z(3) # number=24
prog += CNOT(3,0) # number=25
prog += CNOT(3,0) # number=22
prog += H(3) # number=8
prog += H(0) # number=9
prog += Y(2) # number=10
prog += Y(2) # number=11
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil3083.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
the-stack_0_2486 | #!/usr/bin/env python3
import pandas as pd
import seaborn as sns
import sys
import matplotlib.pyplot as plt
import numpy as np
from macros import colors
def algorithm_font(algorithm):
return r'\textsf{{{}}}'.format(algorithm)
def combined(algorithm, regularity):
return '{}-{}'.format(algorithm, regularity)
def plot_tw_vs_simtime(data_filename, plot_filename, verbose):
# Use latex font
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# Set up Seaborn style
sns.set(style="darkgrid")
# Import the dataframe
dataframe = pd.read_csv(data_filename)
dataframe = dataframe.sort_values(by=['algorithm'])
# Keep the data we want for the large facet
dataframe = dataframe.dropna(subset=['tree-decomp-width'])
dataframe = dataframe.loc[(dataframe['algorithm'] != 'quickbb')]
dataframe = dataframe.loc[dataframe['vertices'].isin([10, 14, 18,
22, 26, 30])]
dataframe['tree-decomp-width'] =\
pd.to_numeric(dataframe['tree-decomp-width'], downcast='integer')
dataframe['algorithm'] =\
np.vectorize(algorithm_font)(dataframe['algorithm'])
# If we want to have a different color for algorithm + regularity
# dataframe['combined'] =\
# np.vectorize(combined)(dataframe['algorithm'], dataframe['regularity'])
plot = sns.stripplot(x="tree-decomp-width",
y="simulation-time",
hue="algorithm",
data=dataframe,
dodge=True,
size=4,
jitter=True,
alpha=0.7,
linewidth=0.1,
palette=[colors[x] for x in ['freetdi', 'meiji']],
hue_order=['\\textsf{freetdi}', '\\textsf{meiji-e}'])
for i in range(len(dataframe["tree-decomp-width"].unique()) - 1):
plot.axvline(x=i+.5, c="white", dashes=(2, 1))
plot.set(ylim=(.01, 10000), yscale="log")
plot.set(xlabel="Contraction Complexity",
ylabel="Simulation Time (sec)")
# Add legend
plot.legend(loc="upper right")
# Save figure
for extension in ['.pdf', '.png']:
plt.savefig(plot_filename + extension)
if __name__ == '__main__':
data_filename = sys.argv[1]
plot_filename = sys.argv[2]
plot_tw_vs_simtime(data_filename, plot_filename, False)
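    # Example invocation (illustrative; the script name is a placeholder, and
    # the CSV is expected to contain the columns used above: algorithm,
    # vertices, regularity, tree-decomp-width and simulation-time):
    #
    #   python plot_tw_vs_simtime.py results.csv tw_vs_simtime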
|
the-stack_0_2487 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Dataset is currently unstable. APIs subject to change without notice."""
import pyarrow as pa
from pyarrow.util import _stringify_path, _is_path_like
from pyarrow._dataset import ( # noqa
AndExpression,
CastExpression,
CompareOperator,
ComparisonExpression,
Dataset,
DatasetFactory,
DirectoryPartitioning,
Expression,
FieldExpression,
FileFormat,
FileSystemSource,
FileSystemSourceFactory,
FileSystemFactoryOptions,
HivePartitioning,
InExpression,
IpcFileFormat,
IsValidExpression,
NotExpression,
OrExpression,
ParquetFileFormat,
Partitioning,
PartitioningFactory,
ScalarExpression,
Scanner,
ScanTask,
Source,
TreeSource,
SourceFactory
)
def partitioning(schema=None, field_names=None, flavor=None):
"""
Specify a partitioning scheme.
The supported schemes include:
- "DirectoryPartitioning": this scheme expects one segment in the file path
for each field in the specified schema (all fields are required to be
present). For example given schema<year:int16, month:int8> the path
"/2009/11" would be parsed to ("year"_ == 2009 and "month"_ == 11).
- "HivePartitioning": a scheme for "/$key=$value/" nested directories as
found in Apache Hive. This is a multi-level, directory based partitioning
scheme. Data is partitioned by static values of a particular column in
the schema. Partition keys are represented in the form $key=$value in
directory names. Field order is ignored, as are missing or unrecognized
field names.
For example, given schema<year:int16, month:int8, day:int8>, a possible
path would be "/year=2009/month=11/day=15" (but the field order does not
need to match).
Parameters
----------
schema : pyarrow.Schema, default None
The schema that describes the partitions present in the file path.
If not specified, and `field_names` and/or `flavor` are specified,
the schema will be inferred from the file path (and a
PartitioningFactory is returned).
field_names : list of str, default None
A list of strings (field names). If specified, the schema's types are
inferred from the file paths (only valid for DirectoryPartitioning).
flavor : str, default None
The default is DirectoryPartitioning. Specify ``flavor="hive"`` for
a HivePartitioning.
Returns
-------
Partitioning or PartitioningFactory
Examples
--------
Specify the Schema for paths like "/2009/June":
>>> partitioning(pa.schema([("year", pa.int16()), ("month", pa.string())]))
or let the types be inferred by only specifying the field names:
>>> partitioning(field_names=["year", "month"])
For paths like "/2009/June", the year will be inferred as int32 while month
will be inferred as string.
Create a Hive scheme for a path like "/year=2009/month=11":
>>> partitioning(
... pa.schema([("year", pa.int16()), ("month", pa.int8())]),
... flavor="hive")
A Hive scheme can also be discovered from the directory structure (and
types will be inferred):
>>> partitioning(flavor="hive")
"""
if flavor is None:
# default flavor
if schema is not None:
if field_names is not None:
raise ValueError(
"Cannot specify both 'schema' and 'field_names'")
return DirectoryPartitioning(schema)
elif field_names is not None:
if isinstance(field_names, list):
return DirectoryPartitioning.discover(field_names)
else:
raise ValueError(
"Expected list of field names, got {}".format(
type(field_names)))
else:
raise ValueError(
"For the default directory flavor, need to specify "
"a Schema or a list of field names")
elif flavor == 'hive':
if field_names is not None:
raise ValueError("Cannot specify 'field_names' for flavor 'hive'")
elif schema is not None:
if isinstance(schema, pa.Schema):
return HivePartitioning(schema)
else:
raise ValueError(
"Expected Schema for 'schema', got {}".format(
type(schema)))
else:
return HivePartitioning.discover()
else:
raise ValueError("Unsupported flavor")
def _ensure_fs(filesystem, path):
# Validate or infer the filesystem from the path
from pyarrow.fs import FileSystem, LocalFileSystem
if filesystem is None:
try:
filesystem, _ = FileSystem.from_uri(path)
except Exception:
# when path is not found, we fall back to local file system
filesystem = LocalFileSystem()
return filesystem
def _ensure_fs_and_paths(path_or_paths, filesystem=None):
# Validate and convert the path-likes and filesystem.
# Returns filesystem and list of string paths or FileSelector
from pyarrow.fs import FileType, FileSelector
if isinstance(path_or_paths, list):
paths_or_selector = [_stringify_path(path) for path in path_or_paths]
# infer from first path
filesystem = _ensure_fs(filesystem, paths_or_selector[0])
else:
path = _stringify_path(path_or_paths)
filesystem = _ensure_fs(filesystem, path)
stats = filesystem.get_target_stats([path])[0]
if stats.type == FileType.Directory:
# for directory, pass a selector
paths_or_selector = FileSelector(path, recursive=True)
elif stats.type == FileType.File:
# for a single file path, pass it as a list
paths_or_selector = [path]
else:
raise FileNotFoundError(path)
return filesystem, paths_or_selector
def _ensure_partitioning(scheme):
# Validate input and return a Partitioning(Factory) or passthrough None
# for no partitioning
if scheme is None:
pass
elif isinstance(scheme, str):
scheme = partitioning(flavor=scheme)
elif isinstance(scheme, list):
scheme = partitioning(field_names=scheme)
elif isinstance(scheme, (Partitioning, PartitioningFactory)):
pass
else:
        raise ValueError(
"Expected Partitioning or PartitioningFactory, got {}".format(
type(scheme)))
return scheme
def _ensure_format(obj):
if isinstance(obj, FileFormat):
return obj
elif obj == "parquet":
return ParquetFileFormat()
elif obj == "ipc":
return IpcFileFormat()
else:
raise ValueError("format '{}' is not supported".format(obj))
def source(path_or_paths, filesystem=None, partitioning=None,
format=None):
"""
Open a (multi-file) data source.
Parameters
----------
path_or_paths : str, pathlib.Path, or list of those
Path to a file or to a directory containing the data files, or
a list of paths.
filesystem : FileSystem, default None
By default will be inferred from the path.
partitioning : Partitioning(Factory), str or list of str
The partitioning scheme specified with the ``partitioning()``
function. A flavor string can be used as shortcut, and with a list of
        field names a DirectoryPartitioning will be inferred.
format : str, default None
Currently only "parquet" is supported.
Returns
-------
    FileSystemSourceFactory
"""
fs, paths_or_selector = _ensure_fs_and_paths(path_or_paths, filesystem)
partitioning = _ensure_partitioning(partitioning)
format = _ensure_format(format or "parquet")
# TODO pass through options
options = FileSystemFactoryOptions()
if isinstance(partitioning, PartitioningFactory):
options.partitioning_factory = partitioning
elif isinstance(partitioning, Partitioning):
options.partitioning = partitioning
return FileSystemSourceFactory(fs, paths_or_selector, format, options)
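# Usage sketch (illustrative; the path is a placeholder): build a factory for a
# directory of Parquet files partitioned by year/month, then materialize it
# with the dataset() helper defined below.
#
#   src = source(
#       "/data/events",
#       format="parquet",
#       partitioning=partitioning(field_names=["year", "month"]))
#   ds = dataset(src)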
def _ensure_source(src, **kwargs):
# Need to return SourceFactory since `dataset` might need to finish the
# factory with a unified schema.
# TODO: return Source if a specific schema was passed?
if _is_path_like(src):
return source(src, **kwargs)
elif isinstance(src, SourceFactory):
if any(v is not None for v in kwargs.values()):
# when passing a SourceFactory, the arguments cannot be specified
raise ValueError(
"When passing a Source(Factory), you cannot pass any "
"additional arguments"
)
return src
elif isinstance(src, Source):
raise TypeError(
"Source objects are currently not supported, only SourceFactory "
"instances. Use the source() function to create such objects."
)
else:
raise TypeError(
"Expected a path-like or Source, got {}".format(type(src))
)
def dataset(sources, filesystem=None, partitioning=None, format=None):
"""
Open a (multi-source) dataset.
Parameters
----------
sources : path or list of paths or source or list of sources
Path to a file or to a directory containing the data files, or a list
of paths for a multi-source dataset. To have more control, a list of
sources can be passed, created with the ``source()`` function (in this
case, the additional keywords will be ignored).
filesystem : FileSystem, default None
By default will be inferred from the path.
partitioning : Partitioning(Factory), str, list of str
The partitioning scheme specified with the ``partitioning()``
function. A flavor string can be used as shortcut, and with a list of
        field names a DirectoryPartitioning will be inferred.
format : str
Currently only "parquet" is supported.
Returns
-------
Dataset
Examples
--------
Opening a dataset for a single directory:
>>> dataset("path/to/nyc-taxi/", format="parquet")
Combining different sources:
>>> dataset([
... source("s3://old-taxi-data", format="parquet"),
... source("local/path/to/new/data", format="csv")
... ])
"""
if not isinstance(sources, list):
sources = [sources]
sources = [
_ensure_source(src, filesystem=filesystem, partitioning=partitioning,
format=format)
for src in sources
]
return DatasetFactory(sources).finish()
def field(name):
"""References a named column of the dataset.
Stores only the field's name. Type and other information is known only when
    the expression is applied on a dataset having an explicit schema.
Parameters
----------
name : string
The name of the field the expression references to.
Returns
-------
field_expr : FieldExpression
"""
return FieldExpression(name)
def scalar(value):
"""Expression representing a scalar value.
Parameters
----------
value : bool, int, float or string
Python value of the scalar. Note that only a subset of types are
currently supported.
Returns
-------
scalar_expr : ScalarExpression
"""
return ScalarExpression(value)
|
the-stack_0_2488 | # Copyright (c) Yugabyte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import argparse
import hashlib
import json
import os
import platform
import subprocess
import sys
import yaml
from typing import Optional, List, Set, Tuple, Dict, Any
from build_definitions import BUILD_TYPE_COMMON, get_build_def_module, BUILD_TYPE_UNINSTRUMENTED, \
BUILD_TYPE_CLANG_UNINSTRUMENTED, BUILD_TYPE_ASAN, BUILD_TYPE_TSAN, BUILD_TYPES, \
BUILD_GROUP_COMMON, BUILD_GROUP_INSTRUMENTED
from yugabyte_db_thirdparty.builder_helpers import PLACEHOLDER_RPATH, get_make_parallelism, \
get_rpath_flag, sanitize_flags_line_for_log, log_and_set_env_var_to_list
from yugabyte_db_thirdparty.builder_helpers import is_ninja_available
from yugabyte_db_thirdparty.builder_interface import BuilderInterface
from yugabyte_db_thirdparty.cmd_line_args import parse_cmd_line_args
from yugabyte_db_thirdparty.compiler_choice import CompilerChoice
from yugabyte_db_thirdparty.custom_logging import fatal, log, heading, log_output, colored_log, \
YELLOW_COLOR, SEPARATOR
from yugabyte_db_thirdparty.dependency import Dependency
from yugabyte_db_thirdparty.devtoolset import activate_devtoolset
from yugabyte_db_thirdparty.download_manager import DownloadManager
from yugabyte_db_thirdparty.env_helpers import write_env_vars
from yugabyte_db_thirdparty.os_detection import is_mac, is_linux
from yugabyte_db_thirdparty.string_util import indent_lines
from yugabyte_db_thirdparty.util import YB_THIRDPARTY_DIR, remove_path, \
mkdir_if_missing, PushDir, assert_list_contains, assert_dir_exists, EnvVarContext
from yugabyte_db_thirdparty.file_system_layout import FileSystemLayout
from yugabyte_db_thirdparty.toolchain import Toolchain, ensure_toolchain_installed
ASAN_FLAGS = [
'-fsanitize=address',
'-fsanitize=undefined',
'-DADDRESS_SANITIZER',
]
TSAN_FLAGS = [
'-fsanitize=thread',
'-DTHREAD_SANITIZER',
]
class Builder(BuilderInterface):
args: argparse.Namespace
ld_flags: List[str]
executable_only_ld_flags: List[str]
compiler_flags: List[str]
preprocessor_flags: List[str]
c_flags: List[str]
cxx_flags: List[str]
libs: List[str]
additional_allowed_shared_lib_paths: Set[str]
download_manager: DownloadManager
compiler_choice: CompilerChoice
fs_layout: FileSystemLayout
fossa_modules: List[Any]
toolchain: Optional[Toolchain]
remote_build: bool
"""
This class manages the overall process of building third-party dependencies, including the set
of dependencies to build, build types, and the directories to install dependencies.
"""
def __init__(self) -> None:
self.fs_layout = FileSystemLayout()
self.linuxbrew_dir = None
self.additional_allowed_shared_lib_paths = set()
self.toolchain = None
self.fossa_modules = []
def parse_args(self) -> None:
self.args = parse_cmd_line_args()
self.remote_build = self.args.remote_build_server and self.args.remote_build_dir
if self.remote_build:
return
if self.args.make_parallelism:
os.environ['YB_MAKE_PARALLELISM'] = str(self.args.make_parallelism)
self.download_manager = DownloadManager(
should_add_checksum=self.args.add_checksum,
download_dir=self.fs_layout.tp_download_dir)
single_compiler_type = None
if self.args.toolchain:
self.toolchain = ensure_toolchain_installed(
self.download_manager, self.args.toolchain)
compiler_prefix = self.toolchain.toolchain_root
if self.toolchain.toolchain_type != 'linuxbrew':
single_compiler_type = self.toolchain.get_compiler_type()
self.toolchain.write_url_and_path_files()
else:
compiler_prefix = self.args.compiler_prefix
single_compiler_type = self.args.single_compiler_type
self.compiler_choice = CompilerChoice(
single_compiler_type=single_compiler_type,
compiler_prefix=compiler_prefix,
compiler_suffix=self.args.compiler_suffix,
devtoolset=self.args.devtoolset,
use_compiler_wrapper=self.args.use_compiler_wrapper,
use_ccache=self.args.use_ccache
)
def finish_initialization(self) -> None:
self.compiler_choice.finish_initialization()
self.populate_dependencies()
self.select_dependencies_to_build()
if self.compiler_choice.devtoolset is not None:
activate_devtoolset(self.compiler_choice.devtoolset)
def populate_dependencies(self) -> None:
# We have to use get_build_def_module to access submodules of build_definitions,
# otherwise MyPy gets confused.
self.dependencies = [
# Avoiding a name collision with the standard zlib module, hence "zlib_dependency".
get_build_def_module('zlib_dependency').ZLibDependency(),
get_build_def_module('lz4').LZ4Dependency(),
get_build_def_module('openssl').OpenSSLDependency(),
get_build_def_module('libev').LibEvDependency(),
get_build_def_module('rapidjson').RapidJsonDependency(),
get_build_def_module('squeasel').SqueaselDependency(),
get_build_def_module('curl').CurlDependency(),
get_build_def_module('hiredis').HiRedisDependency(),
get_build_def_module('cqlsh').CQLShDependency(),
get_build_def_module('redis_cli').RedisCliDependency(),
get_build_def_module('flex').FlexDependency(),
get_build_def_module('bison').BisonDependency(),
get_build_def_module('libedit').LibEditDependency(),
get_build_def_module('openldap').OpenLDAPDependency(),
]
if is_linux():
self.dependencies += [
get_build_def_module('libuuid').LibUuidDependency(),
]
using_both_gcc_and_clang = (
not self.compiler_choice.use_only_gcc() and
not self.compiler_choice.use_only_clang())
if using_both_gcc_and_clang:
# Old LLVM. We will migrate away from this.
self.dependencies.append(get_build_def_module('llvm7').LLVM7Dependency())
standalone_llvm7_toolchain = self.toolchain and self.toolchain.toolchain_type == 'llvm7'
if using_both_gcc_and_clang or standalone_llvm7_toolchain:
self.dependencies.append(
get_build_def_module('llvm7_libcxx').Llvm7LibCXXDependency())
llvm_major_version: Optional[int] = self.compiler_choice.get_llvm_major_version()
if (self.compiler_choice.use_only_clang() and
llvm_major_version is not None and llvm_major_version >= 10):
if self.toolchain and self.toolchain.toolchain_type == 'llvm12':
# Still use libunwind/libcxxabi libraries from LLVM 11.x.
# TODO: fix the compilation errors and upgrade.
llvm_version_str = '11.1.0'
else:
llvm_version_str = self.compiler_choice.get_llvm_version_str()
self.dependencies += [
# New LLVM. We will keep supporting new LLVM versions here.
get_build_def_module('llvm1x_libunwind').Llvm1xLibUnwindDependency(
version=llvm_version_str
),
get_build_def_module('llvm1x_libcxx').Llvm1xLibCxxAbiDependency(
version=llvm_version_str
),
get_build_def_module('llvm1x_libcxx').Llvm1xLibCxxDependency(
version=llvm_version_str
),
]
else:
self.dependencies.append(get_build_def_module('libunwind').LibUnwindDependency())
self.dependencies.append(get_build_def_module('libbacktrace').LibBacktraceDependency())
self.dependencies += [
get_build_def_module('icu4c').Icu4cDependency(),
get_build_def_module('protobuf').ProtobufDependency(),
get_build_def_module('crypt_blowfish').CryptBlowfishDependency(),
get_build_def_module('boost').BoostDependency(),
get_build_def_module('gflags').GFlagsDependency(),
get_build_def_module('glog').GLogDependency(),
get_build_def_module('gperftools').GPerfToolsDependency(),
get_build_def_module('gmock').GMockDependency(),
get_build_def_module('snappy').SnappyDependency(),
get_build_def_module('crcutil').CRCUtilDependency(),
get_build_def_module('libcds').LibCDSDependency(),
get_build_def_module('libuv').LibUvDependency(),
get_build_def_module('cassandra_cpp_driver').CassandraCppDriverDependency(),
]
def select_dependencies_to_build(self) -> None:
self.selected_dependencies = []
if self.args.dependencies:
names = set([dep.name for dep in self.dependencies])
for dep in self.args.dependencies:
if dep not in names:
fatal("Unknown dependency name: %s. Valid dependency names:\n%s",
dep,
(" " * 4 + ("\n" + " " * 4).join(sorted(names))))
for dep in self.dependencies:
if dep.name in self.args.dependencies:
self.selected_dependencies.append(dep)
elif self.args.skip:
skipped = set(self.args.skip.split(','))
log("Skipping dependencies: %s", sorted(skipped))
self.selected_dependencies = []
for dependency in self.dependencies:
if dependency.name in skipped:
skipped.remove(dependency.name)
else:
self.selected_dependencies.append(dependency)
if skipped:
raise ValueError("Unknown dependencies, cannot skip: %s" % sorted(skipped))
else:
self.selected_dependencies = self.dependencies
def run(self) -> None:
self.compiler_choice.set_compiler(
'clang' if self.compiler_choice.use_only_clang() else 'gcc')
if self.args.clean or self.args.clean_downloads:
self.fs_layout.clean(self.selected_dependencies, self.args.clean_downloads)
self.prepare_out_dirs()
os.environ['PATH'] = ':'.join([
os.path.join(self.fs_layout.tp_installed_common_dir, 'bin'),
os.path.join(self.fs_layout.tp_installed_llvm7_common_dir, 'bin'),
os.environ['PATH']
])
self.build_one_build_type(BUILD_TYPE_COMMON)
build_types = []
if is_linux():
build_types.append(BUILD_TYPE_UNINSTRUMENTED)
if self.compiler_choice.use_only_gcc():
if is_linux() and not self.compiler_choice.using_linuxbrew():
# Starting to support ASAN for GCC compilers
# (not for the current GCC 5.5 build on Linuxbrew, though).
build_types.append(BUILD_TYPE_ASAN)
else:
if self.compiler_choice.using_linuxbrew() or is_mac():
build_types.append(BUILD_TYPE_CLANG_UNINSTRUMENTED)
if is_linux() and not self.args.skip_sanitizers:
build_types.append(BUILD_TYPE_ASAN)
build_types.append(BUILD_TYPE_TSAN)
log(f"Full list of build types: {build_types}")
for build_type in build_types:
self.build_one_build_type(build_type)
with open(os.path.join(YB_THIRDPARTY_DIR, 'fossa_modules.yml'), 'w') as output_file:
yaml.dump(self.fossa_modules, output_file, indent=2)
def get_build_types(self) -> List[str]:
build_types: List[str] = list(BUILD_TYPES)
if is_linux() and self.args.single_compiler_type is not None:
build_types.remove(BUILD_TYPE_CLANG_UNINSTRUMENTED)
return build_types
def prepare_out_dirs(self) -> None:
build_types = self.get_build_types()
dirs = [
os.path.join(self.fs_layout.tp_installed_dir, build_type) for build_type in build_types
]
libcxx_dirs = [os.path.join(dir, 'libcxx') for dir in dirs]
for dir in dirs + libcxx_dirs:
lib_dir = os.path.join(dir, 'lib')
mkdir_if_missing(lib_dir)
mkdir_if_missing(os.path.join(dir, 'include'))
# On some systems, autotools installs libraries to lib64 rather than lib. Fix
# this by setting up lib64 as a symlink to lib. We have to do this step first
# to handle cases where one third-party library depends on another. Make sure
# we create a relative symlink so that the entire PREFIX_DIR could be moved,
# e.g. after it is packaged and then downloaded on a different build node.
lib64_dir = os.path.join(dir, 'lib64')
if os.path.exists(lib64_dir):
if os.path.islink(lib64_dir):
continue
remove_path(lib64_dir)
os.symlink('lib', lib64_dir)
def add_include_path(self, include_path: str) -> None:
cmd_line_arg = f'-I{include_path}'
self.preprocessor_flags.append(cmd_line_arg)
self.compiler_flags.append(cmd_line_arg)
def init_compiler_independent_flags(self, dep: Dependency) -> None:
"""
Initialize compiler and linker flags for a particular build type. We try to limit this
function to flags that will work for most compilers we are using, which include various
versions of GCC and Clang.
"""
self.preprocessor_flags = []
self.ld_flags = []
self.executable_only_ld_flags = []
self.compiler_flags = []
self.c_flags = []
self.cxx_flags = []
self.libs = []
self.add_linuxbrew_flags()
for include_dir_component in set([BUILD_TYPE_COMMON, self.build_type]):
self.add_include_path(os.path.join(
self.fs_layout.tp_installed_dir, include_dir_component, 'include'))
self.add_lib_dir_and_rpath(os.path.join(
self.fs_layout.tp_installed_dir, include_dir_component, 'lib'))
self.compiler_flags += self.preprocessor_flags
# -fPIC is there to always generate position-independent code, even for static libraries.
self.compiler_flags += ['-fno-omit-frame-pointer', '-fPIC', '-O2', '-Wall']
if is_linux():
# On Linux, ensure we set a long enough rpath so we can change it later with chrpath or
# a similar tool.
self.add_rpath(PLACEHOLDER_RPATH)
self.dylib_suffix = "so"
elif is_mac():
self.dylib_suffix = "dylib"
# YugaByte builds with C++11, which on OS X requires using libc++ as the standard
# library implementation. Some of the dependencies do not compile against libc++ by
# default, so we specify it explicitly.
self.cxx_flags.append("-stdlib=libc++")
self.ld_flags += ["-lc++", "-lc++abi"]
# Build for macOS Mojave or later. See https://bit.ly/37myHbk
self.compiler_flags.append("-mmacosx-version-min=10.14")
self.ld_flags.append("-Wl,-headerpad_max_install_names")
else:
fatal("Unsupported platform: {}".format(platform.system()))
# The C++ standard must match CMAKE_CXX_STANDARD in the top-level CMakeLists.txt file in
# the YugabyteDB source tree.
self.cxx_flags.append('-std=c++14')
self.cxx_flags.append('-frtti')
if self.build_type == BUILD_TYPE_ASAN:
self.compiler_flags += ASAN_FLAGS
if self.build_type == BUILD_TYPE_TSAN:
self.compiler_flags += TSAN_FLAGS
def add_linuxbrew_flags(self) -> None:
if self.compiler_choice.using_linuxbrew():
lib_dir = os.path.join(self.compiler_choice.get_linuxbrew_dir(), 'lib')
self.ld_flags.append(" -Wl,-dynamic-linker={}".format(os.path.join(lib_dir, 'ld.so')))
self.add_lib_dir_and_rpath(lib_dir)
def add_lib_dir_and_rpath(self, lib_dir: str) -> None:
self.ld_flags.append("-L{}".format(lib_dir))
self.add_rpath(lib_dir)
def prepend_lib_dir_and_rpath(self, lib_dir: str) -> None:
self.ld_flags.insert(0, "-L{}".format(lib_dir))
self.prepend_rpath(lib_dir)
def add_rpath(self, path: str) -> None:
log("Adding RPATH: %s", path)
self.ld_flags.append(get_rpath_flag(path))
self.additional_allowed_shared_lib_paths.add(path)
def prepend_rpath(self, path: str) -> None:
self.ld_flags.insert(0, get_rpath_flag(path))
def log_prefix(self, dep: Dependency) -> str:
return '{} ({})'.format(dep.name, self.build_type)
def build_with_configure(
self,
log_prefix: str,
extra_args: List[str] = [],
configure_cmd: List[str] = ['./configure'],
install: List[str] = ['install'],
run_autogen: bool = False,
autoconf: bool = False,
src_subdir_name: Optional[str] = None) -> None:
os.environ["YB_REMOTE_COMPILATION"] = "0"
dir_for_build = os.getcwd()
if src_subdir_name:
dir_for_build = os.path.join(dir_for_build, src_subdir_name)
with PushDir(dir_for_build):
log("Building in %s", dir_for_build)
if run_autogen:
log_output(log_prefix, ['./autogen.sh'])
if autoconf:
log_output(log_prefix, ['autoreconf', '-i'])
configure_args = (
configure_cmd.copy() + ['--prefix={}'.format(self.prefix)] + extra_args
)
log_output(log_prefix, configure_args)
log_output(log_prefix, ['make', '-j{}'.format(get_make_parallelism())])
if install:
log_output(log_prefix, ['make'] + install)
def build_with_cmake(
self,
dep: Dependency,
extra_args: List[str] = [],
use_ninja_if_available: bool = True,
src_subdir_name: Optional[str] = None,
extra_build_tool_args: List[str] = [],
should_install: bool = True,
install_targets: List[str] = ['install'],
shared_and_static: bool = False) -> None:
build_tool = 'make'
if use_ninja_if_available:
ninja_available = is_ninja_available()
log('Ninja is %s', 'available' if ninja_available else 'unavailable')
if ninja_available:
build_tool = 'ninja'
log("Building dependency %s using CMake. Build tool: %s", dep, build_tool)
log_prefix = self.log_prefix(dep)
os.environ["YB_REMOTE_COMPILATION"] = "0"
remove_path('CMakeCache.txt')
remove_path('CMakeFiles')
src_path = self.fs_layout.get_source_path(dep)
if src_subdir_name is not None:
src_path = os.path.join(src_path, src_subdir_name)
args = ['cmake', src_path]
if build_tool == 'ninja':
args += ['-G', 'Ninja']
args += self.get_common_cmake_flag_args(dep)
if extra_args is not None:
args += extra_args
args += dep.get_additional_cmake_args(self)
if shared_and_static and any(arg.startswith('-DBUILD_SHARED_LIBS=') for arg in args):
raise ValueError(
"shared_and_static=True is specified but CMake arguments already mention "
"-DBUILD_SHARED_LIBS: %s" % args)
if '-DBUILD_SHARED_LIBS=OFF' not in args and not shared_and_static:
# TODO: a better approach for setting CMake arguments from multiple places.
args.append('-DBUILD_SHARED_LIBS=ON')
def build_internal(even_more_cmake_args: List[str] = []) -> None:
final_cmake_args = args + even_more_cmake_args
log("CMake command line (one argument per line):\n%s" %
"\n".join([(" " * 4 + sanitize_flags_line_for_log(line))
for line in final_cmake_args]))
log_output(log_prefix, final_cmake_args)
if build_tool == 'ninja':
dep.postprocess_ninja_build_file(self, 'build.ninja')
build_tool_cmd = [
build_tool, '-j{}'.format(get_make_parallelism())
] + extra_build_tool_args
log_output(log_prefix, build_tool_cmd)
if should_install:
log_output(log_prefix, [build_tool] + install_targets)
with open('compile_commands.json') as compile_commands_file:
compile_commands = json.load(compile_commands_file)
for command_item in compile_commands:
command_args = command_item['command'].split()
if self.build_type == BUILD_TYPE_ASAN:
assert_list_contains(command_args, '-fsanitize=address')
assert_list_contains(command_args, '-fsanitize=undefined')
if self.build_type == BUILD_TYPE_TSAN:
assert_list_contains(command_args, '-fsanitize=thread')
if shared_and_static:
for build_shared_libs_value, subdir_name in (
('ON', 'shared'),
('OFF', 'static')
):
build_dir = os.path.join(os.getcwd(), subdir_name)
mkdir_if_missing(build_dir)
build_shared_libs_cmake_arg = '-DBUILD_SHARED_LIBS=%s' % build_shared_libs_value
log("Building dependency '%s' for build type '%s' with option: %s",
dep.name, self.build_type, build_shared_libs_cmake_arg)
with PushDir(build_dir):
build_internal([build_shared_libs_cmake_arg])
else:
build_internal()
def build_one_build_type(self, build_type: str) -> None:
if (build_type != BUILD_TYPE_COMMON and
self.args.build_type is not None and
build_type != self.args.build_type):
log("Skipping build type %s because build type %s is specified in the arguments",
build_type, self.args.build_type)
return
self.set_build_type(build_type)
build_group = (
BUILD_GROUP_COMMON if build_type == BUILD_TYPE_COMMON else BUILD_GROUP_INSTRUMENTED
)
for dep in self.selected_dependencies:
if build_group == dep.build_group:
self.perform_pre_build_steps(dep)
should_build = dep.should_build(self)
should_rebuild = self.should_rebuild_dependency(dep)
if should_build and should_rebuild:
self.build_dependency(dep)
else:
log(f"Skipping dependency {dep.name}: "
f"should_build={should_build}, "
f"should_rebuild={should_rebuild}.")
def get_install_prefix_with_qualifier(self, qualifier: Optional[str] = None) -> str:
return os.path.join(
self.fs_layout.tp_installed_dir,
self.build_type + ('_%s' % qualifier if qualifier else ''))
def set_build_type(self, build_type: str) -> None:
self.build_type = build_type
self.prefix = self.get_install_prefix_with_qualifier(qualifier=None)
self.prefix_bin = os.path.join(self.prefix, 'bin')
self.prefix_lib = os.path.join(self.prefix, 'lib')
self.prefix_include = os.path.join(self.prefix, 'include')
if self.compiler_choice.building_with_clang(build_type):
compiler = 'clang'
else:
compiler = 'gcc'
self.compiler_choice.set_compiler(compiler)
heading("Building {} dependencies (compiler type: {})".format(
build_type, self.compiler_choice.compiler_type))
log("Compiler type: %s", self.compiler_choice.compiler_type)
log("C compiler: %s", self.compiler_choice.get_c_compiler())
log("C++ compiler: %s", self.compiler_choice.get_cxx_compiler())
def init_flags(self, dep: Dependency) -> None:
"""
Initializes compiler and linker flags. No flag customizations should be transferred from one
dependency to another.
"""
self.init_compiler_independent_flags(dep)
if not is_mac() and self.compiler_choice.building_with_clang(self.build_type):
# Special setup for Clang on Linux.
compiler_choice = self.compiler_choice
llvm_major_version: Optional[int] = compiler_choice.get_llvm_major_version()
if (compiler_choice.single_compiler_type == 'clang' and
llvm_major_version is not None and llvm_major_version >= 10):
# We are assuming that --single-compiler-type will only be used for Clang 10 and
# newer.
self.init_linux_clang1x_flags(dep)
elif llvm_major_version == 7 or compiler_choice.single_compiler_type is None:
# We are either building with LLVM 7 without Linuxbrew, or this is the
# Linuxbrew-based build with both GCC and Clang (which will go away).
self.init_linux_clang7_flags(dep)
else:
raise ValueError(f"Unsupported LLVM major version: {llvm_major_version}")
def get_libcxx_dirs(self, libcxx_installed_suffix: str) -> Tuple[str, str]:
libcxx_installed_path = os.path.join(
self.fs_layout.tp_installed_dir, libcxx_installed_suffix, 'libcxx')
libcxx_installed_include = os.path.join(libcxx_installed_path, 'include', 'c++', 'v1')
libcxx_installed_lib = os.path.join(libcxx_installed_path, 'lib')
return libcxx_installed_include, libcxx_installed_lib
def init_linux_clang7_flags(self, dep: Dependency) -> None:
"""
Flags used to build code with Clang 7 that we build here. As we move to newer versions of
Clang, this function will go away.
"""
if self.build_type == BUILD_TYPE_TSAN:
# Ensure that TSAN runtime is linked statically into every executable. TSAN runtime
# uses -fPIE while our shared libraries use -fPIC, and therefore TSAN runtime can only
# be linked statically into executables. TSAN runtime can't be built with -fPIC because
# that would create significant performance issues.
self.executable_only_ld_flags += ['-fsanitize=thread']
# This is used to build code with libc++ and Clang 7 built as part of thirdparty.
stdlib_suffix = self.build_type
stdlib_path = os.path.join(self.fs_layout.tp_installed_dir, stdlib_suffix, 'libcxx')
stdlib_include = os.path.join(stdlib_path, 'include', 'c++', 'v1')
stdlib_lib = os.path.join(stdlib_path, 'lib')
self.cxx_flags.insert(0, '-nostdinc++')
self.cxx_flags.insert(0, '-isystem')
self.cxx_flags.insert(1, stdlib_include)
self.cxx_flags.insert(0, '-stdlib=libc++')
# Clang complains about argument unused during compilation: '-stdlib=libc++' when both
# -stdlib=libc++ and -nostdinc++ are specified.
self.cxx_flags.insert(0, '-Wno-error=unused-command-line-argument')
self.prepend_lib_dir_and_rpath(stdlib_lib)
if self.compiler_choice.using_linuxbrew():
self.compiler_flags.append('--gcc-toolchain={}'.format(
self.compiler_choice.get_linuxbrew_dir()))
if self.toolchain and self.toolchain.toolchain_type == 'llvm7':
# This is needed when building with Clang 7 but without Linuxbrew.
# TODO: this might only be needed due to using an old version of libunwind that is
# different from libunwind included in the LLVM 7 repository. Just a hypothesis.
self.ld_flags.append('-lgcc_s')
def init_linux_clang1x_flags(self, dep: Dependency) -> None:
"""
Flags for Clang 10 and beyond. We are using LLVM-supplied libunwind and compiler-rt in this
configuration.
"""
self.ld_flags.append('-rtlib=compiler-rt')
if self.build_type == BUILD_TYPE_COMMON:
log("Not configuring any special Clang 10+ flags for build type %s", self.build_type)
return
# TODO mbautin: refactor to polymorphism
is_libcxxabi = dep.name.endswith('_libcxxabi')
is_libcxx = dep.name.endswith('_libcxx')
log("Dependency name: %s, is_libcxxabi: %s, is_libcxx: %s",
dep.name, is_libcxxabi, is_libcxx)
if self.build_type == BUILD_TYPE_ASAN:
self.compiler_flags.append('-shared-libasan')
if is_libcxxabi:
# To avoid an infinite loop in UBSAN.
# https://monorail-prod.appspot.com/p/chromium/issues/detail?id=609786
# This comment:
# https://gist.githubusercontent.com/mbautin/ad9ea4715669da3b3a5fb9495659c4a9/raw
self.compiler_flags.append('-fno-sanitize=vptr')
# TODO mbautin: a centralized way to find paths inside LLVM installation.
assert self.compiler_choice.cc is not None
compiler_rt_lib_dir_ancestor = os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(self.compiler_choice.cc))),
'lib', 'clang')
compiler_rt_lib_dir_candidates = []
nonexistent_compiler_rt_lib_dirs = []
for llvm_version_subdir in os.listdir(compiler_rt_lib_dir_ancestor):
compiler_rt_lib_dir = os.path.join(
compiler_rt_lib_dir_ancestor, llvm_version_subdir, 'lib', 'linux')
if os.path.isdir(compiler_rt_lib_dir):
compiler_rt_lib_dir_candidates.append(compiler_rt_lib_dir)
else:
nonexistent_compiler_rt_lib_dirs.append(compiler_rt_lib_dir)
if len(compiler_rt_lib_dir_candidates) != 1:
if not compiler_rt_lib_dir_candidates:
raise IOError(
"Could not find the compiler-rt library directory, looked at: %s" %
nonexistent_compiler_rt_lib_dirs)
raise IOError(
"Multiple possible compiler-rt library directories: %s" %
compiler_rt_lib_dir_candidates)
assert len(compiler_rt_lib_dir_candidates) == 1
compiler_rt_lib_dir = compiler_rt_lib_dir_candidates[0]
if not os.path.isdir(compiler_rt_lib_dir):
raise IOError("Directory does not exist: %s", compiler_rt_lib_dir)
self.add_lib_dir_and_rpath(compiler_rt_lib_dir)
self.ld_flags.append('-lclang_rt.ubsan_minimal-x86_64')
self.ld_flags += ['-lunwind']
libcxx_installed_include, libcxx_installed_lib = self.get_libcxx_dirs(self.build_type)
log("libc++ include directory: %s", libcxx_installed_include)
log("libc++ library directory: %s", libcxx_installed_lib)
if not is_libcxx and not is_libcxxabi:
log("Adding special compiler/linker flags for Clang 10+ for dependencies other than "
"libc++")
self.ld_flags += ['-lc++', '-lc++abi']
self.cxx_flags = [
'-stdlib=libc++',
'-isystem',
libcxx_installed_include,
'-nostdinc++'
] + self.cxx_flags
self.prepend_lib_dir_and_rpath(libcxx_installed_lib)
if is_libcxx:
log("Adding special compiler/linker flags for Clang 10 or newer for libc++")
# This is needed for libc++ to find libc++abi headers.
assert_dir_exists(libcxx_installed_include)
self.cxx_flags.append('-I%s' % libcxx_installed_include)
if is_libcxx or is_libcxxabi:
log("Adding special linker flags for Clang 10 or newer for libc++ or libc++abi")
# libc++abi needs to be able to find libcxx at runtime, even though it can't always find
# it at build time because libc++abi is built first.
self.add_rpath(libcxx_installed_lib)
self.cxx_flags.append('-Wno-error=unused-command-line-argument')
log("Flags after the end of setup for Clang 10 or newer:")
log("cxx_flags : %s", self.cxx_flags)
log("c_flags : %s", self.c_flags)
log("ld_flags : %s", self.ld_flags)
def get_effective_compiler_flags(self, dep: Dependency) -> List[str]:
return self.compiler_flags + dep.get_additional_compiler_flags(self)
def get_effective_cxx_flags(self, dep: Dependency) -> List[str]:
return (self.cxx_flags +
self.get_effective_compiler_flags(dep) +
dep.get_additional_cxx_flags(self))
def get_effective_c_flags(self, dep: Dependency) -> List[str]:
return (self.c_flags +
self.get_effective_compiler_flags(dep) +
dep.get_additional_c_flags(self))
def get_effective_ld_flags(self, dep: Dependency) -> List[str]:
return self.ld_flags + dep.get_additional_ld_flags(self)
def get_effective_executable_ld_flags(self, dep: Dependency) -> List[str]:
return self.ld_flags + self.executable_only_ld_flags + dep.get_additional_ld_flags(self)
def get_effective_preprocessor_flags(self, dep: Dependency) -> List[str]:
return list(self.preprocessor_flags)
def get_common_cmake_flag_args(self, dep: Dependency) -> List[str]:
c_flags_str = ' '.join(self.get_effective_c_flags(dep))
cxx_flags_str = ' '.join(self.get_effective_cxx_flags(dep))
# TODO: we are not using this. What is the best way to plug this into CMake?
preprocessor_flags_str = ' '.join(self.get_effective_preprocessor_flags(dep))
ld_flags_str = ' '.join(self.get_effective_ld_flags(dep))
exe_ld_flags_str = ' '.join(self.get_effective_executable_ld_flags(dep))
return [
'-DCMAKE_C_FLAGS={}'.format(c_flags_str),
'-DCMAKE_CXX_FLAGS={}'.format(cxx_flags_str),
'-DCMAKE_SHARED_LINKER_FLAGS={}'.format(ld_flags_str),
'-DCMAKE_EXE_LINKER_FLAGS={}'.format(exe_ld_flags_str),
'-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',
'-DCMAKE_INSTALL_PREFIX={}'.format(dep.get_install_prefix(self)),
'-DCMAKE_POSITION_INDEPENDENT_CODE=ON'
]
def perform_pre_build_steps(self, dep: Dependency) -> None:
log("")
colored_log(YELLOW_COLOR, SEPARATOR)
colored_log(YELLOW_COLOR, "Building %s (%s)", dep.name, self.build_type)
colored_log(YELLOW_COLOR, SEPARATOR)
self.download_manager.download_dependency(
dep=dep,
src_path=self.fs_layout.get_source_path(dep),
archive_path=self.fs_layout.get_archive_path(dep))
archive_name = dep.get_archive_name()
if archive_name:
archive_path = os.path.join('downloads', archive_name)
self.fossa_modules.append({
"fossa_module": {
"name": f"{dep.name}-{dep.version}",
"type": "raw",
"target": os.path.basename(archive_path)
},
"yb_metadata": {
"url": dep.download_url,
"sha256sum": self.download_manager.get_expected_checksum(archive_name)
}
})
def build_dependency(self, dep: Dependency) -> None:
self.init_flags(dep)
# This is needed at least for glog to be able to find gflags.
self.add_rpath(os.path.join(self.fs_layout.tp_installed_dir, self.build_type, 'lib'))
if self.build_type != BUILD_TYPE_COMMON:
# Needed to find libunwind for Clang 10 when using compiler-rt.
self.add_rpath(os.path.join(self.fs_layout.tp_installed_dir, BUILD_TYPE_COMMON, 'lib'))
if self.args.download_extract_only:
log("Skipping build of dependency %s, build type %s, --download-extract-only is "
"specified.", dep.name, self.build_type)
return
env_vars: Dict[str, Optional[str]] = {
"CPPFLAGS": " ".join(self.preprocessor_flags)
}
log_and_set_env_var_to_list(env_vars, 'CXXFLAGS', self.get_effective_cxx_flags(dep))
log_and_set_env_var_to_list(env_vars, 'CFLAGS', self.get_effective_c_flags(dep))
log_and_set_env_var_to_list(env_vars, 'LDFLAGS', self.get_effective_ld_flags(dep))
log_and_set_env_var_to_list(env_vars, 'LIBS', self.libs)
log_and_set_env_var_to_list(
env_vars, 'CPPFLAGS', self.get_effective_preprocessor_flags(dep))
if self.build_type == BUILD_TYPE_ASAN:
# To avoid errors similar to:
# https://gist.githubusercontent.com/mbautin/4b8eec566f54bcc35706dcd97cab1a95/raw
#
# This could also be fixed to some extent by the compiler flags
# -mllvm -asan-use-private-alias=1
# but applying that flag to all builds is complicated in practice and is probably
# best done using a compiler wrapper script, which would slow things down.
env_vars["ASAN_OPTIONS"] = "detect_odr_violation=0"
with PushDir(self.create_build_dir_and_prepare(dep)):
with EnvVarContext(**env_vars):
write_env_vars('yb_dependency_env.sh')
dep.build(self)
self.save_build_stamp_for_dependency(dep)
log("")
log("Finished building %s (%s)", dep.name, self.build_type)
log("")
    # Determines if we should rebuild a component with the given name based on the existing "stamp"
    # file and the current value of the "stamp" (based on Git SHA1 and local changes) for the
    # component. Returns True if the dependency has to be rebuilt.
def should_rebuild_dependency(self, dep: Dependency) -> bool:
stamp_path = self.fs_layout.get_build_stamp_path_for_dependency(dep, self.build_type)
old_build_stamp = None
if os.path.exists(stamp_path):
with open(stamp_path, 'rt') as inp:
old_build_stamp = inp.read()
new_build_stamp = self.get_build_stamp_for_dependency(dep)
if dep.dir_name is not None:
src_dir = self.fs_layout.get_source_path(dep)
if not os.path.exists(src_dir):
log("Have to rebuild %s (%s): source dir %s does not exist",
dep.name, self.build_type, src_dir)
return True
if old_build_stamp == new_build_stamp:
log("Not rebuilding %s (%s) -- nothing changed.", dep.name, self.build_type)
return False
log("Have to rebuild %s (%s):", dep.name, self.build_type)
log("Old build stamp for %s (from %s):\n%s",
dep.name, stamp_path, indent_lines(old_build_stamp))
log("New build stamp for %s:\n%s",
dep.name, indent_lines(new_build_stamp))
return True
    # Come up with a string that allows us to tell when to rebuild a particular third-party
    # dependency. The resulting "build stamp" string is returned by this function.
def get_build_stamp_for_dependency(self, dep: Dependency) -> str:
module_name = dep.__class__.__module__
assert isinstance(module_name, str), "Dependency's module is not a string: %s" % module_name
assert module_name.startswith('build_definitions.'), "Invalid module name: %s" % module_name
module_name_components = module_name.split('.')
assert len(module_name_components) == 2, (
"Expected two components: %s" % module_name_components)
module_name_final = module_name_components[-1]
input_files_for_stamp = [
'python/yugabyte_db_thirdparty/yb_build_thirdparty_main.py',
'build_thirdparty.sh',
os.path.join('python', 'build_definitions', '%s.py' % module_name_final)
]
for path in input_files_for_stamp:
abs_path = os.path.join(YB_THIRDPARTY_DIR, path)
if not os.path.exists(abs_path):
fatal("File '%s' does not exist -- expecting it to exist when creating a 'stamp' "
"for the build configuration of '%s'.", abs_path, dep.name)
with PushDir(YB_THIRDPARTY_DIR):
git_commit_sha1 = subprocess.check_output(
['git', 'log', '--pretty=%H', '-n', '1'] + input_files_for_stamp
).strip().decode('utf-8')
build_stamp = 'git_commit_sha1={}\n'.format(git_commit_sha1)
for git_extra_arg in (None, '--cached'):
git_extra_args = [git_extra_arg] if git_extra_arg else []
git_diff = subprocess.check_output(
['git', 'diff'] + git_extra_args + input_files_for_stamp)
git_diff_sha256 = hashlib.sha256(git_diff).hexdigest()
build_stamp += 'git_diff_sha256{}={}\n'.format(
'_'.join(git_extra_args).replace('--', '_'),
git_diff_sha256)
return build_stamp
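    # Illustrative shape of the returned build stamp (documentation only; the actual hash values
    # depend on the state of the repository):
    #
    #   git_commit_sha1=<40-hex-digit SHA1 of the last commit touching the input files>
    #   git_diff_sha256=<SHA256 of the unstaged diff of the input files>
    #   git_diff_sha256_cached=<SHA256 of the staged diff of the input files>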
def save_build_stamp_for_dependency(self, dep: Dependency) -> None:
stamp = self.get_build_stamp_for_dependency(dep)
stamp_path = self.fs_layout.get_build_stamp_path_for_dependency(dep, self.build_type)
log("Saving new build stamp to '%s':\n%s", stamp_path, indent_lines(stamp))
with open(stamp_path, "wt") as out:
out.write(stamp)
def create_build_dir_and_prepare(self, dep: Dependency) -> str:
src_dir = self.fs_layout.get_source_path(dep)
if not os.path.isdir(src_dir):
fatal("Directory '{}' does not exist".format(src_dir))
build_dir = self.fs_layout.get_build_dir_for_dependency(dep, self.build_type)
mkdir_if_missing(build_dir)
if dep.copy_sources:
log("Bootstrapping %s from %s", build_dir, src_dir)
subprocess.check_call(['rsync', '-a', src_dir + '/', build_dir])
return build_dir
def is_release_build(self) -> bool:
"""
        Distinguishes build types that are potentially used in production releases from
        build types that are only used in testing (e.g. ASAN+UBSAN, TSAN).
"""
return self.build_type in [
BUILD_TYPE_COMMON, BUILD_TYPE_UNINSTRUMENTED, BUILD_TYPE_CLANG_UNINSTRUMENTED
]
def cmake_build_type_for_test_only_dependencies(self) -> str:
return 'Release' if self.is_release_build() else 'Debug'
def check_cxx_compiler_flag(self, flag: str) -> bool:
compiler_path = self.compiler_choice.get_cxx_compiler()
log(f"Checking if the compiler {compiler_path} accepts the flag {flag}")
process = subprocess.Popen(
[compiler_path, '-x', 'c++', flag, '-'],
stdin=subprocess.PIPE)
assert process.stdin is not None
process.stdin.write("int main() { return 0; }".encode('utf-8'))
process.stdin.close()
return process.wait() == 0
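    # The check above is roughly equivalent to the following shell pipeline (a sketch assuming
    # the configured C++ compiler is clang++; the flag being probed is a placeholder):
    #
    #   echo 'int main() { return 0; }' | clang++ -x c++ <flag-to-check> -
    #
    # An exit code of zero means the compiler accepted the flag.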
def add_checked_flag(self, flags: List[str], flag: str) -> None:
if self.check_cxx_compiler_flag(flag):
flags.append(flag)
def get_openssl_dir(self) -> str:
return os.path.join(self.fs_layout.tp_installed_common_dir)
def get_openssl_related_cmake_args(self) -> List[str]:
"""
Returns a list of CMake arguments to use to pick up the version of OpenSSL that we should be
using. Returns an empty list if the default OpenSSL installation should be used.
"""
openssl_dir = self.get_openssl_dir()
openssl_options = ['-DOPENSSL_ROOT_DIR=' + openssl_dir]
openssl_crypto_library = os.path.join(openssl_dir, 'lib', 'libcrypto.' + self.dylib_suffix)
openssl_ssl_library = os.path.join(openssl_dir, 'lib', 'libssl.' + self.dylib_suffix)
openssl_options += [
'-DOPENSSL_CRYPTO_LIBRARY=' + openssl_crypto_library,
'-DOPENSSL_SSL_LIBRARY=' + openssl_ssl_library,
'-DOPENSSL_LIBRARIES=%s;%s' % (openssl_crypto_library, openssl_ssl_library)
]
return openssl_options
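    # Illustrative shape of the returned list (documentation only, assuming dylib_suffix == "so"
    # and abbreviating the OpenSSL directory as <common>):
    #
    #   ['-DOPENSSL_ROOT_DIR=<common>',
    #    '-DOPENSSL_CRYPTO_LIBRARY=<common>/lib/libcrypto.so',
    #    '-DOPENSSL_SSL_LIBRARY=<common>/lib/libssl.so',
    #    '-DOPENSSL_LIBRARIES=<common>/lib/libcrypto.so;<common>/lib/libssl.so']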
|
the-stack_0_2490 | """MCMC sampling methods."""
import logging
import numpy as np
logger = logging.getLogger(__name__)
# TODO: combine ESS and Rhat? Consider transforming parameters to the allowed
# region to increase the acceptance ratio.
def eff_sample_size(chains):
"""Calculate the effective sample size for 1 or more chains.
See:
Gelman, Carlin, Stern, Dunson, Vehtari, Rubin: Bayesian Data Analysis, 2013.
Stan modeling language user's guide and reference manual, v. 2.14.0.
Parameters
----------
chains : np.array of shape (N,) or (M, N)
Samples of a parameter from an MCMC algorithm. No burn-in subtracted here!
Returns
-------
ess : float
"""
chains = np.atleast_2d(chains)
n_chains, n_samples = chains.shape
means = np.mean(chains, axis=1)
variances = np.var(chains, ddof=1, axis=1)
var_between = 0 if n_chains == 1 else n_samples * np.var(means, ddof=1)
var_within = np.mean(variances)
var_pooled = ((n_samples - 1.) * var_within + var_between) / n_samples
# autocovariances for lags 1..n_samples
# https://en.wikipedia.org/wiki/Autocorrelation#Estimation
n_padded = int(2**np.ceil(1 + np.log2(n_samples)))
freqs = np.fft.rfft(chains - means[:, None], n_padded)
autocov = np.fft.irfft(np.abs(freqs)**2)[:, :n_samples].real
autocov = autocov / np.arange(n_samples, 0, -1)
estimator_sum = 0.
lag = 1
while lag < n_samples:
# estimate multi-chain autocorrelation using variogram
temp = 1. - (var_within - np.mean(autocov[:, lag])) / var_pooled
# only use the first non-negative autocorrelations to avoid noise
if temp >= 0:
estimator_sum += temp
lag += 1
else:
break
ess = n_chains * n_samples / (1. + 2. * estimator_sum)
return ess
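# A minimal usage sketch (not part of the original module; the helper name is hypothetical):
# for independent draws the effective sample size should be close to the total number of
# samples, which makes this a quick sanity check of eff_sample_size.
def _demo_eff_sample_size(seed=0):
    rng = np.random.RandomState(seed)
    chains = rng.randn(4, 1000)  # 4 chains of 1000 i.i.d. standard normal draws
    return eff_sample_size(chains)  # expected to be roughly 4 * 1000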
def gelman_rubin(chains):
r"""Calculate the Gelman--Rubin convergence statistic.
Also known as the potential scale reduction factor, or \hat{R}.
Uses the split version, as in Stan.
See:
Gelman, Carlin, Stern, Dunson, Vehtari, Rubin: Bayesian Data Analysis, 2013.
Gelman, A. and D. B. Rubin: Inference from iterative simulation using
multiple sequences (with discussion). Statistical Science, 7:457-511, 1992.
Stan modeling language user's guide and reference manual, v. 2.14.0.
Parameters
----------
chains : np.array of shape (M, N)
Samples of a parameter from an MCMC algorithm, 1 row per chain. No burn-in subtracted here!
Returns
-------
psrf : float
Should be below 1.1 to support convergence, or at least below 1.2 for all parameters.
"""
chains = np.atleast_2d(chains)
n_chains, n_samples = chains.shape
# split chains in the middle
n_chains *= 2
n_samples //= 2 # drop 1 if odd
chains = chains[:, :2 * n_samples].reshape((n_chains, n_samples))
means = np.mean(chains, axis=1)
variances = np.var(chains, ddof=1, axis=1)
var_between = n_samples * np.var(means, ddof=1)
var_within = np.mean(variances)
var_pooled = ((n_samples - 1.) * var_within + var_between) / n_samples
# potential scale reduction factor, should be close to 1
psrf = np.sqrt(var_pooled / var_within)
return psrf
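# A minimal usage sketch (not part of the original module; the helper name is hypothetical):
# chains sampling the same target should give a potential scale reduction factor close to 1,
# while chains centered at different values give a value clearly above the 1.1 guideline.
def _demo_gelman_rubin(seed=0):
    rng = np.random.RandomState(seed)
    good_chains = rng.randn(4, 1000)                  # same target -> psrf close to 1
    bad_chains = good_chains + np.arange(4)[:, None]  # shifted chains -> psrf well above 1.1
    return gelman_rubin(good_chains), gelman_rubin(bad_chains)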
def nuts(n_iter,
params0,
target,
grad_target,
n_adapt=None,
target_prob=0.6,
max_depth=5,
seed=0,
info_freq=100,
max_retry_inits=20,
stepsize=None):
r"""Sample the target using the NUTS algorithm.
No-U-Turn Sampler, an improved version of the Hamiltonian (Markov Chain) Monte Carlo sampler.
Based on Algorithm 6 in
Hoffman & Gelman, depthMLR 15, 1351-1381, 2014.
Parameters
----------
n_iter : int
The number of iterations, including n_adapt and possible other warmup iterations.
params0 : np.array
Initial values for sampled parameters.
target : function
The target's log density to sample (possibly unnormalized).
grad_target : function
The gradient of target.
n_adapt : int, optional
The number of automatic adjustments to stepsize. Defaults to n_iter/2.
target_prob : float, optional
Desired average acceptance probability. (Parameter \delta in the original paper.)
max_depth : int, optional
Maximum recursion depth.
seed : int, optional
Seed for pseudo-random number generator.
info_freq : int, optional
How often to log progress to loglevel INFO.
max_retry_inits : int, optional
How many times to retry finding initial stepsize (if stepped outside allowed region).
stepsize : float, optional
Initial stepsize (will be still adapted). Defaults to finding by trial and error.
Returns
-------
samples : np.array
Samples from the MCMC algorithm, including those during adaptation.
"""
random_state = np.random.RandomState(seed)
n_adapt = n_adapt if n_adapt is not None else n_iter // 2
logger.info("NUTS: Performing {} iterations with {} adaptation steps.".format(n_iter, n_adapt))
target0 = target(params0)
if np.isinf(target0):
raise ValueError("NUTS: Bad initialization point {}, logpdf -> -inf.".format(params0))
# ********************************
# Find reasonable initial stepsize
# ********************************
if stepsize is None:
grad0 = grad_target(params0)
logger.debug("NUTS: Trying to find initial stepsize from point {} with gradient {}.".
format(params0, grad0))
init_tries = 0
while init_tries < max_retry_inits: # might step into region unallowed by priors
stepsize = np.exp(-init_tries)
init_tries += 1
momentum0 = random_state.randn(*params0.shape)
# leapfrog
momentum1 = momentum0 + 0.5 * stepsize * grad0
params1 = params0 + stepsize * momentum1
momentum1 += 0.5 * stepsize * grad_target(params1)
joint0 = target0 - 0.5 * momentum0.dot(momentum0)
joint1 = target(params1) - 0.5 * momentum1.dot(momentum1)
if np.isfinite(joint1):
break
else:
if init_tries == max_retry_inits:
raise ValueError(
"NUTS: Cannot find acceptable stepsize starting from point {}. All "
"trials ended in region with 0 probability.".format(params0))
# logger.debug("momentum0 {}, momentum1 {}, params1 {}, joint0 {}, joint1 {}"
# .format(momentum0, momentum1, params1, joint0, joint1))
logger.debug("NUTS: Problem finding acceptable stepsize, now {}. Retrying {}/{}."
.format(stepsize, init_tries, max_retry_inits))
plusminus = 1 if np.exp(joint1 - joint0) > 0.5 else -1
factor = 2. if plusminus == 1 else 0.5
while factor * np.exp(plusminus * (joint1 - joint0)) > 1.:
stepsize *= factor
if stepsize == 0. or stepsize > 1e7: # bounds as in STAN
raise SystemExit("NUTS: Found invalid stepsize {} starting from point {}."
.format(stepsize, params0))
# leapfrog
momentum1 = momentum0 + 0.5 * stepsize * grad0
params1 = params0 + stepsize * momentum1
momentum1 += 0.5 * stepsize * grad_target(params1)
joint1 = target(params1) - 0.5 * momentum1.dot(momentum1)
logger.debug("NUTS: Set initial stepsize {}.".format(stepsize))
# Some parameters from the NUTS paper, used for adapting the stepsize
target_stepsize = np.log(10. * stepsize)
log_avg_stepsize = 0.
accept_ratio = 0. # tends to target_prob
shrinkage = 0.05 # controls shrinkage accept_ratio to target_prob
ii_offset = 10. # stabilizes initialization
discount = -0.75 # reduce weight of past
# ********
# Sampling
# ********
samples = np.empty((n_iter + 1, ) + params0.shape)
samples[0, :] = params0
n_diverged = 0 # counter for proposals whose error diverged
n_outside = 0 # counter for proposals outside priors (pdf=0)
n_total = 0 # total number of proposals
for ii in range(1, n_iter + 1):
momentum0 = random_state.randn(*params0.shape)
samples_prev = samples[ii - 1, :]
log_joint0 = target(samples_prev) - 0.5 * momentum0.dot(momentum0)
log_slicevar = log_joint0 - random_state.exponential()
samples[ii, :] = samples_prev
params_left = samples_prev
params_right = samples_prev
momentum_left = momentum0
momentum_right = momentum0
depth = 0
n_ok = 1
all_ok = True # criteria for no U-turn, diverging error
while all_ok and depth <= max_depth:
direction = 1 if random_state.rand() < 0.5 else -1
if direction == -1:
params_left, momentum_left, _, _, params1, n_sub, sub_ok, mh_ratio, n_steps, \
is_div, is_out = _build_tree_nuts(
params_left, momentum_left, log_slicevar, -stepsize, depth, log_joint0,
target, grad_target, random_state)
else:
_, _, params_right, momentum_right, params1, n_sub, sub_ok, mh_ratio, n_steps, \
is_div, is_out = _build_tree_nuts(
params_right, momentum_right, log_slicevar, stepsize, depth, log_joint0,
target, grad_target, random_state)
if sub_ok == 1:
if random_state.rand() < float(n_sub) / n_ok:
samples[ii, :] = params1 # accept proposal
n_ok += n_sub
if not is_out: # params1 outside allowed region; don't count this as diverging error
n_diverged += is_div
n_outside += is_out
n_total += n_steps
all_ok = sub_ok and ((params_right - params_left).dot(momentum_left) >= 0) \
and ((params_right - params_left).dot(momentum_right) >= 0)
depth += 1
if depth > max_depth:
logger.debug("NUTS: Maximum recursion depth {} exceeded.".format(max_depth))
# adjust stepsize according to target acceptance ratio
if ii <= n_adapt:
accept_ratio = (1. - 1. / (ii + ii_offset)) * accept_ratio \
+ (target_prob - float(mh_ratio) / n_steps) / (ii + ii_offset)
log_stepsize = target_stepsize - np.sqrt(ii) / shrinkage * accept_ratio
log_avg_stepsize = ii ** discount * log_stepsize + \
(1. - ii ** discount) * log_avg_stepsize
stepsize = np.exp(log_stepsize)
elif ii == n_adapt + 1: # adaptation/warmup finished
stepsize = np.exp(log_avg_stepsize) # final stepsize
n_diverged = 0
n_outside = 0
n_total = 0
logger.info("NUTS: Adaptation/warmup finished. Sampling...")
logger.debug("NUTS: Set final stepsize {}.".format(stepsize))
if ii % info_freq == 0 and ii < n_iter:
logger.info("NUTS: Iterations performed: {}/{}...".format(ii, n_iter))
info_str = "NUTS: Acceptance ratio: {:.3f}".format(float(n_iter - n_adapt) / n_total)
if n_outside > 0:
info_str += ". After warmup {} proposals were outside of the region allowed by priors " \
"and rejected, decreasing acceptance ratio.".format(n_outside)
logger.info(info_str)
if n_diverged > 0:
logger.warning("NUTS: Diverged proposals after warmup (i.e. n_adapt={} steps): {}".format(
n_adapt, n_diverged))
return samples[1:, :]
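# A minimal usage sketch (not part of the original module; the helper name and the iteration
# count are illustrative assumptions): sampling a 1-D standard normal target with NUTS, using
# its analytic log density and gradient, and discarding the adaptation half of the chain.
def _demo_nuts(n_iter=2000, seed=1):
    def log_density(params):
        return -0.5 * float(params.dot(params))
    def grad_log_density(params):
        return -params
    samples = nuts(n_iter, np.zeros(1), log_density, grad_log_density, seed=seed)
    return samples[n_iter // 2:, 0]  # keep only the post-adaptation samples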
def _build_tree_nuts(params, momentum, log_slicevar, step, depth, log_joint0, target, grad_target,
random_state):
"""Recursively build a balanced binary tree needed by NUTS.
Based on Algorithm 6 in
Hoffman & Gelman, JMLR 15, 1351-1381, 2014.
"""
# Base case: one leapfrog step
if depth == 0:
momentum1 = momentum + 0.5 * step * grad_target(params)
params1 = params + step * momentum1
momentum1 = momentum1 + 0.5 * step * grad_target(params1)
log_joint = target(params1) - 0.5 * momentum1.dot(momentum1)
n_ok = float(log_slicevar <= log_joint)
sub_ok = log_slicevar < (1000. + log_joint) # check for diverging error
is_out = False
if not sub_ok:
if np.isinf(target(params1)): # logpdf(params1) = -inf i.e. pdf(params1) = 0
is_out = True
else:
logger.debug(
"NUTS: Diverging error: log_joint={}, params={}, params1={}, momentum={}, "
"momentum1={}.".format(log_joint, params, params1, momentum, momentum1))
mh_ratio = 0. # reject
else:
mh_ratio = min(1., np.exp(log_joint - log_joint0))
return params1, momentum1, params1, momentum1, params1, n_ok, sub_ok, mh_ratio, 1., \
not sub_ok, is_out
else:
# Recursion to build subtrees, doubling size
params_left, momentum_left, params_right, momentum_right, params1, n_sub, sub_ok, \
mh_ratio, n_steps, is_div, is_out = _build_tree_nuts(
params, momentum, log_slicevar, step, depth - 1, log_joint0, target,
grad_target, random_state)
if sub_ok: # recurse further
if step < 0:
params_left, momentum_left, _, _, params2, n_sub2, sub_ok, mh_ratio2, n_steps2, \
is_div, is_out = _build_tree_nuts(
params_left, momentum_left, log_slicevar,
step, depth - 1, log_joint0, target, grad_target, random_state)
else:
_, _, params_right, momentum_right, params2, n_sub2, sub_ok, mh_ratio2, n_steps2, \
is_div, is_out = _build_tree_nuts(
params_right, momentum_right, log_slicevar,
step, depth - 1, log_joint0, target, grad_target, random_state)
if n_sub2 > 0:
if float(n_sub2) / (n_sub + n_sub2) > random_state.rand():
params1 = params2 # accept move
mh_ratio += mh_ratio2
n_steps += n_steps2
sub_ok = sub_ok and ((params_right - params_left).dot(momentum_left) >= 0) \
and ((params_right - params_left).dot(momentum_right) >= 0)
n_sub += n_sub2
return params_left, momentum_left, params_right, momentum_right, params1, n_sub, sub_ok, \
mh_ratio, n_steps, is_div, is_out
def metropolis(n_samples, params0, target, sigma_proposals, warmup=0, seed=0):
"""Sample the target with a Metropolis Markov Chain Monte Carlo using Gaussian proposals.
Parameters
----------
n_samples : int
The number of requested samples.
params0 : np.array
Initial values for each sampled parameter.
target : function
The target log density to sample (possibly unnormalized).
sigma_proposals : np.array
Standard deviations for Gaussian proposals of each parameter.
warmup : int
Number of warmup samples.
seed : int, optional
Seed for pseudo-random number generator.
Returns
-------
samples : np.array
"""
random_state = np.random.RandomState(seed)
samples = np.empty((n_samples + warmup + 1, ) + params0.shape)
samples[0, :] = params0
target_current = target(params0)
if np.isinf(target_current):
raise ValueError(
"Metropolis: Bad initialization point {},logpdf -> -inf.".format(params0))
n_accepted = 0
for ii in range(1, n_samples + warmup + 1):
samples[ii, :] = samples[ii - 1, :] + sigma_proposals * random_state.randn(*params0.shape)
target_prev = target_current
target_current = target(samples[ii, :])
if ((np.exp(target_current - target_prev) < random_state.rand())
or np.isinf(target_current)
or np.isnan(target_current)): # reject proposal
samples[ii, :] = samples[ii - 1, :]
target_current = target_prev
else:
n_accepted += 1
logger.info(
"{}: Total acceptance ratio: {:.3f}".format(__name__,
float(n_accepted) / (n_samples + warmup)))
return samples[(1 + warmup):, :]
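# A minimal usage sketch (not part of the original module; the helper name, proposal scales and
# warmup length are illustrative assumptions): random-walk Metropolis sampling of a 2-D standard
# normal target using the function above.
def _demo_metropolis(n_samples=5000, seed=2):
    def log_density(params):
        return -0.5 * float(params.dot(params))
    sigma_proposals = np.array([1.0, 1.0])
    return metropolis(n_samples, np.zeros(2), log_density, sigma_proposals, warmup=500, seed=seed)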
|
the-stack_0_2492 | # The MIT License (MIT)
#
# Copyright (c) 2016 Damien P. George (original Neopixel object)
# Copyright (c) 2017 Ladyada
# Copyright (c) 2017 Scott Shawcroft for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_dotstar` - DotStar strip driver
====================================================
* Author(s): Damien P. George, Limor Fried & Scott Shawcroft
"""
import busio
import digitalio
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_DotStar.git"
START_HEADER_SIZE = 4
LED_START = 0b11100000 # Three "1" bits, followed by 5 brightness bits
# Pixel color order constants
RGB = (0, 1, 2)
RBG = (0, 2, 1)
GRB = (1, 0, 2)
GBR = (1, 2, 0)
BRG = (2, 0, 1)
BGR = (2, 1, 0)
class DotStar:
"""
A sequence of dotstars.
:param ~microcontroller.Pin clock: The pin to output dotstar clock on.
:param ~microcontroller.Pin data: The pin to output dotstar data on.
:param int n: The number of dotstars in the chain
:param float brightness: Brightness of the pixels between 0.0 and 1.0
:param bool auto_write: True if the dotstars should immediately change when
set. If False, `show` must be called explicitly.
:param tuple pixel_order: Set the pixel order on the strip - different
strips implement this differently. If you send red, and it looks blue
or green on the strip, modify this! It should be one of the values above
Example for Gemma M0:
.. code-block:: python
import adafruit_dotstar
import time
from board import *
RED = 0x100000
with adafruit_dotstar.DotStar(APA102_SCK, APA102_MOSI, 1) as pixels:
pixels[0] = RED
time.sleep(2)
"""
def __init__(self, clock, data, n, *, brightness=1.0, auto_write=True, pixel_order=BGR):
self._spi = None
try:
self._spi = busio.SPI(clock, MOSI=data)
while not self._spi.try_lock():
pass
self._spi.configure(baudrate=4000000)
except ValueError:
self.dpin = digitalio.DigitalInOut(data)
self.cpin = digitalio.DigitalInOut(clock)
self.dpin.direction = digitalio.Direction.OUTPUT
self.cpin.direction = digitalio.Direction.OUTPUT
self.cpin.value = False
self._n = n
# Supply one extra clock cycle for each two pixels in the strip.
self.end_header_size = n // 16
if n % 16 != 0:
self.end_header_size += 1
self._buf = bytearray(n * 4 + START_HEADER_SIZE + self.end_header_size)
self.end_header_index = len(self._buf) - self.end_header_size
self.pixel_order = pixel_order
# Four empty bytes to start.
for i in range(START_HEADER_SIZE):
self._buf[i] = 0x00
# Mark the beginnings of each pixel.
for i in range(START_HEADER_SIZE, self.end_header_index, 4):
self._buf[i] = 0xff
# 0xff bytes at the end.
for i in range(self.end_header_index, len(self._buf)):
self._buf[i] = 0xff
self._brightness = 1.0
# Set auto_write to False temporarily so brightness setter does _not_
# call show() while in __init__.
self.auto_write = False
self.brightness = brightness
self.auto_write = auto_write
def deinit(self):
"""Blank out the DotStars and release the resources."""
self.auto_write = False
for i in range(START_HEADER_SIZE, self.end_header_index):
if i % 4 != 0:
self._buf[i] = 0
self.show()
if self._spi:
self._spi.deinit()
else:
self.dpin.deinit()
self.cpin.deinit()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.deinit()
def __repr__(self):
return "[" + ", ".join([str(x) for x in self]) + "]"
def _set_item(self, index, value):
"""
value can be one of three things:
a (r,g,b) list/tuple
a (r,g,b, brightness) list/tuple
a single, longer int that contains RGB values, like 0xFFFFFF
brightness, if specified should be a float 0-1
        Set a pixel value. You can set per-pixel brightness here; if it's not passed, the
        maximum pixel brightness value is used, which is a good default.
Important notes about the per-pixel brightness - it's accomplished by
PWMing the entire output of the LED, and that PWM is at a much
slower clock than the rest of the LEDs. This can cause problems in
Persistence of Vision Applications
"""
offset = index * 4 + START_HEADER_SIZE
        rgb = value
        brightness = 1
        if isinstance(value, int):
            # Unpack a packed 24-bit 0xRRGGBB value into an (r, g, b) tuple; calling len() on an
            # int would raise a TypeError, so the length check below only applies to sequences.
            rgb = (value >> 16, (value >> 8) & 0xff, value & 0xff)
        elif len(value) == 4:
            brightness = value[3]
            # Ignore value[3] below.
# LED startframe is three "1" bits, followed by 5 brightness bits
# then 8 bits for each of R, G, and B. The order of those 3 are configurable and
# vary based on hardware
# same as math.ceil(brightness * 31) & 0b00011111
# Idea from https://www.codeproject.com/Tips/700780/Fast-floor-ceiling-functions
brightness_byte = 32 - int(32 - brightness * 31) & 0b00011111
self._buf[offset] = brightness_byte | LED_START
self._buf[offset + 1] = rgb[self.pixel_order[0]]
self._buf[offset + 2] = rgb[self.pixel_order[1]]
self._buf[offset + 3] = rgb[self.pixel_order[2]]
def __setitem__(self, index, val):
if isinstance(index, slice):
start, stop, step = index.indices(self._n)
length = stop - start
if step != 0:
# same as math.ceil(length / step)
# Idea from https://fizzbuzzer.com/implement-a-ceil-function/
length = (length + step - 1) // step
if len(val) != length:
raise ValueError("Slice and input sequence size do not match.")
for val_i, in_i in enumerate(range(start, stop, step)):
self._set_item(in_i, val[val_i])
else:
self._set_item(index, val)
if self.auto_write:
self.show()
def __getitem__(self, index):
if isinstance(index, slice):
out = []
for in_i in range(*index.indices(self._n)):
out.append(
tuple(self._buf[in_i * 4 + (3 - i) + START_HEADER_SIZE] for i in range(3)))
return out
if index < 0:
index += len(self)
if index >= self._n or index < 0:
raise IndexError
offset = index * 4
return tuple(self._buf[offset + (3 - i) + START_HEADER_SIZE]
for i in range(3))
def __len__(self):
return self._n
@property
def brightness(self):
"""Overall brightness of the pixel"""
return self._brightness
@brightness.setter
def brightness(self, brightness):
self._brightness = min(max(brightness, 0.0), 1.0)
if self.auto_write:
self.show()
def fill(self, color):
"""Colors all pixels the given ***color***."""
auto_write = self.auto_write
self.auto_write = False
for i in range(self._n):
self[i] = color
if auto_write:
self.show()
self.auto_write = auto_write
def _ds_writebytes(self, buf):
for b in buf:
for _ in range(8):
self.cpin.value = True
self.dpin.value = (b & 0x80)
self.cpin.value = False
b = b << 1
def show(self):
"""Shows the new colors on the pixels themselves if they haven't already
been autowritten.
The colors may or may not be showing after this function returns because
it may be done asynchronously."""
# Create a second output buffer if we need to compute brightness
buf = self._buf
if self.brightness < 1.0:
buf = bytearray(self._buf)
# Four empty bytes to start.
for i in range(START_HEADER_SIZE):
buf[i] = 0x00
for i in range(START_HEADER_SIZE, self.end_header_index):
buf[i] = self._buf[i] if i % 4 == 0 else int(self._buf[i] * self._brightness)
# Four 0xff bytes at the end.
for i in range(self.end_header_index, len(buf)):
buf[i] = 0xff
if self._spi:
self._spi.write(buf)
else:
self._ds_writebytes(buf)
self.cpin.value = False
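# A minimal per-pixel brightness sketch (not part of the original driver; the pin names assume a
# Gemma M0 board and the helper name is hypothetical): the 4-tuple pixel form sets the slower,
# PWM-based per-pixel brightness described in _set_item above.
def _example_per_pixel_brightness():
    import board  # assumed available on CircuitPython hardware
    with DotStar(board.APA102_SCK, board.APA102_MOSI, 3, brightness=0.5) as pixels:
        pixels[0] = (255, 0, 0)        # full per-pixel brightness
        pixels[1] = (255, 0, 0, 0.25)  # same color at a quarter of the per-pixel brightness
        pixels[2] = 0x0000ff           # packed 24-bit colors are also accepted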
|
the-stack_0_2494 | # ***************************************************************
# Copyright (c) 2022 Jittor. All Rights Reserved.
# Maintainers:
# Zheng-Ning Liu <[email protected]>
# Dun Liang <[email protected]>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import unittest
import numpy as np
skip_this_test = False  # set to True below if the PyTorch EMD reference implementation is unavailable
try:
    import torch
    from emd import earth_mover_distance as TEMD
except:
    skip_this_test = True
import jittor as jt
from jittor.loss3d import chamfer_loss
from jittor.loss3d import earth_mover_distance
class TestLoss3d(unittest.TestCase):
def test_chamfer(self):
def test():
pc1 = np.random.randn(10, 100, 3).astype(np.float32)
pc2 = np.random.randn(10, 100, 3).astype(np.float32)
Jpc1 = jt.array(pc1)
Jpc2 = jt.array(pc2)
Jcf = chamfer_loss(Jpc1, Jpc2, dims='BNC')
ppc1 = np.repeat(pc1[:, :, None, :], 100, axis=2)
ppc2 = np.repeat(pc2[:, None, :, :], 100, axis=1)
ncf = np.sqrt(((ppc1 - ppc2) ** 2).sum(axis=-1)).min(axis=-1)
ncf = ncf.mean()
self.assertTrue(np.allclose(ncf, Jcf.item()))
test()
if jt.has_cuda:
with jt.flag_scope(use_cuda=1):
test()
def test_chamfer_dims(self):
def test():
pc1 = np.random.randn(10, 100, 3).astype(np.float32)
pc2 = np.random.randn(10, 100, 3).astype(np.float32)
Jpc1 = jt.array(pc1.transpose([0, 2, 1]))
Jpc2 = jt.array(pc2.transpose([0, 2, 1]))
Jcf = chamfer_loss(Jpc1, Jpc2, dims='BCN')
ppc1 = np.repeat(pc1[:, :, None, :], 100, axis=2)
ppc2 = np.repeat(pc2[:, None, :, :], 100, axis=1)
ncf = np.sqrt(((ppc1 - ppc2) ** 2).sum(axis=-1)).min(axis=-1)
ncf = ncf.mean()
self.assertTrue(np.allclose(ncf, Jcf.item()))
test()
if jt.has_cuda:
with jt.flag_scope(use_cuda=1):
test()
@unittest.skipIf(skip_this_test, "No Pyorch_EMD found")
def test_emd_torch(self):
if jt.has_cuda:
jt.flags.use_cuda = True
pc1 = np.random.randn(10, 100, 3).astype(np.float32)
pc2 = np.random.randn(10, 50, 3).astype(np.float32)
Tpc1 = torch.from_numpy(pc1).cuda()
Tpc2 = torch.from_numpy(pc2).cuda()
Tpc1.requires_grad = True
Tpc2.requires_grad = True
Temdcost = TEMD(Tpc1, Tpc2, transpose=False)
Temd = Temdcost.mean()
Jpc1 = jt.array(pc1)
Jpc2 = jt.array(pc2)
Jemd = earth_mover_distance(Jpc1, Jpc2, dims='BNC')
Temd.backward()
Tgrad1 = Tpc1.grad.cpu().numpy()
Tgrad2 = Tpc2.grad.cpu().numpy()
Jgrad1, Jgrad2 = jt.grad(Jemd, [Jpc1, Jpc2])
self.assertTrue(np.allclose(Temd.item(), Jemd.item()), Temd.item() - Jemd.item())
self.assertTrue(np.allclose(Tgrad1, Jgrad1.data, atol=1e-4), np.abs(Tgrad1 - Jgrad1.data).max())
self.assertTrue(np.allclose(Tgrad2, Jgrad2.data, atol=1e-4), np.abs(Tgrad2 - Jgrad2.data).max())
if __name__ == '__main__':
unittest.main() |
the-stack_0_2495 | import streamlit as st
import pandas as pd
import numpy as np
import folium
import geopandas
import plotly.express as px
from streamlit_folium import folium_static
from folium.plugins import MarkerCluster
from datetime import datetime
st.set_page_config(layout='wide')
@st.cache(allow_output_mutation=True)
def get_data(path):
data = pd.read_csv(path)
return data
@st.cache(allow_output_mutation=True)
def get_geofile(url):
geofile = geopandas.read_file(url)
return geofile
#get data
path='Datasets/kc_house_data.csv'
data = get_data(path)
#get geofile
url='https://opendata.arcgis.com/datasets/83fc2e72903343aabff6de8cb445b81c_2.geojson'
geofile = get_geofile(url)
#add new features
data['price_m2']=data['price']/data['sqft_lot']
#===============================
#Data Overview
#===============================
f_attributes=st.sidebar.multiselect('Enter columns',data.columns)
f_zipcode=st.sidebar.multiselect('Enter Zip Code',data['zipcode'].unique())
st.title('Data Overview')
if (f_zipcode !=[])&(f_attributes !=[]):
data=data.loc[data['zipcode'].isin(f_zipcode),f_attributes]
elif (f_zipcode ==[])&(f_attributes !=[]):
data=data.loc[:,f_attributes]
elif (f_zipcode != []) & (f_attributes == []):
data = data.loc[data['zipcode'].isin(f_zipcode), :]
else:
data=data.copy()
st.dataframe(data)
c1,c2=st.beta_columns((1,1))
#Average metrics
df1=data[['id','zipcode']].groupby('zipcode').count().reset_index()
df2=data[['price','zipcode']].groupby('zipcode').mean().reset_index()
df3=data[['sqft_living','zipcode']].groupby('zipcode').mean().reset_index()
df4=data[['price_m2','zipcode']].groupby('zipcode').mean().reset_index()
# merge
m1=pd.merge(df1,df2,on='zipcode',how='inner')
m2=pd.merge(m1,df3,on='zipcode',how='inner')
df=pd.merge(m2,df4,on='zipcode',how='inner')
df.columns=['ZIPCODE','TOTAL HOUSES','PRICE','SQFT LIVING','PRICE/M2']
c1.header('Average Values')
c1.dataframe(df,height=600)
#Statistic Descriptive
num_attributes = data.select_dtypes(include=['int64','float64'])
media = pd.DataFrame(num_attributes.apply(np.mean))
mediana = pd.DataFrame(num_attributes.apply(np.median))
std = pd.DataFrame(num_attributes.apply(np.std))
max_= pd.DataFrame(num_attributes.apply(np.max))
min_= pd.DataFrame(num_attributes.apply(np.min))
df1=pd.concat([max_,min_,media,mediana,std],axis=1).reset_index()
df1.columns=['attributes','max','min','mean','median','std']
c2.header('Descriptive Analysis')
c2.dataframe(df1,height=600)
#===============================
# Portfolio Density
#===============================
st.title('Region Overview')
c1,c2=st.beta_columns((1,2))
c1.header('Portfolio Density')
df=data.sample(10)
#Base Map - Folium
density_map=folium.Map(location=[data['lat'].mean(),
data['long'].mean()],
default_zoom_start=25)
marker_cluster=MarkerCluster().add_to(density_map)
for name, row in df.iterrows():
folium.Marker([row['lat'], row['long']],
popup='Sold R${0} on: {1}. Features: {2} sqft,{3} bedrooms,'
'{4} bathrooms, year built: {5}'.format( row['price'],
row['date'],
row['sqft_living'],
row['bedrooms'],
row['bathrooms'],
row['yr_built'])).add_to(marker_cluster)
with c1:
folium_static(density_map)
############################
#Region Price Map
############################
c2.header('Price Density')
df=data[['price','zipcode']].groupby('zipcode').mean().reset_index()
df.columns=['ZIP','PRICE']
#df=df.sample(50)
geofile=geofile[geofile['ZIP'].isin(df['ZIP'].tolist())]
region_price_map = folium.Map(location=[data['lat'].mean(),
                                        data['long'].mean()],
                              default_zoom_start=15)
region_price_map.choropleth(data = df,
geo_data=geofile,
columns=['ZIP','PRICE'],
key_on='feature.properties.ZIP',
fill_color='YlOrRd',
fill_opacity=0.7,
line_opacity=0.2,
legend_name='AVG PRICE')
with c2:
folium_static(region_price_map)
# ====================================================================================
# Distribution of properties by category
# ====================================================================================
st.sidebar.title('Commercial Options')
st.title('Commercial Attributes')
# ----------Average Price per Year
data['date']=pd.to_datetime(data['date']).dt.strftime('%Y-%m-%d')
# filters
min_year_built = int( data['yr_built'].min())
max_year_built = int( data['yr_built'].max())
st.sidebar.subheader('Select Max Year Built')
f_year_built = st.sidebar.slider('Year Built', min_year_built,max_year_built,min_year_built)
st.header('Average Price per Year Built')
# data selection
df=data.loc[data['yr_built']<f_year_built]
df=df[['yr_built','price']].groupby('yr_built').mean().reset_index()
# plot
fig=px.line(df,x='yr_built',y='price')
st.plotly_chart(fig,use_container_width=True)
# ----------Average Price per Day
st.sidebar.title('Average Price per Day')
st.sidebar.subheader('Select Max Date')
# filters
min_date = datetime.strptime(data['date'].min(),'%Y-%m-%d')
max_date = datetime.strptime(data['date'].max(),'%Y-%m-%d')
f_date = st.sidebar.slider('Date', min_date,max_date,min_date)
# data selection
data['date']=pd.to_datetime(data['date'])
df = data.loc[data['date']<f_date]
df = df[['date','price']].groupby('date').mean().reset_index()
# plot
fig=px.line(df,x='date',y='price')
st.plotly_chart(fig,use_container_width=True)
# ----------Histograma
st.sidebar.title('Price Distribution')
st.sidebar.subheader('Select Max Price')
# filter
min_price=int(data['price'].min())
max_price=int(data['price'].max())
avg_price=int(data['price'].mean())
f_price = st.sidebar.slider('Price', min_price,max_price,avg_price)
df = data.loc[data['price']<f_price]
# plot
fig=px.histogram(df,x='price',nbins=50)
st.plotly_chart(fig,use_container_width=True)
#===================================================
# Distribution of properties by physical attributes
#===================================================
st.sidebar.title('Attributes Options')
st.title('House Attributes')
# filters
f_bedrooms = st.sidebar.selectbox('Max_number_bedrooms',sorted(set(data['bedrooms'].unique())))
f_bathrooms = st.sidebar.selectbox('Max_number_bathrooms',sorted(set(data['bathrooms'].unique())))
c1, c2 = st.beta_columns(2)
# House per Bedrooms
c1.header('Houses per bedrooms')
df=data[data['bedrooms']<=f_bedrooms]
fig=px.histogram(df,x='bedrooms',nbins=19)
c1.plotly_chart(fig,use_container_width=True)
# House per Bathrooms
c2.header('Houses per bathrooms')
df=data[data['bathrooms']<=f_bathrooms]
fig=px.histogram(df,x='bathrooms',nbins=19)
c2.plotly_chart(fig,use_container_width=True)
# filters
f_floors = st.sidebar.selectbox('Max_number_floors',sorted(set(data['floors'].unique())))
f_water = st.sidebar.checkbox('Only water view')
c1, c2 = st.beta_columns(2)
# House per Floors
c1.header('Houses per floors')
df=data[data['floors']<=f_floors]
#plot
fig=px.histogram(df,x='floors',nbins=19)
c1.plotly_chart(fig,use_container_width=True)
# House per Water View
c2.header('Houses with Water View')
if f_water:
df=data[data['waterfront']==1]
else:
df=data.copy()
fig = px.histogram(df,x='waterfront',nbins=10)
c2.plotly_chart(fig,use_container_width=True)
|
the-stack_0_2496 | import asyncio
import email.message
import enum
import inspect
import json
from typing import (
Any,
Callable,
Coroutine,
Dict,
List,
Optional,
Sequence,
Set,
Type,
Union,
)
from fastapi import params
from fastapi.datastructures import Default, DefaultPlaceholder
from fastapi.dependencies.models import Dependant
from fastapi.dependencies.utils import (
get_body_field,
get_dependant,
get_parameterless_sub_dependant,
solve_dependencies,
)
from fastapi.encoders import DictIntStrAny, SetIntStr, jsonable_encoder
from fastapi.exceptions import RequestValidationError, WebSocketRequestValidationError
from fastapi.openapi.constants import STATUS_CODES_WITH_NO_BODY
from fastapi.types import DecoratedCallable
from fastapi.utils import (
create_cloned_field,
create_response_field,
generate_operation_id_for_path,
get_value_or_default,
)
from pydantic import BaseModel
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from pydantic.fields import ModelField, Undefined
from starlette import routing
from starlette.concurrency import run_in_threadpool
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from starlette.routing import BaseRoute
from starlette.routing import Mount # noqa
from starlette.routing import (
compile_path,
get_name,
request_response,
websocket_session,
)
from starlette.status import WS_1008_POLICY_VIOLATION
from starlette.types import ASGIApp
from starlette.websockets import WebSocket
def _prepare_response_content(
res: Any,
*,
exclude_unset: bool,
exclude_defaults: bool = False,
exclude_none: bool = False,
) -> Any:
if isinstance(res, BaseModel):
return res.dict(
by_alias=True,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
if isinstance(res, list):
return [
_prepare_response_content(
item,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
for item in res
]
if isinstance(res, dict):
return {
k: _prepare_response_content(
v,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
for k, v in res.items()
}
return res
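# An illustrative note (not part of FastAPI): for a Pydantic model such as
# `class Item(BaseModel): name: str; price: float = 0.0`, calling
# _prepare_response_content([Item(name="a")], exclude_unset=True) recurses into the list and
# returns [{"name": "a"}], because the unset default for "price" is excluded.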
async def serialize_response(
*,
field: Optional[ModelField] = None,
response_content: Any,
include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
by_alias: bool = True,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
is_coroutine: bool = True,
) -> Any:
if field:
errors = []
response_content = _prepare_response_content(
response_content,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
if is_coroutine:
value, errors_ = field.validate(response_content, {}, loc=("response",))
else:
value, errors_ = await run_in_threadpool(
field.validate, response_content, {}, loc=("response",)
)
if isinstance(errors_, ErrorWrapper):
errors.append(errors_)
elif isinstance(errors_, list):
errors.extend(errors_)
if errors:
raise ValidationError(errors, field.type_)
return jsonable_encoder(
value,
include=include,
exclude=exclude,
by_alias=by_alias,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
)
return jsonable_encoder(response_content)
async def run_endpoint_function(
*, dependant: Dependant, values: Dict[str, Any], is_coroutine: bool
) -> Any:
# Only called by get_request_handler. Has been split into its own function to
# facilitate profiling endpoints, since inner functions are harder to profile.
assert dependant.call is not None, "dependant.call must be a function"
if is_coroutine:
return await dependant.call(**values)
return await run_in_threadpool(dependant.call, **values)
def get_request_handler(
dependant: Dependant,
body_field: Optional[ModelField] = None,
status_code: Optional[int] = None,
response_class: Union[Type[Response], DefaultPlaceholder] = Default(JSONResponse),
response_field: Optional[ModelField] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
dependency_overrides_provider: Optional[Any] = None,
) -> Callable[[Request], Coroutine[Any, Any, Response]]:
assert dependant.call is not None, "dependant.call must be a function"
is_coroutine = asyncio.iscoroutinefunction(dependant.call)
is_body_form = body_field and isinstance(body_field.field_info, params.Form)
if isinstance(response_class, DefaultPlaceholder):
actual_response_class: Type[Response] = response_class.value
else:
actual_response_class = response_class
async def app(request: Request) -> Response:
try:
body: Any = None
if body_field:
if is_body_form:
body = await request.form()
else:
body_bytes = await request.body()
if body_bytes:
json_body: Any = Undefined
content_type_value = request.headers.get("content-type")
if not content_type_value:
json_body = await request.json()
else:
message = email.message.Message()
message["content-type"] = content_type_value
if message.get_content_maintype() == "application":
subtype = message.get_content_subtype()
if subtype == "json" or subtype.endswith("+json"):
json_body = await request.json()
if json_body != Undefined:
body = json_body
else:
body = body_bytes
except json.JSONDecodeError as e:
raise RequestValidationError([ErrorWrapper(e, ("body", e.pos))], body=e.doc)
except Exception as e:
raise HTTPException(
status_code=400, detail="There was an error parsing the body"
) from e
solved_result = await solve_dependencies(
request=request,
dependant=dependant,
body=body,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, background_tasks, sub_response, _ = solved_result
if errors:
raise RequestValidationError(errors, body=body)
raw_response = await run_endpoint_function(
dependant=dependant, values=values, is_coroutine=is_coroutine
)
if isinstance(raw_response, Response):
if raw_response.background is None:
raw_response.background = background_tasks
return raw_response
response_data = await serialize_response(
field=response_field,
response_content=raw_response,
include=response_model_include,
exclude=response_model_exclude,
by_alias=response_model_by_alias,
exclude_unset=response_model_exclude_unset,
exclude_defaults=response_model_exclude_defaults,
exclude_none=response_model_exclude_none,
is_coroutine=is_coroutine,
)
response_args: Dict[str, Any] = {"background": background_tasks}
        # If status_code was set, use it; otherwise fall back to the default of the
        # response class (e.g. 307 for RedirectResponse)
if status_code is not None:
response_args["status_code"] = status_code
response = actual_response_class(response_data, **response_args)
response.headers.raw.extend(sub_response.headers.raw)
if sub_response.status_code:
response.status_code = sub_response.status_code
return response
return app
def get_websocket_app(
dependant: Dependant, dependency_overrides_provider: Optional[Any] = None
) -> Callable[[WebSocket], Coroutine[Any, Any, Any]]:
async def app(websocket: WebSocket) -> None:
solved_result = await solve_dependencies(
request=websocket,
dependant=dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
values, errors, _, _2, _3 = solved_result
if errors:
await websocket.close(code=WS_1008_POLICY_VIOLATION)
raise WebSocketRequestValidationError(errors)
assert dependant.call is not None, "dependant.call must be a function"
await dependant.call(**values)
return app
class APIWebSocketRoute(routing.WebSocketRoute):
def __init__(
self,
path: str,
endpoint: Callable[..., Any],
*,
name: Optional[str] = None,
dependency_overrides_provider: Optional[Any] = None,
) -> None:
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.dependant = get_dependant(path=path, call=self.endpoint)
self.app = websocket_session(
get_websocket_app(
dependant=self.dependant,
dependency_overrides_provider=dependency_overrides_provider,
)
)
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
class APIRoute(routing.Route):
def __init__(
self,
path: str,
endpoint: Callable[..., Any],
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
name: Optional[str] = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Union[Type[Response], DefaultPlaceholder] = Default(
JSONResponse
),
dependency_overrides_provider: Optional[Any] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> None:
# normalise enums e.g. http.HTTPStatus
if isinstance(status_code, enum.IntEnum):
status_code = int(status_code)
self.path = path
self.endpoint = endpoint
self.name = get_name(endpoint) if name is None else name
self.path_regex, self.path_format, self.param_convertors = compile_path(path)
if methods is None:
methods = ["GET"]
self.methods: Set[str] = {method.upper() for method in methods}
self.unique_id = generate_operation_id_for_path(
name=self.name, path=self.path_format, method=list(methods)[0]
)
self.response_model = response_model
if self.response_model:
assert (
status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {status_code} must not have a response body"
response_name = "Response_" + self.unique_id
self.response_field = create_response_field(
name=response_name, type_=self.response_model
)
# Create a clone of the field, so that a Pydantic submodel is not returned
# as is just because it's an instance of a subclass of a more limited class
# e.g. UserInDB (containing hashed_password) could be a subclass of User
# that doesn't have the hashed_password. But because it's a subclass, it
# would pass the validation and be returned as is.
            # By being a new field, no inheritance is carried over as is. A new model
            # will always be created.
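            # Illustrative sketch (hypothetical models, not part of this module):
            #     class User(BaseModel):
            #         username: str
            #     class UserInDB(User):
            #         hashed_password: str
            # With response_model=User, returning a UserInDB instance would otherwise
            # pass validation unchanged; the cloned field re-validates against plain
            # User, so hashed_password is filtered out of the response.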
self.secure_cloned_response_field: Optional[
ModelField
] = create_cloned_field(self.response_field)
else:
self.response_field = None # type: ignore
self.secure_cloned_response_field = None
self.status_code = status_code
self.tags = tags or []
if dependencies:
self.dependencies = list(dependencies)
else:
self.dependencies = []
self.summary = summary
self.description = description or inspect.cleandoc(self.endpoint.__doc__ or "")
# if a "form feed" character (page break) is found in the description text,
# truncate description text to the content preceding the first "form feed"
self.description = self.description.split("\f")[0]
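        # e.g. a (hypothetical) endpoint docstring such as
        #     """Create an item.
        #     \f
        #     Internal implementation notes."""
        # would expose only "Create an item." as the description.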
self.response_description = response_description
self.responses = responses or {}
response_fields = {}
for additional_status_code, response in self.responses.items():
assert isinstance(response, dict), "An additional response must be a dict"
model = response.get("model")
if model:
assert (
additional_status_code not in STATUS_CODES_WITH_NO_BODY
), f"Status code {additional_status_code} must not have a response body"
response_name = f"Response_{additional_status_code}_{self.unique_id}"
response_field = create_response_field(name=response_name, type_=model)
response_fields[additional_status_code] = response_field
if response_fields:
self.response_fields: Dict[Union[int, str], ModelField] = response_fields
else:
self.response_fields = {}
self.deprecated = deprecated
self.operation_id = operation_id
self.response_model_include = response_model_include
self.response_model_exclude = response_model_exclude
self.response_model_by_alias = response_model_by_alias
self.response_model_exclude_unset = response_model_exclude_unset
self.response_model_exclude_defaults = response_model_exclude_defaults
self.response_model_exclude_none = response_model_exclude_none
self.include_in_schema = include_in_schema
self.response_class = response_class
assert callable(endpoint), "An endpoint must be a callable"
self.dependant = get_dependant(path=self.path_format, call=self.endpoint)
for depends in self.dependencies[::-1]:
self.dependant.dependencies.insert(
0,
get_parameterless_sub_dependant(depends=depends, path=self.path_format),
)
self.body_field = get_body_field(dependant=self.dependant, name=self.unique_id)
self.dependency_overrides_provider = dependency_overrides_provider
self.callbacks = callbacks
self.app = request_response(self.get_route_handler())
def get_route_handler(self) -> Callable[[Request], Coroutine[Any, Any, Response]]:
return get_request_handler(
dependant=self.dependant,
body_field=self.body_field,
status_code=self.status_code,
response_class=self.response_class,
response_field=self.secure_cloned_response_field,
response_model_include=self.response_model_include,
response_model_exclude=self.response_model_exclude,
response_model_by_alias=self.response_model_by_alias,
response_model_exclude_unset=self.response_model_exclude_unset,
response_model_exclude_defaults=self.response_model_exclude_defaults,
response_model_exclude_none=self.response_model_exclude_none,
dependency_overrides_provider=self.dependency_overrides_provider,
)
class APIRouter(routing.Router):
def __init__(
self,
*,
prefix: str = "",
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
default_response_class: Type[Response] = Default(JSONResponse),
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
callbacks: Optional[List[BaseRoute]] = None,
routes: Optional[List[routing.BaseRoute]] = None,
redirect_slashes: bool = True,
default: Optional[ASGIApp] = None,
dependency_overrides_provider: Optional[Any] = None,
route_class: Type[APIRoute] = APIRoute,
on_startup: Optional[Sequence[Callable[[], Any]]] = None,
on_shutdown: Optional[Sequence[Callable[[], Any]]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
) -> None:
super().__init__(
routes=routes, # type: ignore # in Starlette
redirect_slashes=redirect_slashes,
default=default, # type: ignore # in Starlette
on_startup=on_startup, # type: ignore # in Starlette
on_shutdown=on_shutdown, # type: ignore # in Starlette
)
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
self.prefix = prefix
self.tags: List[str] = tags or []
        self.dependencies = list(dependencies or [])
self.deprecated = deprecated
self.include_in_schema = include_in_schema
self.responses = responses or {}
self.callbacks = callbacks or []
self.dependency_overrides_provider = dependency_overrides_provider
self.route_class = route_class
self.default_response_class = default_response_class
def add_api_route(
self,
path: str,
endpoint: Callable[..., Any],
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
methods: Optional[Union[Set[str], List[str]]] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Union[Type[Response], DefaultPlaceholder] = Default(
JSONResponse
),
name: Optional[str] = None,
route_class_override: Optional[Type[APIRoute]] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> None:
route_class = route_class_override or self.route_class
responses = responses or {}
combined_responses = {**self.responses, **responses}
current_response_class = get_value_or_default(
response_class, self.default_response_class
)
current_tags = self.tags.copy()
if tags:
current_tags.extend(tags)
current_dependencies = self.dependencies.copy()
if dependencies:
current_dependencies.extend(dependencies)
current_callbacks = self.callbacks.copy()
if callbacks:
current_callbacks.extend(callbacks)
route = route_class(
self.prefix + path,
endpoint=endpoint,
response_model=response_model,
status_code=status_code,
tags=current_tags,
dependencies=current_dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=combined_responses,
deprecated=deprecated or self.deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema and self.include_in_schema,
response_class=current_response_class,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
callbacks=current_callbacks,
)
self.routes.append(route)
def api_route(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_api_route(
path,
func,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=methods,
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
return func
return decorator
def add_api_websocket_route(
self, path: str, endpoint: Callable[..., Any], name: Optional[str] = None
) -> None:
route = APIWebSocketRoute(
path,
endpoint=endpoint,
name=name,
dependency_overrides_provider=self.dependency_overrides_provider,
)
self.routes.append(route)
def websocket(
self, path: str, name: Optional[str] = None
) -> Callable[[DecoratedCallable], DecoratedCallable]:
def decorator(func: DecoratedCallable) -> DecoratedCallable:
self.add_api_websocket_route(path, func, name=name)
return func
return decorator
def include_router(
self,
router: "APIRouter",
*,
prefix: str = "",
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
default_response_class: Type[Response] = Default(JSONResponse),
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
callbacks: Optional[List[BaseRoute]] = None,
deprecated: Optional[bool] = None,
include_in_schema: bool = True,
) -> None:
if prefix:
assert prefix.startswith("/"), "A path prefix must start with '/'"
assert not prefix.endswith(
"/"
), "A path prefix must not end with '/', as the routes will start with '/'"
else:
for r in router.routes:
path = getattr(r, "path")
name = getattr(r, "name", "unknown")
if path is not None and not path:
raise Exception(
f"Prefix and path cannot be both empty (path operation: {name})"
)
if responses is None:
responses = {}
for route in router.routes:
if isinstance(route, APIRoute):
combined_responses = {**responses, **route.responses}
use_response_class = get_value_or_default(
route.response_class,
router.default_response_class,
default_response_class,
self.default_response_class,
)
current_tags = []
if tags:
current_tags.extend(tags)
if route.tags:
current_tags.extend(route.tags)
current_dependencies: List[params.Depends] = []
if dependencies:
current_dependencies.extend(dependencies)
if route.dependencies:
current_dependencies.extend(route.dependencies)
current_callbacks = []
if callbacks:
current_callbacks.extend(callbacks)
if route.callbacks:
current_callbacks.extend(route.callbacks)
self.add_api_route(
prefix + route.path,
route.endpoint,
response_model=route.response_model,
status_code=route.status_code,
tags=current_tags,
dependencies=current_dependencies,
summary=route.summary,
description=route.description,
response_description=route.response_description,
responses=combined_responses,
deprecated=route.deprecated or deprecated or self.deprecated,
methods=route.methods,
operation_id=route.operation_id,
response_model_include=route.response_model_include,
response_model_exclude=route.response_model_exclude,
response_model_by_alias=route.response_model_by_alias,
response_model_exclude_unset=route.response_model_exclude_unset,
response_model_exclude_defaults=route.response_model_exclude_defaults,
response_model_exclude_none=route.response_model_exclude_none,
include_in_schema=route.include_in_schema
and self.include_in_schema
and include_in_schema,
response_class=use_response_class,
name=route.name,
route_class_override=type(route),
callbacks=current_callbacks,
)
elif isinstance(route, routing.Route):
methods = list(route.methods or []) # type: ignore # in Starlette
self.add_route(
prefix + route.path,
route.endpoint,
methods=methods,
include_in_schema=route.include_in_schema,
name=route.name,
)
elif isinstance(route, APIWebSocketRoute):
self.add_api_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
elif isinstance(route, routing.WebSocketRoute):
self.add_websocket_route(
prefix + route.path, route.endpoint, name=route.name
)
for handler in router.on_startup:
self.add_event_handler("startup", handler)
for handler in router.on_shutdown:
self.add_event_handler("shutdown", handler)
def get(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["GET"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
def put(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["PUT"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
def post(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["POST"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
def delete(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["DELETE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
def options(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["OPTIONS"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
def head(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["HEAD"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
def patch(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["PATCH"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
def trace(
self,
path: str,
*,
response_model: Optional[Type[Any]] = None,
status_code: Optional[int] = None,
tags: Optional[List[str]] = None,
dependencies: Optional[Sequence[params.Depends]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = "Successful Response",
responses: Optional[Dict[Union[int, str], Dict[str, Any]]] = None,
deprecated: Optional[bool] = None,
operation_id: Optional[str] = None,
response_model_include: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_exclude: Optional[Union[SetIntStr, DictIntStrAny]] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
response_class: Type[Response] = Default(JSONResponse),
name: Optional[str] = None,
callbacks: Optional[List[BaseRoute]] = None,
) -> Callable[[DecoratedCallable], DecoratedCallable]:
return self.api_route(
path=path,
response_model=response_model,
status_code=status_code,
tags=tags,
dependencies=dependencies,
summary=summary,
description=description,
response_description=response_description,
responses=responses,
deprecated=deprecated,
methods=["TRACE"],
operation_id=operation_id,
response_model_include=response_model_include,
response_model_exclude=response_model_exclude,
response_model_by_alias=response_model_by_alias,
response_model_exclude_unset=response_model_exclude_unset,
response_model_exclude_defaults=response_model_exclude_defaults,
response_model_exclude_none=response_model_exclude_none,
include_in_schema=include_in_schema,
response_class=response_class,
name=name,
callbacks=callbacks,
)
|
the-stack_0_2503 | import asyncio
import re
import subprocess
import time
from dataclasses import replace
from pathlib import Path
from typing import Any, AsyncIterator, Set
from uuid import uuid4 as uuid
import aiodocker
import pytest
from yarl import URL
from neuro_sdk import CONFIG_ENV_NAME, DEFAULT_CONFIG_PATH, JobStatus
from tests.e2e import Helper, make_image_name
def parse_docker_ls_output(docker_ls_output: Any) -> Set[str]:
return {
repo_tag
for info in docker_ls_output
if info["RepoTags"] is not None
for repo_tag in info["RepoTags"]
if repo_tag
}
@pytest.fixture()
def tag() -> str:
return str(uuid())
async def generate_image(docker: aiodocker.Docker, tag: str) -> str:
name = make_image_name()
image_archive = Path(__file__).parent / "assets/echo-tag.tar"
# TODO use random image name here
image_name = f"{name}:{tag}"
with image_archive.open(mode="r+b") as fileobj:
await docker.images.build(
fileobj=fileobj, tag=image_name, buildargs={"TAG": tag}, encoding="identity"
)
return image_name
@pytest.fixture()
async def image(docker: aiodocker.Docker, tag: str) -> AsyncIterator[str]:
image = await generate_image(docker, tag)
yield image
await docker.images.delete(image, force=True)
@pytest.mark.e2e
def test_images_complete_lifecycle(
helper: Helper,
image: str,
tag: str,
event_loop: asyncio.AbstractEventLoop,
docker: aiodocker.Docker,
) -> None:
    # Let's push the image
captured = helper.run_cli(["image", "push", image])
# stderr has "Used image ..." lines
# assert not captured.err
image_full_str = f"image://{helper.cluster_name}/{helper.username}/{image}"
assert captured.out.endswith(image_full_str)
image_url = URL(image_full_str)
# Check if image available on registry
image_full_str = f"image://{helper.cluster_name}/{helper.username}/{image}"
image_short_str = f"image:{image}"
assert captured.out.endswith(image_full_str)
image_full_str_no_tag = image_full_str.replace(f":{tag}", "")
image_short_str_no_tag = image_short_str.replace(f":{tag}", "")
# check ls short mode
captured = helper.run_cli(["image", "ls"])
assert image_short_str_no_tag in [
line.strip() for line in captured.out.splitlines()
]
captured = helper.run_cli(["image", "ls", "--full-uri"])
assert image_full_str_no_tag in [line.strip() for line in captured.out.splitlines()]
# check ls long mode
captured = helper.run_cli(["image", "ls", "-l"])
for line in captured.out.splitlines():
if image_short_str_no_tag in line:
break
else:
assert False, f"Not found {image_short_str_no_tag} in {captured.out}"
# delete local
event_loop.run_until_complete(docker.images.delete(image, force=True))
docker_ls_output = event_loop.run_until_complete(docker.images.list())
local_images = parse_docker_ls_output(docker_ls_output)
assert image not in local_images
    # Pull the image back from the registry
captured = helper.run_cli(["image", "pull", f"image:{image}"])
# stderr has "Used image ..." lines
# assert not captured.err
assert captured.out.endswith(image)
    # check that the image was pulled locally (the fixture deletes it for cleanup)
docker_ls_output = event_loop.run_until_complete(docker.images.list())
local_images = parse_docker_ls_output(docker_ls_output)
assert image in local_images
# Execute image and check result
captured = helper.run_cli(["-q", "run", "--no-wait-start", str(image_url)])
assert not captured.err
job_id = captured.out
assert job_id.startswith("job-")
helper.wait_job_change_state_to(job_id, JobStatus.SUCCEEDED, JobStatus.FAILED)
helper.check_job_output(job_id, re.escape(tag))
@pytest.mark.e2e
def test_image_tags(helper: Helper, image: str, tag: str) -> None:
# push image
captured = helper.run_cli(["image", "push", image])
image_full_str = f"image://{helper.cluster_name}/{helper.username}/{image}"
assert captured.out.endswith(image_full_str)
image_full_str_no_tag = image_full_str.replace(f":{tag}", "")
delay = 0
t0 = time.time()
while time.time() - t0 < 600:
time.sleep(delay)
# check the tag is present now
try:
captured = helper.run_cli(
["image", "tags", image_full_str_no_tag], timeout=300
)
except subprocess.TimeoutExpired:
continue
if tag in map(lambda s: s.strip(), captured.out.splitlines()):
break
# Give a chance to sync remote registries
delay = min(delay * 2 + 1, 15)
else:
        raise AssertionError(
            f"Timed out waiting for tag {tag} to appear in {captured.out}"
        )
cmd = f"neuro image tags {image_full_str}"
result = subprocess.run(cmd, capture_output=True, shell=True)
assertion_msg = f"Command {cmd} should fail: {result.stdout!r} {result.stderr!r}"
assert result.returncode, assertion_msg
image_full_str_latest_tag = image_full_str.replace(f":{tag}", ":latest")
cmd = f"neuro image tags {image_full_str_latest_tag}"
result = subprocess.run(cmd, capture_output=True, shell=True)
assertion_msg = f"Command {cmd} should fail: {result.stdout!r} {result.stderr!r}"
assert result.returncode, assertion_msg
@pytest.mark.e2e
async def test_images_delete(
helper: Helper,
docker: aiodocker.Docker,
) -> None:
image_ref = await generate_image(docker, tag="latest")
name, _ = image_ref.split(":")
img_name = f"image:{name}"
helper.run_cli(["image", "push", name + ":latest"])
captured = helper.run_cli(["-q", "image", "ls"])
assert img_name in captured.out
helper.run_cli(["image", "rm", img_name])
for _ in range(10):
captured = helper.run_cli(["-q", "image", "ls"])
if img_name in captured.out:
time.sleep(5)
else:
break
assert img_name not in captured.out
@pytest.mark.e2e
async def test_images_push_with_specified_name(
helper: Helper,
image: str,
tag: str,
event_loop: asyncio.AbstractEventLoop,
docker: aiodocker.Docker,
) -> None:
    # Let's push the image
image_no_tag = image.replace(f":{tag}", "")
pushed_no_tag = f"{image_no_tag}-pushed"
pulled_no_tag = f"{image_no_tag}-pulled"
pulled = f"{pulled_no_tag}:{tag}"
captured = helper.run_cli(["image", "push", image, f"image:{pushed_no_tag}:{tag}"])
# stderr has "Used image ..." lines
# assert not captured.err
async with helper.client() as client:
image_pushed_full_str = (
f"image://{client.config.cluster_name}/"
f"{client.config.username}/{pushed_no_tag}:{tag}"
)
assert captured.out.endswith(image_pushed_full_str)
# Check if image available on registry
docker_ls_output = await docker.images.list()
local_images = parse_docker_ls_output(docker_ls_output)
assert pulled not in local_images
async with helper.client() as client:
image_pushed_full = client.parse.remote_image(image_pushed_full_str)
image_url_without_tag = replace(image_pushed_full, tag=None)
imgs = await client.images.list()
assert image_url_without_tag in imgs
# check locally
docker_ls_output = await docker.images.list()
local_images = parse_docker_ls_output(docker_ls_output)
assert pulled not in local_images
    # Pull the image under another local name
captured = helper.run_cli(["image", "pull", f"image:{pushed_no_tag}:{tag}", pulled])
# stderr has "Used image ..." lines
# assert not captured.err
assert captured.out.endswith(pulled)
# check locally
docker_ls_output = await docker.images.list()
local_images = parse_docker_ls_output(docker_ls_output)
assert pulled in local_images
# TODO (A.Yushkovskiy): delete the pushed image in GCR
# delete locally
await docker.images.delete(pulled, force=True)
@pytest.mark.e2e
def test_docker_helper(
helper: Helper, image: str, tag: str, nmrc_path: Path, monkeypatch: Any
) -> None:
monkeypatch.setenv(CONFIG_ENV_NAME, str(nmrc_path or DEFAULT_CONFIG_PATH))
helper.run_cli(["config", "docker"])
registry = helper.registry_url.host
username = helper.username
full_tag = f"{registry}/{username}/{image}"
tag_cmd = f"docker tag {image} {full_tag}"
result = subprocess.run(tag_cmd, capture_output=True, shell=True)
assert (
not result.returncode
), f"Command {tag_cmd} failed: {result.stdout!r} {result.stderr!r} "
push_cmd = f"docker push {full_tag}"
result = subprocess.run(push_cmd, capture_output=True, shell=True)
assert (
not result.returncode
), f"Command {push_cmd} failed: {result.stdout!r} {result.stderr!r} "
# Run image and check output
image_url = f"image://{helper.cluster_name}/{username}/{image}"
job_id = helper.run_job_and_wait_state(
image_url, "", wait_state=JobStatus.SUCCEEDED, stop_state=JobStatus.FAILED
)
helper.check_job_output(job_id, re.escape(tag))
|
the-stack_0_2506 | #!/usr/bin/env python3
import sys
import time
import serial
import minimalmodbus
SERIAL_PORT = '/dev/ttyUSB0'
SERIAL_SPEED = 9600
SERIAL_TIMEOUT = 0.5
SERIAL_PARITY = serial.PARITY_NONE
MODBUS_DEBUG = False
class SaimanEnergyMeter:
"""A simple class for Saiman Energy Meters (Дала СА4-Э720 П RS)"""
def __init__(self,
address,
serial_port=SERIAL_PORT,
serial_speed=SERIAL_SPEED,
serial_timeout=SERIAL_TIMEOUT,
serial_parity=SERIAL_PARITY,
debug=MODBUS_DEBUG):
self.address = address
self.serial_port = serial_port
self.serial_speed = serial_speed
self.serial_timeout = serial_timeout
self.serial_parity = serial_parity
self.debug = debug
self.configure()
self.conn_open()
self.count_energy()
self.conn_close()
def configure(self):
try:
self.instrument = minimalmodbus.Instrument(self.serial_port, self.address)
self.instrument.serial.baudrate = self.serial_speed
self.instrument.serial.timeout = self.serial_timeout
self.instrument.serial.parity = self.serial_parity
self.instrument.debug = self.debug
except Exception as e:
print(e)
sys.exit(1)
def conn_open(self):
try:
#self.instrument._performCommand(0x8, '\x00\x00\x00\x00')
#self.instrument._performCommand(0x44, '')
self.instrument._performCommand(0x41, '\x01\x31\x31\x31\x31\x31\x31')
time.sleep(self.serial_timeout)
except Exception as e:
print(e)
sys.exit(1)
def conn_close(self):
try:
self.instrument._performCommand(0x42, '')
time.sleep(self.serial_timeout)
except:
pass
def get_reg(self, payload):
try:
time.sleep(self.serial_timeout)
return self.instrument._performCommand(0x3, payload)
except:
return 0
def count_energy(self):
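        # Interpretation (inferred from the parsing below, no vendor spec at hand):
        # each register block packs the three tariff counters (T1..T3) as
        # byte-reversed decimal strings; values from the two blocks are added
        # together and scaled by 1/100.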
self.reg1 = self.get_reg('\x01\x20\x00\x0E')
self.reg2 = self.get_reg('\x0D\xA0\x00\x0E')
if self.reg1 == 0 and self.reg2 == 0:
print("No data")
sys.exit(1)
elif self.reg1 == 0:
self.reg1 = self.reg2
elif self.reg2 == 0:
self.reg2 = self.reg1
self.t1 = ( int(''.join(minimalmodbus._hexlify(self.reg1).split(' ')[1:6][::-1])) +
int(''.join(minimalmodbus._hexlify(self.reg2).split(' ')[1:6][::-1])) )/100
self.t2 = ( int(''.join(minimalmodbus._hexlify(self.reg1).split(' ')[6:11][::-1])) +
int(''.join(minimalmodbus._hexlify(self.reg2).split(' ')[6:11][::-1])) )/100
self.t3 = ( int(''.join(minimalmodbus._hexlify(self.reg1).split(' ')[11:16][::-1])) +
int(''.join(minimalmodbus._hexlify(self.reg2).split(' ')[11:16][::-1])) )/100
self.total_energy = self.t1 + self.t2 + self.t3
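# Usage sketch (added for illustration, not part of the original script). The Modbus
# address below is an assumption; use the address configured on the actual meter.
if __name__ == "__main__":
    meter = SaimanEnergyMeter(address=1)
    # t1..t3 and total_energy are populated in __init__ via count_energy()
    print(f"T1={meter.t1} T2={meter.t2} T3={meter.t3} total={meter.total_energy}")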
|
the-stack_0_2507 | from hub2hub import TechnicHub, ble_handler
from time import sleep_ms
# Initialize ble handler and a technic hub
ble = ble_handler()
Thub = TechnicHub(ble)
# connect to a technic hub: press green button on the technic hub
Thub.connect()
# Servo motor connected to port A
Motor = Thub.port.A.motor
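# Note on stop_action (an assumption based on the comments below and common LEGO hub
# conventions, not verified against the hub2hub docs): 0 = float, 1 = brake, 2 = hold.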
# move to 180 degrees and hold
Motor.run_to_position(180,stop_action = 2)
sleep_ms(1000)
# move to 0 and float
Motor.run_to_position(0, stop_action = 0) |
the-stack_0_2509 | import time
import logging
from aiogram import types
from aiogram.dispatcher.middlewares import BaseMiddleware
HANDLED_STR = ['Unhandled', 'Handled']
class LoggingMiddleware(BaseMiddleware):
def __init__(self, logger=__name__):
if not isinstance(logger, logging.Logger):
logger = logging.getLogger(logger)
self.logger = logger
super(LoggingMiddleware, self).__init__()
def check_timeout(self, obj):
start = obj.conf.get('_start', None)
if start:
del obj.conf['_start']
return round((time.time() - start) * 1000)
return -1
async def on_pre_process_update(self, update: types.Update, data: dict):
update.conf['_start'] = time.time()
self.logger.debug(f"Received update [ID:{update.update_id}]")
async def on_post_process_update(self, update: types.Update, result, data: dict):
timeout = self.check_timeout(update)
if timeout > 0:
self.logger.info(f"Process update [ID:{update.update_id}]: [success] (in {timeout} ms)")
async def on_pre_process_message(self, message: types.Message, data: dict):
self.logger.info(f"Received message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]")
async def on_post_process_message(self, message: types.Message, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"message [ID:{message.message_id}] in chat [{message.chat.type}:{message.chat.id}]")
async def on_pre_process_edited_message(self, edited_message, data: dict):
self.logger.info(f"Received edited message [ID:{edited_message.message_id}] "
f"in chat [{edited_message.chat.type}:{edited_message.chat.id}]")
async def on_post_process_edited_message(self, edited_message, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"edited message [ID:{edited_message.message_id}] "
f"in chat [{edited_message.chat.type}:{edited_message.chat.id}]")
async def on_pre_process_channel_post(self, channel_post: types.Message, data: dict):
self.logger.info(f"Received channel post [ID:{channel_post.message_id}] "
f"in channel [ID:{channel_post.chat.id}]")
async def on_post_process_channel_post(self, channel_post: types.Message, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"channel post [ID:{channel_post.message_id}] "
f"in chat [{channel_post.chat.type}:{channel_post.chat.id}]")
async def on_pre_process_edited_channel_post(self, edited_channel_post: types.Message, data: dict):
self.logger.info(f"Received edited channel post [ID:{edited_channel_post.message_id}] "
f"in channel [ID:{edited_channel_post.chat.id}]")
async def on_post_process_edited_channel_post(self, edited_channel_post: types.Message, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"edited channel post [ID:{edited_channel_post.message_id}] "
f"in channel [ID:{edited_channel_post.chat.id}]")
async def on_pre_process_inline_query(self, inline_query: types.InlineQuery, data: dict):
self.logger.info(f"Received inline query [ID:{inline_query.id}] "
f"from user [ID:{inline_query.from_user.id}]")
async def on_post_process_inline_query(self, inline_query: types.InlineQuery, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"inline query [ID:{inline_query.id}] "
f"from user [ID:{inline_query.from_user.id}]")
async def on_pre_process_chosen_inline_result(self, chosen_inline_result: types.ChosenInlineResult, data: dict):
self.logger.info(f"Received chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] "
f"from user [ID:{chosen_inline_result.from_user.id}] "
f"result [ID:{chosen_inline_result.result_id}]")
async def on_post_process_chosen_inline_result(self, chosen_inline_result, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"chosen inline result [Inline msg ID:{chosen_inline_result.inline_message_id}] "
f"from user [ID:{chosen_inline_result.from_user.id}] "
f"result [ID:{chosen_inline_result.result_id}]")
async def on_pre_process_callback_query(self, callback_query: types.CallbackQuery, data: dict):
if callback_query.message:
if callback_query.message.from_user:
self.logger.info(f"Received callback query [ID:{callback_query.id}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] "
f"from user [ID:{callback_query.message.from_user.id}]")
else:
self.logger.info(f"Received callback query [ID:{callback_query.id}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]")
else:
self.logger.info(f"Received callback query [ID:{callback_query.id}] "
f"from inline message [ID:{callback_query.inline_message_id}] "
f"from user [ID:{callback_query.from_user.id}]")
async def on_post_process_callback_query(self, callback_query, results, data: dict):
if callback_query.message:
if callback_query.message.from_user:
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"callback query [ID:{callback_query.id}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}] "
f"from user [ID:{callback_query.message.from_user.id}]")
else:
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"callback query [ID:{callback_query.id}] "
f"in chat [{callback_query.message.chat.type}:{callback_query.message.chat.id}]")
else:
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"callback query [ID:{callback_query.id}] "
f"from inline message [ID:{callback_query.inline_message_id}] "
f"from user [ID:{callback_query.from_user.id}]")
async def on_pre_process_shipping_query(self, shipping_query: types.ShippingQuery, data: dict):
self.logger.info(f"Received shipping query [ID:{shipping_query.id}] "
f"from user [ID:{shipping_query.from_user.id}]")
async def on_post_process_shipping_query(self, shipping_query, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"shipping query [ID:{shipping_query.id}] "
f"from user [ID:{shipping_query.from_user.id}]")
async def on_pre_process_pre_checkout_query(self, pre_checkout_query: types.PreCheckoutQuery, data: dict):
self.logger.info(f"Received pre-checkout query [ID:{pre_checkout_query.id}] "
f"from user [ID:{pre_checkout_query.from_user.id}]")
async def on_post_process_pre_checkout_query(self, pre_checkout_query, results, data: dict):
self.logger.debug(f"{HANDLED_STR[bool(len(results))]} "
f"pre-checkout query [ID:{pre_checkout_query.id}] "
f"from user [ID:{pre_checkout_query.from_user.id}]")
async def on_pre_process_error(self, update, error, data: dict):
timeout = self.check_timeout(update)
if timeout > 0:
self.logger.info(f"Process update [ID:{update.update_id}]: [failed] (in {timeout} ms)")
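# Usage sketch (illustrative, not part of the original module): register the middleware
# on an aiogram Dispatcher so every update is timed and logged. `bot_token` is assumed
# to be defined by the application.
#
#     from aiogram import Bot, Dispatcher
#     bot = Bot(token=bot_token)
#     dp = Dispatcher(bot)
#     dp.middleware.setup(LoggingMiddleware())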
class LoggingFilter(logging.Filter):
"""
    Extend LogRecord with data from the Telegram Update object.
    Can be used in a logging config:
    .. code-block:: python3
'filters': {
'telegram': {
'()': LoggingFilter,
'include_content': True,
}
},
...
'handlers': {
'graypy': {
'()': GELFRabbitHandler,
'url': 'amqp://localhost:5672/',
'routing_key': '#',
'localname': 'testapp',
'filters': ['telegram']
},
},
"""
def __init__(self, name='', prefix='tg', include_content=False):
"""
:param name:
:param prefix: prefix for all records
:param include_content: pass into record all data from Update object
"""
super(LoggingFilter, self).__init__(name=name)
self.prefix = prefix
self.include_content = include_content
def filter(self, record: logging.LogRecord):
"""
        Extend LogRecord with data from the Telegram Update object.
:param record:
:return:
"""
update = types.Update.get_current(True)
if update:
for key, value in self.make_prefix(self.prefix, self.process_update(update)):
setattr(record, key, value)
return True
def process_update(self, update: types.Update):
"""
Parse Update object
:param update:
:return:
"""
yield 'update_id', update.update_id
if update.message:
yield 'update_type', 'message'
yield from self.process_message(update.message)
if update.edited_message:
yield 'update_type', 'edited_message'
yield from self.process_message(update.edited_message)
if update.channel_post:
yield 'update_type', 'channel_post'
yield from self.process_message(update.channel_post)
if update.edited_channel_post:
yield 'update_type', 'edited_channel_post'
yield from self.process_message(update.edited_channel_post)
if update.inline_query:
yield 'update_type', 'inline_query'
yield from self.process_inline_query(update.inline_query)
if update.chosen_inline_result:
yield 'update_type', 'chosen_inline_result'
yield from self.process_chosen_inline_result(update.chosen_inline_result)
if update.callback_query:
yield 'update_type', 'callback_query'
yield from self.process_callback_query(update.callback_query)
if update.shipping_query:
yield 'update_type', 'shipping_query'
yield from self.process_shipping_query(update.shipping_query)
if update.pre_checkout_query:
yield 'update_type', 'pre_checkout_query'
yield from self.process_pre_checkout_query(update.pre_checkout_query)
def make_prefix(self, prefix, iterable):
"""
Add prefix to the label
:param prefix:
:param iterable:
:return:
"""
        if not prefix:
            yield from iterable
            return
        for key, value in iterable:
yield f"{prefix}_{key}", value
def process_user(self, user: types.User):
"""
Generate user data
:param user:
:return:
"""
if not user:
return
yield 'user_id', user.id
if self.include_content:
yield 'user_full_name', user.full_name
if user.username:
yield 'user_name', f"@{user.username}"
def process_chat(self, chat: types.Chat):
"""
Generate chat data
:param chat:
:return:
"""
if not chat:
return
yield 'chat_id', chat.id
yield 'chat_type', chat.type
if self.include_content:
yield 'chat_title', chat.full_name
if chat.username:
yield 'chat_name', f"@{chat.username}"
def process_message(self, message: types.Message):
yield 'message_content_type', message.content_type
yield from self.process_user(message.from_user)
yield from self.process_chat(message.chat)
if not self.include_content:
return
if message.reply_to_message:
yield from self.make_prefix('reply_to', self.process_message(message.reply_to_message))
if message.forward_from:
yield from self.make_prefix('forward_from', self.process_user(message.forward_from))
if message.forward_from_chat:
yield from self.make_prefix('forward_from_chat', self.process_chat(message.forward_from_chat))
if message.forward_from_message_id:
yield 'message_forward_from_message_id', message.forward_from_message_id
if message.forward_date:
yield 'message_forward_date', message.forward_date
if message.edit_date:
yield 'message_edit_date', message.edit_date
if message.media_group_id:
yield 'message_media_group_id', message.media_group_id
if message.author_signature:
yield 'message_author_signature', message.author_signature
if message.text:
yield 'text', message.text or message.caption
yield 'html_text', message.html_text
elif message.audio:
yield 'audio', message.audio.file_id
elif message.animation:
yield 'animation', message.animation.file_id
elif message.document:
yield 'document', message.document.file_id
elif message.game:
yield 'game', message.game.title
elif message.photo:
yield 'photo', message.photo[-1].file_id
elif message.sticker:
yield 'sticker', message.sticker.file_id
elif message.video:
yield 'video', message.video.file_id
elif message.video_note:
yield 'video_note', message.video_note.file_id
elif message.voice:
yield 'voice', message.voice.file_id
elif message.contact:
yield 'contact_full_name', message.contact.full_name
yield 'contact_phone_number', message.contact.phone_number
elif message.venue:
yield 'venue_address', message.venue.address
yield 'location_latitude', message.venue.location.latitude
yield 'location_longitude', message.venue.location.longitude
elif message.location:
yield 'location_latitude', message.location.latitude
yield 'location_longitude', message.location.longitude
elif message.new_chat_members:
yield 'new_chat_members', [user.id for user in message.new_chat_members]
elif message.left_chat_member:
            yield 'left_chat_member', message.left_chat_member.id
elif message.invoice:
yield 'invoice_title', message.invoice.title
yield 'invoice_description', message.invoice.description
yield 'invoice_start_parameter', message.invoice.start_parameter
yield 'invoice_currency', message.invoice.currency
yield 'invoice_total_amount', message.invoice.total_amount
elif message.successful_payment:
yield 'successful_payment_currency', message.successful_payment.currency
yield 'successful_payment_total_amount', message.successful_payment.total_amount
yield 'successful_payment_invoice_payload', message.successful_payment.invoice_payload
yield 'successful_payment_shipping_option_id', message.successful_payment.shipping_option_id
yield 'successful_payment_telegram_payment_charge_id', message.successful_payment.telegram_payment_charge_id
yield 'successful_payment_provider_payment_charge_id', message.successful_payment.provider_payment_charge_id
elif message.connected_website:
yield 'connected_website', message.connected_website
elif message.migrate_from_chat_id:
yield 'migrate_from_chat_id', message.migrate_from_chat_id
elif message.migrate_to_chat_id:
yield 'migrate_to_chat_id', message.migrate_to_chat_id
elif message.pinned_message:
            yield from self.make_prefix('pinned_message', self.process_message(message.pinned_message))
elif message.new_chat_title:
yield 'new_chat_title', message.new_chat_title
elif message.new_chat_photo:
yield 'new_chat_photo', message.new_chat_photo[-1].file_id
# elif message.delete_chat_photo:
# yield 'delete_chat_photo', message.delete_chat_photo
# elif message.group_chat_created:
# yield 'group_chat_created', message.group_chat_created
# elif message.passport_data:
# yield 'passport_data', message.passport_data
def process_inline_query(self, inline_query: types.InlineQuery):
yield 'inline_query_id', inline_query.id
yield from self.process_user(inline_query.from_user)
if self.include_content:
yield 'inline_query_text', inline_query.query
if inline_query.location:
yield 'location_latitude', inline_query.location.latitude
yield 'location_longitude', inline_query.location.longitude
if inline_query.offset:
yield 'inline_query_offset', inline_query.offset
def process_chosen_inline_result(self, chosen_inline_result: types.ChosenInlineResult):
yield 'chosen_inline_result_id', chosen_inline_result.result_id
yield from self.process_user(chosen_inline_result.from_user)
if self.include_content:
yield 'inline_query_text', chosen_inline_result.query
if chosen_inline_result.location:
yield 'location_latitude', chosen_inline_result.location.latitude
yield 'location_longitude', chosen_inline_result.location.longitude
def process_callback_query(self, callback_query: types.CallbackQuery):
yield from self.process_user(callback_query.from_user)
yield 'callback_query_data', callback_query.data
if callback_query.message:
yield from self.make_prefix('callback_query_message', self.process_message(callback_query.message))
if callback_query.inline_message_id:
yield 'callback_query_inline_message_id', callback_query.inline_message_id
if callback_query.chat_instance:
yield 'callback_query_chat_instance', callback_query.chat_instance
if callback_query.game_short_name:
yield 'callback_query_game_short_name', callback_query.game_short_name
def process_shipping_query(self, shipping_query: types.ShippingQuery):
yield 'shipping_query_id', shipping_query.id
yield from self.process_user(shipping_query.from_user)
if self.include_content:
yield 'shipping_query_invoice_payload', shipping_query.invoice_payload
def process_pre_checkout_query(self, pre_checkout_query: types.PreCheckoutQuery):
yield 'pre_checkout_query_id', pre_checkout_query.id
yield from self.process_user(pre_checkout_query.from_user)
if self.include_content:
yield 'pre_checkout_query_currency', pre_checkout_query.currency
yield 'pre_checkout_query_total_amount', pre_checkout_query.total_amount
yield 'pre_checkout_query_invoice_payload', pre_checkout_query.invoice_payload
yield 'pre_checkout_query_shipping_option_id', pre_checkout_query.shipping_option_id
|
the-stack_0_2510 | import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.inspection import permutation_importance
from sklearn import svm
from data import *
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import eli5
from eli5.sklearn import PermutationImportance
'''
Train discriminative classifiers and obtain most important covariates
'''
def get_top_features(X, y, clf):
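    # Rank covariates by permutation importance (most important first) using
    # eli5's PermutationImportance wrapper around the fitted classifier.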
perm = PermutationImportance(clf, random_state=1).fit(X, y)
return np.argsort(-perm.feature_importances_)
def train_clf(X_train, y_train, n_cov, flag='lr'):
if flag == 'lr':
clf = LogisticRegression(penalty='l1', C=0.1, solver='saga')
clf.fit(X_train, y_train)
weights = np.abs(clf.coef_)
S = []
for w in weights:
S.append( (-w).argsort()[:n_cov] )
S = np.concatenate( S, axis=0 )
S = np.unique( S )
return clf, S
# if flag == 'rf':
# clf = RandomForestClassifier()
# clf.fit(X_train,y_train)
# result = permutation_importance(clf, X_train, y_train, n_repeats=10)
# sorted_idx = np.argsort(-result.importances_mean)
# S = sorted_idx[:n_cov]
# return clf, S
if flag == 'rf':
clf = RandomForestClassifier()
clf.fit(X_train,y_train)
        sorted_idx = get_top_features(X_train, y_train, clf=clf)
S = sorted_idx[:n_cov]
return clf, S
if flag == 'nn':
clf = MLPClassifier(solver='lbfgs', alpha=1e-5,
hidden_layer_sizes=(3, 2), random_state=1)
clf.fit(X_train,y_train)
        sorted_idx = get_top_features(X_train, y_train, clf=clf)
S = sorted_idx[:n_cov]
return clf, S
if flag == 'svm':
clf = svm.SVC(probability=True)
clf.fit(X_train, y_train)
        sorted_idx = get_top_features(X_train, y_train, clf=clf)
S = sorted_idx[:n_cov]
return clf, S
if flag == 'nb':
clf = BernoulliNB(alpha=1.0e-10)
clf.fit(X_train, y_train)
        sorted_idx = get_top_features(X_train, y_train, clf=clf)
S = sorted_idx[:n_cov]
return clf, S
if __name__ == '__main__':
X, y = get_spam_data("data/uciData.csv")
X_train, X_test, y_train, y_test = generate_train_test(X, y, q=0.3)
flag = 'svm'
clf, S = train_clf(X_train, y_train, 11, flag=flag)
#pr = clf.predict(X_test)
print( clf.predict_proba(X_test) )
#
|
the-stack_0_2511 | # Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rmf_adapter as adpt
import rmf_adapter.plan as plan
import rmf_adapter.schedule as schedule
from rmf_fleet_msgs.msg import DockSummary
import numpy as np
import threading
import math
import copy
import enum
import time
from datetime import timedelta
from .RobotClientAPI import RobotAPI
# States for RobotCommandHandle's state machine used when guiding robot along
# a new path
class RobotState(enum.IntEnum):
IDLE = 0
WAITING = 1
MOVING = 2
class RobotCommandHandle(adpt.RobotCommandHandle):
def __init__(self,
name,
config,
node,
graph,
vehicle_traits,
transforms,
map_name,
initial_waypoint,
initial_orientation,
charger_waypoint,
update_frequency,
adapter):
adpt.RobotCommandHandle.__init__(self)
self.name = name
self.config = config
self.node = node
self.graph = graph
self.vehicle_traits = vehicle_traits
self.transforms = transforms
self.map_name = map_name
self.initial_waypoint = initial_waypoint
self.initial_orientation = initial_orientation
# Get the index of the charger waypoint
waypoint = self.graph.find_waypoint(charger_waypoint)
assert waypoint, f"Charger waypoint {charger_waypoint} \
does not exist in the navigation graph"
self.charger_waypoint_index = waypoint.index
self.charger_is_set = False
self.update_frequency = update_frequency
self.update_handle = None # RobotUpdateHandle
self.battery_soc = 1.0
self.api = None
self.position = [] # (x,y,theta) in RMF coordinates (meters, radians)
self.initialized = False
self.state = RobotState.IDLE
self.dock_name = ""
self.adapter = adapter
self.requested_waypoints = [] # RMF Plan waypoints
self.remaining_waypoints = []
self.path_finished_callback = None
self.next_arrival_estimator = None
self.path_index = 0
self.docking_finished_callback = None
# RMF location trackers
self.last_known_lane_index = None
self.last_known_waypoint_index = None
# if robot is waiting at a waypoint. This is a Graph::Waypoint index
self.on_waypoint = None
# if robot is travelling on a lane. This is a Graph::Lane index
self.on_lane = None
self.target_waypoint = None # this is a Plan::Waypoint
# The graph index of the waypoint the robot is currently docking into
self.dock_waypoint_index = None
# Threading variables
self._lock = threading.Lock()
self._follow_path_thread = None
self._quit_path_event = threading.Event()
self._dock_thread = None
self._quit_dock_event = threading.Event()
# Establish connection with the robot
self.api = RobotAPI(
self.config['base_url'],
self.config['user'],
self.config['password'],
robot_name=name,
config=self.config,
vehicle_traits=vehicle_traits,
)
assert self.api.connected, "Unable to connect to Robot API server"
self.position = self.get_position() # RMF coordinates
assert len(
self.position) > 2, "Unable to get current location of the robot"
self.node.get_logger().info(
f"The robot is starting at: [{self.position[0]:.2f}, "
f"{self.position[1]:.2f}, {self.position[2]:.2f}]")
# Obtain StartSet for the robot
self.starts = []
time_now = self.adapter.now()
if (self.initial_waypoint is not None) and\
(self.initial_orientation is not None):
self.node.get_logger().info(
f"Using provided initial waypoint [{self.initial_waypoint}] "
f"and orientation [{self.initial_orientation:.2f}] to "
f"initialize starts for robot [{self.name}]")
# Get the waypoint index for initial_waypoint
initial_waypoint_index = self.graph.find_waypoint(
self.initial_waypoint).index
self.starts = [plan.Start(time_now,
initial_waypoint_index,
self.initial_orientation)]
else:
self.node.get_logger().info(
f"Running compute_plan_starts for robot:{self.name}")
self.starts = plan.compute_plan_starts(
self.graph,
self.map_name,
self.position,
time_now)
if self.starts is None or len(self.starts) == 0:
self.node.get_logger().error(
f"Unable to determine StartSet for {self.name}")
return
start = self.starts[0]
# Update tracking variables
if start.lane is not None: # If the robot is on a lane
self.last_known_lane_index = start.lane
self.on_lane = start.lane
self.last_known_waypoint_index = start.waypoint
else: # Otherwise, the robot is on a waypoint
self.last_known_waypoint_index = start.waypoint
self.on_waypoint = start.waypoint
self.state_update_timer = self.node.create_timer(
1.0 / self.update_frequency,
self.update)
self.initialized = True
def clear(self):
with self._lock:
self.requested_waypoints = []
self.remaining_waypoints = []
self.path_finished_callback = None
self.next_arrival_estimator = None
self.docking_finished_callback = None
self.state = RobotState.IDLE
def stop(self):
# Stop the robot. Tracking variables should remain unchanged.
while True:
self.node.get_logger().info("Requesting robot to stop...")
if self.api.stop():
break
time.sleep(1.0)
if self._follow_path_thread is not None:
self._quit_path_event.set()
if self._follow_path_thread.is_alive():
self._follow_path_thread.join()
self._follow_path_thread = None
self.clear()
def find_location(self, target_pose):
        if self.target_waypoint.graph_index is not None and \
                self.dist(self.position, target_pose) < 0.5:
            self.on_waypoint = self.target_waypoint.graph_index
        elif self.last_known_waypoint_index is not None and \
                self.dist(
                    self.position,
                    self.graph.get_waypoint(
                        self.last_known_waypoint_index).location) < 0.5:
            self.on_waypoint = self.last_known_waypoint_index
else:
self.on_lane = None # update_off_grid()
self.on_waypoint = None
def follow_new_path(
self,
waypoints,
next_arrival_estimator,
path_finished_callback):
self.stop()
self._quit_path_event.clear()
self.node.get_logger().info("Received new path to follow...")
self.remaining_waypoints = self.get_remaining_waypoints(waypoints)
assert next_arrival_estimator is not None
assert path_finished_callback is not None
self.next_arrival_estimator = next_arrival_estimator
self.path_finished_callback = path_finished_callback
def _follow_path():
target_pose = []
while (
self.remaining_waypoints or
self.state == RobotState.MOVING or
self.state == RobotState.WAITING):
# Check if we need to abort
if self._quit_path_event.is_set():
self.node.get_logger().info("Aborting previously followed "
"path")
return
# State machine
if self.state == RobotState.IDLE:
# Assign the next waypoint
self.target_waypoint = self.remaining_waypoints[0][1]
self.path_index = self.remaining_waypoints[0][0]
# Move robot to next waypoint
target_pose = self.target_waypoint.position
[x, y] = self.transforms["rmf_to_robot"].transform(
target_pose[:2])
theta = target_pose[2] + \
self.transforms['orientation_offset']
# ------------------------ #
# IMPLEMENT YOUR CODE HERE #
                    # Ensure x, y, theta are in units that api.navigate() expects #
# ------------------------ #
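                    # A possible conversion sketch, assuming a hypothetical
                    # robot API that expects millimetres and degrees (adjust
                    # to whatever api.navigate() actually expects):
                    # x, y = x * 1000.0, y * 1000.0
                    # theta = math.degrees(theta)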
response = self.api.navigate([x, y, theta], self.map_name)
if response:
self.remaining_waypoints = self.remaining_waypoints[1:]
self.state = RobotState.MOVING
else:
self.node.get_logger().info(
f"Robot {self.name} failed to navigate to "
f"[{x:.0f}, {y:.0f}, {theta:.0f}] coordinates. "
f"Retrying...")
time.sleep(1.0)
elif self.state == RobotState.WAITING:
time.sleep(1.0)
time_now = self.adapter.now()
with self._lock:
if self.target_waypoint is not None:
waypoint_wait_time = self.target_waypoint.time
if (waypoint_wait_time < time_now):
self.state = RobotState.IDLE
else:
if self.path_index is not None:
delta = waypoint_wait_time - time_now
self.node.get_logger().info(
f"Waiting for {(delta).seconds}s")
self.next_arrival_estimator(
self.path_index,
timedelta(seconds=0.0))
elif self.state == RobotState.MOVING:
time.sleep(1.0)
# Check if we have reached the target
with self._lock:
if (self.api.navigation_completed()):
self.node.get_logger().info(
f"Robot [{self.name}] has reached its target "
f"waypoint")
self.state = RobotState.WAITING
if (self.target_waypoint.graph_index is not None):
self.on_waypoint = \
self.target_waypoint.graph_index
self.last_known_waypoint_index = \
self.on_waypoint
else:
self.on_waypoint = None # still on a lane
else:
# Update the lane the robot is on
lane = self.get_current_lane()
if lane is not None:
self.on_waypoint = None
self.on_lane = lane
else:
# The robot may either be on the previous
# waypoint or the target one
self.find_location(target_pose)
# ------------------------ #
# IMPLEMENT YOUR CODE HERE #
# If your robot does not have an API to report the
# remaining travel duration, replace the API call
# below with an estimation
# ------------------------ #
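                            # One possible estimate, assuming straight-line
                            # travel at the nominal linear velocity from the
                            # vehicle traits (hypothetical attribute names):
                            # duration = self.dist(self.position, target_pose) / \
                            #     self.vehicle_traits.linear.nominal_velocity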
duration = self.api.navigation_remaining_duration()
if self.path_index is not None:
self.next_arrival_estimator(
self.path_index, timedelta(seconds=duration))
self.path_finished_callback()
self.node.get_logger().info(
f"Robot {self.name} has successfully navigated along "
f"requested path.")
self._follow_path_thread = threading.Thread(
target=_follow_path)
self._follow_path_thread.start()
def dock(
self,
dock_name,
docking_finished_callback):
''' Docking is very specific to each application. Hence, the user will
need to customize this function accordingly. In this example, we
assume the dock_name is the same as the name of the waypoints that
the robot is trying to dock into. We then call api.start_process()
to initiate the robot specific process. This could be to start a
cleaning process or load/unload a cart for delivery.
'''
self._quit_dock_event.clear()
if self._dock_thread is not None:
self._dock_thread.join()
self.dock_name = dock_name
assert docking_finished_callback is not None
self.docking_finished_callback = docking_finished_callback
# Get the waypoint that the robot is trying to dock into
dock_waypoint = self.graph.find_waypoint(self.dock_name)
assert(dock_waypoint)
self.dock_waypoint_index = dock_waypoint.index
def _dock():
# Request the robot to start the relevant process
self.node.get_logger().info(
f"Requesting robot {self.name} to dock at {self.dock_name}")
self.api.start_process(self.dock_name, self.map_name)
with self._lock:
self.on_waypoint = None
self.on_lane = None
time.sleep(1.0)
# ------------------------ #
# IMPLEMENT YOUR CODE HERE #
# With whatever logic you need for docking #
# ------------------------ #
while (not self.api.docking_completed()):
# Check if we need to abort
if self._quit_dock_event.is_set():
self.node.get_logger().info("Aborting docking")
return
self.node.get_logger().info("Robot is docking...")
time.sleep(1.0)
with self._lock:
self.on_waypoint = self.dock_waypoint_index
self.dock_waypoint_index = None
self.docking_finished_callback()
self.node.get_logger().info("Docking completed")
self._dock_thread = threading.Thread(target=_dock)
self._dock_thread.start()
def get_position(self):
''' This helper function returns the live position of the robot in the
RMF coordinate frame'''
position = self.api.position()
if position is not None:
x, y = self.transforms['robot_to_rmf'].transform(
[position[0], position[1]])
theta = position[2] - \
self.transforms['orientation_offset']
# ------------------------ #
# IMPLEMENT YOUR CODE HERE #
# Ensure x, y are in meters and theta in radians #
# ------------------------ #
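            # For example, if the hypothetical robot API reported millimetres
            # and degrees, the raw position would need converting before use:
            # position = [position[0] / 1000.0, position[1] / 1000.0,
            #             math.radians(position[2])]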
# Wrap theta between [-pi, pi]. Else arrival estimate will
# assume robot has to do full rotations and delay the schedule
if theta > np.pi:
theta = theta - (2 * np.pi)
if theta < -np.pi:
theta = (2 * np.pi) + theta
return [x, y, theta]
else:
self.node.get_logger().error(
"Unable to retrieve position from robot.")
return self.position
def get_battery_soc(self):
battery_soc = self.api.battery_soc()
if battery_soc is not None:
return battery_soc
else:
self.node.get_logger().error(
"Unable to retrieve battery data from robot.")
return self.battery_soc
def update(self):
self.position = self.get_position()
self.battery_soc = self.get_battery_soc()
if self.update_handle is not None:
self.update_state()
def update_state(self):
self.update_handle.update_battery_soc(self.battery_soc)
if not self.charger_is_set:
if ("max_delay" in self.config.keys()):
max_delay = self.config["max_delay"]
self.node.get_logger().info(
f"Setting max delay to {max_delay}s")
self.update_handle.set_maximum_delay(max_delay)
if (self.charger_waypoint_index < self.graph.num_waypoints):
self.update_handle.set_charger_waypoint(
self.charger_waypoint_index)
else:
self.node.get_logger().warn(
"Invalid waypoint supplied for charger. "
"Using default nearest charger in the map")
self.charger_is_set = True
# Update position
with self._lock:
if (self.on_waypoint is not None): # if robot is on a waypoint
self.update_handle.update_current_waypoint(
self.on_waypoint, self.position[2])
elif (self.on_lane is not None): # if robot is on a lane
# We only keep track of the forward lane of the robot.
# However, when calling this update it is recommended to also
# pass in the reverse lane so that the planner does not assume
# the robot can only head forwards. This would be helpful when
# the robot is still rotating on a waypoint.
forward_lane = self.graph.get_lane(self.on_lane)
entry_index = forward_lane.entry.waypoint_index
exit_index = forward_lane.exit.waypoint_index
reverse_lane = self.graph.lane_from(exit_index, entry_index)
lane_indices = [self.on_lane]
                if reverse_lane is not None:  # the lane has a reverse counterpart
lane_indices.append(reverse_lane.index)
self.update_handle.update_current_lanes(
self.position, lane_indices)
elif (self.dock_waypoint_index is not None):
self.update_handle.update_off_grid_position(
self.position, self.dock_waypoint_index)
# if robot is merging into a waypoint
elif (self.target_waypoint is not None and
self.target_waypoint.graph_index is not None):
self.update_handle.update_off_grid_position(
self.position, self.target_waypoint.graph_index)
else: # if robot is lost
self.update_handle.update_lost_position(
self.map_name, self.position)
def get_current_lane(self):
def projection(current_position,
target_position,
lane_entry,
lane_exit):
px, py, _ = current_position
p = np.array([px, py])
t = np.array(target_position)
entry = np.array(lane_entry)
exit = np.array(lane_exit)
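            # Signed projection of (p - t) onto the lane direction
            # (exit - entry): negative means the robot is before the
            # reference point t along the lane, non-negative means at or
            # past it.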
return np.dot(p - t, exit - entry)
if self.target_waypoint is None:
return None
approach_lanes = self.target_waypoint.approach_lanes
# Spin on the spot
if approach_lanes is None or len(approach_lanes) == 0:
return None
# Determine which lane the robot is currently on
for lane_index in approach_lanes:
lane = self.graph.get_lane(lane_index)
p0 = self.graph.get_waypoint(lane.entry.waypoint_index).location
p1 = self.graph.get_waypoint(lane.exit.waypoint_index).location
p = self.position
before_lane = projection(p, p0, p0, p1) < 0.0
after_lane = projection(p, p1, p0, p1) >= 0.0
if not before_lane and not after_lane: # The robot is on this lane
return lane_index
return None
def dist(self, A, B):
        ''' Euclidean distance between A(x,y) and B(x,y)'''
assert(len(A) > 1)
assert(len(B) > 1)
return math.sqrt((A[0] - B[0])**2 + (A[1] - B[1])**2)
def get_remaining_waypoints(self, waypoints: list):
'''
The function returns a list where each element is a tuple of the index
of the waypoint and the waypoint present in waypoints. This function
may be modified if waypoints in a path need to be filtered.
'''
assert(len(waypoints) > 0)
remaining_waypoints = []
for i in range(len(waypoints)):
remaining_waypoints.append((i, waypoints[i]))
return remaining_waypoints
|
the-stack_0_2512 |
#http://www.compaq.com/fortran/docs/
import os
import sys
from numpy.distutils.fcompiler import FCompiler
from distutils.errors import DistutilsPlatformError
compilers = ['CompaqFCompiler']
if os.name != 'posix' or sys.platform[:6] == 'cygwin' :
# Otherwise we'd get a false positive on posix systems with
# case-insensitive filesystems (like darwin), because we'll pick
# up /bin/df
compilers.append('CompaqVisualFCompiler')
class CompaqFCompiler(FCompiler):
compiler_type = 'compaq'
description = 'Compaq Fortran Compiler'
version_pattern = r'Compaq Fortran (?P<version>[^\s]*).*'
if sys.platform[:5]=='linux':
fc_exe = 'fort'
else:
fc_exe = 'f90'
executables = {
'version_cmd' : ['<F90>', "-version"],
'compiler_f77' : [fc_exe, "-f77rtl", "-fixed"],
'compiler_fix' : [fc_exe, "-fixed"],
'compiler_f90' : [fc_exe],
'linker_so' : ['<F90>'],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"]
}
module_dir_switch = '-module ' # not tested
module_include_switch = '-I'
def get_flags(self):
return ['-assume no2underscore', '-nomixed_str_len_arg']
def get_flags_debug(self):
return ['-g', '-check bounds']
def get_flags_opt(self):
return ['-O4', '-align dcommons', '-assume bigarrays',
'-assume nozsize', '-math_library fast']
def get_flags_arch(self):
return ['-arch host', '-tune host']
def get_flags_linker_so(self):
if sys.platform[:5]=='linux':
return ['-shared']
return ['-shared', '-Wl,-expect_unresolved,*']
class CompaqVisualFCompiler(FCompiler):
compiler_type = 'compaqv'
description = 'DIGITAL or Compaq Visual Fortran Compiler'
version_pattern = (r'(DIGITAL|Compaq) Visual Fortran Optimizing Compiler'
r' Version (?P<version>[^\s]*).*')
compile_switch = '/compile_only'
object_switch = '/object:'
library_switch = '/OUT:' #No space after /OUT:!
static_lib_extension = ".lib"
static_lib_format = "%s%s"
module_dir_switch = '/module:'
module_include_switch = '/I'
ar_exe = 'lib.exe'
fc_exe = 'DF'
if sys.platform=='win32':
from numpy.distutils.msvccompiler import MSVCCompiler
try:
m = MSVCCompiler()
m.initialize()
ar_exe = m.lib
except DistutilsPlatformError:
pass
except AttributeError as e:
if '_MSVCCompiler__root' in str(e):
print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (e))
else:
raise
except IOError as e:
if not "vcvarsall.bat" in str(e):
print("Unexpected IOError in", __file__)
raise e
except ValueError as e:
if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise e
executables = {
'version_cmd' : ['<F90>', "/what"],
'compiler_f77' : [fc_exe, "/f77rtl", "/fixed"],
'compiler_fix' : [fc_exe, "/fixed"],
'compiler_f90' : [fc_exe],
'linker_so' : ['<F90>'],
'archiver' : [ar_exe, "/OUT:"],
'ranlib' : None
}
def get_flags(self):
return ['/nologo', '/MD', '/WX', '/iface=(cref,nomixed_str_len_arg)',
'/names:lowercase', '/assume:underscore']
def get_flags_opt(self):
return ['/Ox', '/fast', '/optimize:5', '/unroll:0', '/math_library:fast']
def get_flags_arch(self):
return ['/threads']
def get_flags_debug(self):
return ['/debug']
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
from numpy.distutils import customized_fcompiler
print(customized_fcompiler(compiler='compaq').get_version())
|
the-stack_0_2514 | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
rbp_files = dict(snakemake.input)
values = []
for rbp, f_name in rbp_files.items():
df = pd.read_csv(f_name)
positives = len(df[df['class'] == 1])
negatives = len(df[df['class'] == 0])
neg_ratio = negatives / (positives + negatives)
values.append((positives, negatives, neg_ratio))
values = np.array(values)
plt.plot([0,np.max(values)], [0,np.max(values)], c="orange", zorder=1)
plt.scatter(values[:, 0], values[:, 1], s=8, alpha=0.5, zorder=2)
plt.xlabel("Number of positives")
plt.ylabel("Number of negatives")
plt.title(f'{snakemake.wildcards["cell_line"]}')
plt.savefig(snakemake.output[0], dpi=300)
|
the-stack_0_2515 | from unittest import TestCase
from datetime import datetime
from extract_ride_data import ZeroLogHeader, LogEntry, ZeroLogEntry
class TestLogHeader(TestCase):
def test_decode(self):
log_text = '''Zero MBB log
Serial number 2015_mbb_48e0f7_00720
VIN 538SD9Z37GCG06073
Firmware rev. 51
Board rev. 3
Model DSR
Printing 8397 of 8397 log entries..
Entry Time of Log Event Conditions
+--------+----------------------+--------------------------+----------------------------------
00001 05/13/2018 10:06:43 DEBUG: Sevcon Contactor Drive ON.
'''
log_lines = log_text.splitlines()
log_header = ZeroLogHeader(log_lines)
self.assertEqual({
'mbb': {
'board_rev': '3',
'firmware_rev': '51',
'model': 'DSR',
'serial_no': '2015_mbb_48e0f7_00720',
'vin': '538SD9Z37GCG06073'},
'model': {'manufacturer': 'Zero Motorcycles',
'model': 'DSR',
'motor': {'power': '16kW', 'size': '75-7R'},
'pack_capacity': '13.0',
'plant_location': 'Santa Cruz, CA',
'platform': 'SDS',
'year': 2016},
'num_entries': 8397,
'num_entries_expected': 8397,
'source': 'MBB',
'title': 'Zero MBB log'
}, log_header.to_json())
class TestLogEntry(TestCase):
def test_decode(self):
log_entry = LogEntry(' 2018-05-20 16:36:56 \t something happened \n', field_sep='\t')
self.assertEqual(log_entry.field_values, ['2018-05-20 16:36:56', 'something happened'])
def test_order(self):
first_entry = LogEntry('2018-05-20 16:36:56\tsomething happened', field_sep='\t')
first_entry.timestamp = first_entry.decode_timestamp(first_entry.field_values[0])
second_entry = LogEntry('2018-05-20 16:37:00\tsomething happened later', field_sep='\t')
second_entry.timestamp = second_entry.decode_timestamp(second_entry.field_values[0])
self.assertLess(first_entry, second_entry)
self.assertGreater(second_entry, first_entry)
def test_to_csv(self):
log_entry = LogEntry(' 2018-05-20 16:36:56 \t something happened \n', field_sep='\t')
self.assertEqual('2018-05-20 16:36:56,something happened',
log_entry.to_csv(['timestamp', 'message']))
class TestZeroLogEntry(TestCase):
def assert_consistent_log_entry(self, log_entry: ZeroLogEntry):
self.assertIsInstance(log_entry.entry, int)
self.assertIsInstance(log_entry.timestamp, datetime)
self.assertLess(0, log_entry.entry)
self.assertIsInstance(log_entry.event, str)
self.assertIsInstance(log_entry.component, str)
self.assertIsInstance(log_entry.conditions, dict)
def test_conditions_to_dict(self):
conditions = ZeroLogEntry.conditions_to_dict(
'''PackTemp: h 21C, l 20C, PackSOC: 91%, Vpack:113.044V, MotAmps: 0, BattAmps: 2,\
Mods: 11, MotTemp: 26C, CtrlTemp: 19C, AmbTemp: 20C, MotRPM: 0, Odo:48809km'''
)
self.assertDictEqual({'AmbTemp': '20C',
'BattAmps': '2',
'CtrlTemp': '19C',
'Mods': '11',
'MotAmps': '0',
'MotRPM': '0',
'MotTemp': '26C',
'Odo': '48809km',
'PackSOC': '91%',
'PackTemp (h)': '21C',
'PackTemp (l)': '20C',
'Vpack': '113.044V'}, conditions)
conditions = ZeroLogEntry.conditions_to_dict(
'''Bmvolts: 92062, Cmvolts: 118937, Amps: 0, RPM: 0''')
self.assertDictEqual({'Bmvolts': '92062',
'Cmvolts': '118937',
'Amps': '0',
'RPM': '0'},
conditions)
def test_disarmed(self):
log_entry = ZeroLogEntry('''
00001 05/21/2018 21:12:20 Disarmed \
PackTemp: h 21C, l 20C, PackSOC: 91%, Vpack:113.044V, MotAmps: 0, BattAmps: 2, Mods: 11,\
MotTemp: 26C, CtrlTemp: 19C, AmbTemp: 20C, MotRPM: 0, Odo:48809km
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(1, log_entry.entry)
self.assertEqual('', log_entry.event_type)
self.assertEqual('', log_entry.event_level)
self.assertEqual('Disarmed', log_entry.event)
self.assertDictEqual({'AmbTemp': '20C',
'BattAmps': '2',
'CtrlTemp': '19C',
'Mods': '11',
'MotAmps': '0',
'MotRPM': '0',
'MotTemp': '26C',
'Odo': '48809km',
'PackSOC': '91%',
'PackTemp (h)': '21C',
'PackTemp (l)': '20C',
'Vpack': '113.044V'},
log_entry.conditions)
def test_info_only_data(self):
log_entry = ZeroLogEntry('''
07558 05/20/2018 16:36:56 INFO: Bmvolts: 92062, Cmvolts: 118937, Amps: 0, RPM: 0
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(7558, log_entry.entry)
self.assertEqual('2018-05-20 16:36:56', str(log_entry.timestamp))
self.assertEqual('INFO', log_entry.event_level)
self.assertEqual('', log_entry.event)
self.assertDictEqual({'Bmvolts': '92062',
'Cmvolts': '118937',
'Amps': '0',
'RPM': '0'},
log_entry.conditions)
def test_info_and_conditions_message_join(self):
log_entry = ZeroLogEntry('''
07544 05/20/2018 16:36:52 DEBUG: Module mode Change Requires Disconnect
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(7544, log_entry.entry)
self.assertEqual('DEBUG', log_entry.event_level)
self.assertEqual('Module mode Change Requires Disconnect', log_entry.event)
self.assertDictEqual({}, log_entry.conditions)
def test_current_limited(self):
log_entry = ZeroLogEntry('''
07396 05/20/2018 16:15:31\
Batt Dischg Cur Limited 281 A (40.72463768115942%), MinCell: 3383mV, MaxPackTemp: 34C''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(7396, log_entry.entry)
self.assertEqual('LIMIT', log_entry.event_type)
self.assertEqual('Batt Dischg Cur Limited', log_entry.event)
self.assertDictEqual({'MinCell': '3383mV',
'MaxPackTemp': '34C',
'BattAmps': '281',
'PackSOC': '40.72463768115942%'},
log_entry.conditions)
def test_error_entry(self):
log_entry = ZeroLogEntry('''
07758 05/20/2018 16:52:01\
ERROR: Module 01 maximum connection retries reached. Flagging ineligble.
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(7758, log_entry.entry)
self.assertEqual('ERROR', log_entry.event_level)
self.assertEqual('Battery', log_entry.component)
self.assertTrue(log_entry.is_battery_event())
self.assertEqual('Module maximum connection retries reached. Flagging ineligble.',
log_entry.event)
self.assertDictEqual({'Module': '01'}, log_entry.conditions)
self.assertEqual(1, log_entry.battery_module_no())
def test_module_not_connected(self):
log_entry = ZeroLogEntry('''
01525 05/14/2018 16:49:14 Module 1 not connected, PV 109511mV, diff 0mV, Allowed diff 750mV,\
pack cap 26Ah, batt curr 0A, PackTemp h 23C, l 23C, last CAN msg 4ms ago, lcell 3903mV,\
Max charge 10cx10, max discharge 100cx10
''')
self.assert_consistent_log_entry(log_entry)
self.assertEqual(1525, log_entry.entry)
self.assertEqual({'Module': '1',
'PV': '109511mV',
'diff': '0mV',
'Allowed diff': '750mV',
'pack cap': '26Ah',
'batt curr': '0A',
'PackTemp h': '23C',
'l': '23C',
'lcell': '3903mV',
'Max charge': '10cx10',
'max discharge': '100cx10'},
log_entry.conditions)
self.assertEqual(1, log_entry.battery_module_no())
|
the-stack_0_2522 | """SAElections URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = [
url(r'^$', 'director.views.direct', name='home'), # The voting/thank you page is here
url(r'^/$', 'director.views.direct', name='home'), # and also here
url(r'^login/$', 'authentication.views.user_login', name='login'),
url(r'^logout/$', 'authentication.views.user_logout', name='logout'),
url(r'^authenticate/$', 'authentication.views.user_auth', name='authenticate'),
url(r'^save-votes/(?P<votes>.+)/$', 'voting.views.save_votes', name='save-votes'),
url(r'^admin/', include(admin.site.urls)),
] + staticfiles_urlpatterns()
|
the-stack_0_2523 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Documentation: Menu.py
Classes and functions:
Menu main class of this modul
Description:
the base class for the menus. All other menu classes should be derived from this
one
"""
__author__ = "Fireclaw the Fox"
__license__ = """
Simplified BSD (BSD 2-Clause) License.
See License.txt or http://opensource.org/licenses/BSD-2-Clause for more info
"""
from direct.showbase.DirectObject import DirectObject
from direct.gui.DirectGui import DirectFrame
from direct.gui.DirectGui import DirectLabel
from direct.gui.DirectGui import DirectButton
from panda3d.core import TextNode
import time
class Menu(DirectObject):
def __init__(self):
"""Default constructor"""
# load the default fonts
#self.defaultFont = loader.loadFont("gui/fonts/UbuntuBold")
#self.defaultFontRegular = loader.loadFont("gui/fonts/UbuntuRegular")
# load the default button image map
self.defaultBtnMaps = base.loader.loadModel(
"gui/buttons/mainMenu/button_maps")
# this button can be created with the createBackButton function
self.btnBack = None
self.frameMain = DirectFrame(
# size of the frame
frameSize = (base.a2dLeft, base.a2dRight,
base.a2dTop, base.a2dBottom),
# position of the frame
pos = (0, 0, 0),
            # transparent bg color
frameColor = (0, 0, 0, 0))
self.title = DirectLabel(
scale = 0.25,
pos = (0, 0, -0.25),
frameColor = (0, 0, 0, 0),
text = "Missing Title",
text_align = TextNode.ACenter,
text_fg = (1,1,1,1),
#text_font = self.defaultFont
)
self.title.reparentTo(base.a2dTopCenter)
self.clock = DirectLabel(
scale = 0.1,
pos = (-.1,0,.1),
frameColor = (0, 0, 0, 0),
text = "00:00",
text_align = TextNode.ARight,
text_fg = (1,1,1,1))
self.clock.reparentTo(base.a2dBottomRight)
self.hide()
def showBase(self):
"""Show all GUI controls of the base menu"""
self.accept("RatioChanged", self.recalcAspectRatio)
self.frameMain.show()
self.clock.show()
self.title.show()
if self.btnBack:
self.btnBack.show()
if not taskMgr.hasTaskNamed("clock"):
taskMgr.add(self.clockTask, "clock")
def hideBase(self):
"""Hide all GUI controls of the base menu"""
self.ignore("RatioChanged")
self.frameMain.hide()
self.clock.hide()
self.title.hide()
if self.btnBack:
self.btnBack.hide()
if taskMgr.hasTaskNamed("clock"):
taskMgr.remove("clock")
def createBackButton(self, func):
"""Create the back button on the bottom left edge of the window"""
self.btnBack = DirectButton(
# size of the button
scale = (0.25, 0.25, 0.25),
# size of the text
text_scale = (0.5*1.33, 0.5, 0.5),
# the text on the button
text = "ABC",
# set the alignment to right
text_align = TextNode.ARight,
# put the text on the left side of the button
text_pos = (4.1, -0.15),
# set the text color to white
text_fg = (1,1,1,1),
# set the font of the text
#text_font = self.defaultFont,
# set the buttons images
geom = (self.defaultBtnMaps.find("**/button_ready"),
self.defaultBtnMaps.find("**/button_click"),
self.defaultBtnMaps.find("**/button_rollover"),
self.defaultBtnMaps.find("**/button_disabled")),
            # use a flat relief
relief = 1,
# make it transparent
frameColor = (0,0,0,0),
# No sink in when press
pressEffect = False,
# position on the window
pos = (0.0, 0, 0.2),
# the event which is thrown on clickSound
command = func,
# sounds that should be played
rolloverSound = None,
clickSound = None)
self.btnBack.reparentTo(base.a2dBottomLeft)
def clockTask(self, task):
self.clock["text"] = time.strftime("%H:%M")
return task.cont
def recalcAspectRatio(self):
"""get the new aspect ratio to resize the mainframe"""
self.frameMain["frameSize"] = (
base.a2dLeft, base.a2dRight,
base.a2dTop, base.a2dBottom)
|
the-stack_0_2525 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.connection import Connection, ConnectionError
try:
from ncclient.xml_ import NCElement
HAS_NCCLIENT = True
except ImportError:
HAS_NCCLIENT = False
try:
from lxml.etree import Element, fromstring, XMLSyntaxError
except ImportError:
from xml.etree.ElementTree import Element, fromstring
if sys.version_info < (2, 7):
from xml.parsers.expat import ExpatError as XMLSyntaxError
else:
from xml.etree.ElementTree import ParseError as XMLSyntaxError
NS_MAP = {'nc': "urn:ietf:params:xml:ns:netconf:base:1.0"}
def exec_rpc(module, *args, **kwargs):
connection = NetconfConnection(module._socket_path)
return connection.execute_rpc(*args, **kwargs)
class NetconfConnection(Connection):
def __init__(self, socket_path):
super(NetconfConnection, self).__init__(socket_path)
def __rpc__(self, name, *args, **kwargs):
"""Executes the json-rpc and returns the output received
        from the remote device.
:name: rpc method to be executed over connection plugin that implements jsonrpc 2.0
:args: Ordered list of params passed as arguments to rpc method
:kwargs: Dict of valid key, value pairs passed as arguments to rpc method
        For usage, refer to the respective connection plugin docs.
"""
self.check_rc = kwargs.pop('check_rc', True)
self.ignore_warning = kwargs.pop('ignore_warning', True)
response = self._exec_jsonrpc(name, *args, **kwargs)
if 'error' in response:
rpc_error = response['error'].get('data')
return self.parse_rpc_error(to_bytes(rpc_error, errors='surrogate_then_replace'))
return fromstring(to_bytes(response['result'], errors='surrogate_then_replace'))
def parse_rpc_error(self, rpc_error):
if self.check_rc:
try:
error_root = fromstring(rpc_error)
root = Element('root')
root.append(error_root)
error_list = root.findall('.//nc:rpc-error', NS_MAP)
if not error_list:
raise ConnectionError(to_text(rpc_error, errors='surrogate_then_replace'))
warnings = []
for error in error_list:
message_ele = error.find('./nc:error-message', NS_MAP)
if message_ele is None:
message_ele = error.find('./nc:error-info', NS_MAP)
message = message_ele.text if message_ele is not None else None
severity = error.find('./nc:error-severity', NS_MAP).text
if severity == 'warning' and self.ignore_warning and message is not None:
warnings.append(message)
else:
raise ConnectionError(to_text(rpc_error, errors='surrogate_then_replace'))
return warnings
except XMLSyntaxError:
raise ConnectionError(rpc_error)
def transform_reply():
return b'''<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/|comment()|processing-instruction()">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:attribute name="{local-name()}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
'''
# Note: Workaround for ncclient 0.5.3
def remove_namespaces(data):
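    # Apply the identity-transform stylesheet above via ncclient's NCElement
    # to strip XML namespaces from the reply before further parsing.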
if not HAS_NCCLIENT:
raise ImportError("ncclient is required but does not appear to be installed. "
"It can be installed using `pip install ncclient`")
return NCElement(data, transform_reply()).data_xml
|
the-stack_0_2527 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for dataset_metadata.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow_transform.saved import saved_transform_io
import unittest
from tensorflow.contrib import lookup
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import test
from tensorflow.python.util import compat
def _create_test_saved_model():
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.placeholder(tf.float32, shape=[1])
output = (input_float - 2.0) / 5.0
inputs = {'x': input_float}
outputs = {'x_scaled': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
return export_path
class SavedTransformIOTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
cls._test_saved_model = _create_test_saved_model()
def test_apply_saved_transform(self):
with tf.Graph().as_default() as graph:
with tf.Session().as_default() as session:
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], transformed_features.keys())
result_tensor = transformed_features['x_scaled']
self.assertTrue(isinstance(result_tensor, tf.Tensor))
self.assertAllEqual(session.run(result_tensor), [247.0])
self.assertEqual(graph.get_tensor_by_name('Const:0'), input_floats)
self.assertEqual(
graph.get_tensor_by_name('transform/truediv:0'),
result_tensor)
def test_apply_transform_extra_features_no_passthrough(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_floats = tf.constant([1234.0]) # tf.float32
input_features = {'x': input_floats,
'extra_1': tf.constant('1'),
'extra_2': tf.constant('2')}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_transform_type_mismatch(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_strings = tf.constant(['bogus']) # tf.string
input_features = {'x': input_strings}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_transform_shape_mismatch(self):
with self.assertRaises(ValueError):
with tf.Graph().as_default():
with tf.Session().as_default():
input_floats = tf.constant(1234.0) # tf.float32
input_features = {'x': input_floats}
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features)
def test_apply_saved_transform_to_tensor_inside_scope(self):
with tf.Graph().as_default():
with tf.name_scope('my_scope'):
with tf.Session().as_default() as session:
input_floats = tf.constant([1237.0]) # tf.float32
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], transformed_features.keys())
result_tensor = transformed_features['x_scaled']
self.assertAllEqual(session.run(result_tensor), [247.0])
def test_apply_saved_transform_to_tensor_outside_scope(self):
with tf.Graph().as_default():
input_floats = tf.constant([1237.0]) # tf.float32
with tf.name_scope('my_scope'):
with tf.Session().as_default() as session:
input_features = {'x': input_floats}
_, transformed_features = (
saved_transform_io.partially_apply_saved_transform_internal(
self._test_saved_model, input_features))
self.assertEqual(['x_scaled'], transformed_features.keys())
result_tensor = transformed_features['x_scaled']
self.assertAllEqual(session.run(result_tensor), [247.0])
def test_dense_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.placeholder(tf.float32)
# show that unrelated & unmapped placeholders do not interfere
tf.placeholder(tf.int64)
output = input_float / 5.0
inputs = {'input': input_float}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
# Using a computed input gives confidence that the graphs are fused.
input_float = tf.constant(25.0) * 2
inputs = {'input': input_float}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
result = session.run(outputs['output'])
# (25 * 2) / 5 = 10
self.assertEqual(10.0, result)
def test_table_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_string = tf.placeholder(tf.string)
# Map string through a table, in this case based on a constant tensor.
table = lookup.index_table_from_tensor(
tf.constant(['cat', 'dog', 'giraffe']))
output = table.lookup(input_string)
inputs = {'input': input_string}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
# Using a computed input gives confidence that the graphs are fused.
input_string = tf.constant('dog')
inputs = {'input': input_string}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
session.run(tf.tables_initializer())
result = session.run(outputs['output'])
self.assertEqual(1, result)
def test_sparse_roundtrip(self):
export_path = os.path.join(tempfile.mkdtemp(), 'export')
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_float = tf.sparse_placeholder(tf.float32)
output = input_float / 5.0
inputs = {'input': input_float}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
with tf.Graph().as_default():
with tf.Session().as_default() as session:
indices = np.array([[3, 2, 0], [4, 5, 1]], dtype=np.int64)
values = np.array([1.0, 2.0], dtype=np.float32)
shape = np.array([7, 9, 2], dtype=np.int64)
input_sparse = tf.SparseTensor(
indices=indices, values=values, dense_shape=shape)
# Using a computed input gives confidence that the graphs are fused
inputs = {'input': input_sparse * 10}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
output_sparse = outputs['output']
self.assertTrue(isinstance(output_sparse, tf.SparseTensor))
result = session.run(output_sparse)
# indices and shape unchanged; values divided by 2
self.assertEqual(indices.tolist(), result.indices.tolist())
self.assertEqual([2.0, 4.0], result.values.tolist())
self.assertEqual(shape.tolist(), result.dense_shape.tolist())
def test_stale_asset_collections_are_cleaned(self):
vocabulary_file = os.path.join(
compat.as_bytes(test.get_temp_dir()), compat.as_bytes('asset'))
file_io.write_string_to_file(vocabulary_file, 'foo bar baz')
export_path = os.path.join(tempfile.mkdtemp(), 'export')
# create a SavedModel including assets
with tf.Graph().as_default():
with tf.Session().as_default() as session:
input_string = tf.placeholder(tf.string)
# Map string through a table loaded from an asset file
table = lookup.index_table_from_file(
vocabulary_file, num_oov_buckets=12, default_value=12)
output = table.lookup(input_string)
inputs = {'input': input_string}
outputs = {'output': output}
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
# Load it and save it again repeatedly, verifying that the asset collections
# remain valid.
for _ in [1, 2, 3]:
with tf.Graph().as_default() as g:
with tf.Session().as_default() as session:
input_string = tf.constant('dog')
inputs = {'input': input_string}
_, outputs = (
saved_transform_io.partially_apply_saved_transform_internal(
export_path, inputs))
self.assertEqual(
1, len(g.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
self.assertEqual(
0, len(g.get_collection(tf.saved_model.constants.ASSETS_KEY)))
# Check that every ASSET_FILEPATHS refers to a Tensor in the graph.
# If not, get_tensor_by_name() raises KeyError.
for asset_path in g.get_collection(ops.GraphKeys.ASSET_FILEPATHS):
tensor_name = asset_path.name
g.get_tensor_by_name(tensor_name)
export_path = os.path.join(tempfile.mkdtemp(), 'export')
saved_transform_io.write_saved_transform_from_session(
session, inputs, outputs, export_path)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_2531 | import copy
import numpy as np
import torch
from .utils.utils import get_optimizer_fn
from .utils.schedule import (
PeriodicSchedule,
get_schedule,
)
from .agent import Agent
from .dqn import legal_moves_adapter
from .mlp import DistributionalMLP, ComplexMLP
class RainbowDQNAgent(Agent):
"""An agent implementing the DQN algorithm. Uses an epsilon greedy
exploration policy
"""
def __init__(
self,
*,
obs_dim,
act_dim,
v_min=0,
v_max=200,
atoms=51,
optimizer_fn=None,
id=0,
discount_rate=0.99,
grad_clip=None,
target_net_soft_update=False,
target_net_update_fraction=0.05,
target_net_update_schedule=None,
epsilon_schedule=None,
learn_schedule=None,
lr_schedule=None,
seed=42,
device="cpu",
double=True,
dueling=True,
noisy=True,
distributional=True,
max_replay_buffer_size=50000,
):
"""
Args:
qnet: A network that outputs the q-values of the different actions
for an input observation.
obs_dim: The dimension of the observations.
act_dim: The number of actions available to the agent.
v_min: minimum possible value of the value function
v_max: maximum possible value of the value function
atoms: number of atoms in the distributional DQN context
optimizer_fn: A function that takes in a list of parameters to optimize
and returns the optimizer.
id: ID used to create the timescale in the logger for the agent.
replay_buffer: The replay buffer that the agent will push observations
to and sample from during learning.
discount_rate (float): A number between 0 and 1 specifying how much
future rewards are discounted by the agent.
grad_clip (float): Gradients will be clipped to between
[-grad_clip, gradclip]
target_net_soft_update (bool): Whether the target net parameters are
replaced by the qnet parameters completely or using a weighted
average of the target net parameters and the qnet parameters.
target_net_update_fraction (float): The weight given to the target
net parameters in a soft update.
target_net_update_schedule: Schedule determining how frequently the
target net is updated.
epsilon_schedule: Schedule determining the value of epsilon through
the course of training.
learn_schedule: Schedule determining when the learning process actually
starts.
seed: Seed for numpy random number generator.
batch_size (int): The size of the batch sampled from the replay buffer
during learning.
device: Device on which all computations should be run.
double: whether or not to use the double feature (from double DQN)
distributional: whether or not to use the distributional feature (from distributional DQN)
"""
super().__init__(
id=id,
seed=seed,
obs_dim=obs_dim,
act_dim=act_dim,
learn_schedule=learn_schedule,
epsilon_schedule=epsilon_schedule,
lr_schedule=lr_schedule,
max_replay_buffer_size=max_replay_buffer_size,
)
self._params["double"] = double
self._params["dueling"] = dueling
self._params["noisy"] = noisy
self._params["distributional"] = distributional
self._params["discount_rate"] = discount_rate
self._params["grad_clip"] = grad_clip
self._params["target_net_soft_update"] = target_net_soft_update
self._params["target_net_update_fraction"] = target_net_update_fraction
self._device = torch.device(device)
# qnet = {}
# qnet['kwargs'] = {}
if self._params["distributional"]:
self._params["atoms"] = atoms
self._params["v_min"] = v_min
self._params["v_max"] = v_max
self._supports = torch.linspace(self._params["v_min"], self._params["v_max"], self._params["atoms"]).to(
self._device
)
# qnet["kwargs"]["supports"] = self._supports
self._delta = float(self._params["v_max"] - self._params["v_min"]) / (self._params["atoms"] - 1)
self._nsteps = 1
if self._params["distributional"]:
self._qnet = legal_moves_adapter(DistributionalMLP)(
self._params["obs_dim"],
self._params["act_dim"],
self._supports,
hidden_units=256,
num_hidden_layers=2,
noisy=False,
dueling=True,
sigma_init=0.5,
atoms=atoms,
).to(self._device)
else:
self._qnet = legal_moves_adapter(ComplexMLP)(
self._params["obs_dim"],
self._params["act_dim"],
hidden_units=256,
num_hidden_layers=2,
noisy=self._params["noisy"],
dueling=self._params["dueling"],
sigma_init=0.4,
atoms=1,
).to(self._device)
self._target_qnet = copy.deepcopy(self._qnet).requires_grad_(False)
optimizer_fn = get_optimizer_fn(optimizer_fn)
if optimizer_fn is None:
optimizer_fn = torch.optim.Adam
self._optimizer = optimizer_fn(self._qnet.parameters())
self._loss_fn = torch.nn.SmoothL1Loss()
self._id = id
self._target_net_update_schedule = get_schedule(target_net_update_schedule)
if self._target_net_update_schedule is None:
self._target_net_update_schedule = PeriodicSchedule(False, True, 10000)
self._state = {"episode_start": True}
self._training = True
def projection_distribution(self, batch):
batch_obs = batch["observations"]
batch_next_obs = batch["next_observations"]
batch_reward = batch["rewards"].reshape(-1, 1).to(self._device)
batch_not_done = 1 - batch["done"].reshape(-1, 1).to(self._device)
with torch.no_grad():
next_action = self._target_qnet(batch_next_obs).argmax(1)
next_dist = self._target_qnet.dist(batch_next_obs)
next_dist = next_dist[range(batch["observations"].shape[0]), next_action]
t_z = batch_reward + batch_not_done * self._params["discount_rate"] * self._supports
t_z = t_z.clamp(min=self._params["v_min"], max=self._params["v_max"])
b = (t_z - self._params["v_min"]) / self._delta
l = b.floor().long()
u = b.ceil().long()
l[(u > 0) * (l == u)] -= 1
u[(l < (self._params["atoms"] - 1)) * (l == u)] += 1
offset = (
torch.linspace(0, (batch_obs.shape[0] - 1) * self._params["atoms"], batch_obs.shape[0])
.long()
.unsqueeze(1)
.expand(batch_obs.shape[0], self._params["atoms"])
.to(self._device)
)
proj_dist = torch.zeros(next_dist.size(), device=self._device)
proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1))
proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))
return proj_dist
def train(self):
"""Changes the agent to training mode."""
super().train()
self._qnet.train()
self._target_qnet.train()
def eval(self):
"""Changes the agent to evaluation mode."""
super().eval()
self._qnet.eval()
self._target_qnet.eval()
@torch.no_grad()
def act(self, observation, formatted_legal_moves, update_schedule=True):
self.eval()
        formatted_legal_moves = torch.tensor(formatted_legal_moves).to(self._device).float()
        observation = torch.tensor(np.expand_dims(observation, axis=0)).to(self._device).float()
# if not self._params["distributional"]:
epsilon = self.get_epsilon_schedule(update_schedule)
if self._rng.random() < epsilon:
legal_moves = torch.nonzero(formatted_legal_moves == 0).view(-1).cpu().numpy()
action = self._rng.choice(legal_moves)
else:
a = self._qnet(observation, legal_moves=formatted_legal_moves).cpu()
action = torch.argmax(a).numpy()
return action
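
    # `learn` runs one gradient step. In the distributional branch the loss is the
    # cross-entropy between the projected target distribution and the predicted
    # log-probabilities of the chosen actions; otherwise it is a smooth-L1 loss on
    # standard 1-step TD targets, where the next action is selected by the online
    # network when double DQN is enabled and by the target network otherwise.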

    def learn(self, batch, update_schedule=True):
        info = {}
        self.train()

        info["lr"] = self._lr_schedule.update()
        for grp in self._optimizer.param_groups:
            grp["lr"] = info["lr"]

        # do not modify batch in-place
        batch = {key: torch.tensor(value).to(self._device) for key, value in batch.items()}

        # Compute predicted Q values
        self._optimizer.zero_grad()
        pred_qvals = self._qnet(batch["observations"], legal_moves=batch["legal_moves_as_int"])
        actions = batch["actions"].long()

        if self._params["distributional"]:
            # todo: need legal moves??
            current_dist = self._qnet.dist(batch["observations"])
            log_p = torch.log(current_dist[range(batch["observations"].shape[0]), actions])
            target_prob = self.projection_distribution(batch)

            loss = -(target_prob * log_p).sum(1)
            loss = loss.mean()
        else:
            pred_qvals = pred_qvals[torch.arange(pred_qvals.size(0)), actions]

            # Compute 1-step Q targets
            if self._params["double"]:
                next_action = self._qnet(batch["next_observations"], legal_moves=batch["legal_moves_as_int"])
            else:
                next_action = self._target_qnet(batch["next_observations"], legal_moves=batch["legal_moves_as_int"])
            _, next_action = torch.max(next_action, dim=1)
            next_qvals = self._target_qnet(batch["next_observations"])
            next_qvals = next_qvals[torch.arange(next_qvals.size(0)), next_action]

            q_targets = batch["rewards"] + self._params["discount_rate"] * next_qvals * (1 - batch["done"])

            loss = self._loss_fn(pred_qvals, q_targets)

        if self._training:
            loss.backward()
            if self._params["grad_clip"] is not None:
                torch.nn.utils.clip_grad_value_(self._qnet.parameters(), self._params["grad_clip"])
            self._optimizer.step()

        # Update target network
        if self._training and self._target_net_update_schedule.update():
            self._update_target()

        if update_schedule:
            self.get_epsilon_schedule(update_schedule)

        # Return loss
        info["loss"] = loss.item()
        return info
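
    # Target network synchronisation: with soft updates each parameter follows
    #   theta_target <- (1 - tau) * theta_target + tau * theta_online
    # where tau = target_net_update_fraction; with hard updates the online
    # weights are copied over wholesale whenever the update schedule fires.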

    def _update_target(self):
        if self._params["target_net_soft_update"]:
            target_params = self._target_qnet.state_dict()
            current_params = self._qnet.state_dict()
            tau = self._params["target_net_update_fraction"]
            for key in list(target_params.keys()):
                target_params[key] = (1 - tau) * target_params[key] + tau * current_params[key]
            self._target_qnet.load_state_dict(target_params)
        else:
            self._target_qnet.load_state_dict(self._qnet.state_dict())
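
    # Checkpointing: `save` bundles the online and target network weights, the
    # optimizer state, the RNG state and the various schedules so that training
    # can be resumed; `load` restores the same fields and therefore assumes the
    # agent was constructed with matching network hyperparameters.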

    def save(self, f):
        torch.save(
            {
                "id": self._id,
                "params": self._params,
                "qnet": self._qnet.state_dict(),
                "target_qnet": self._target_qnet.state_dict(),
                "optimizer": self._optimizer.state_dict(),
                "learn_schedule": self._learn_schedule,
                "epsilon_schedule": self._epsilon_schedule,
                "target_net_update_schedule": self._target_net_update_schedule,
                "rng": self._rng,
                "lr_schedule": self._lr_schedule,
            },
            f,
        )

    def load(self, f):
        super().load(f)
        checkpoint = torch.load(f)
        self._id = checkpoint["id"]
        self._params = checkpoint["params"]
        self._qnet.load_state_dict(checkpoint["qnet"])
        self._target_qnet.load_state_dict(checkpoint["target_qnet"])
        self._optimizer.load_state_dict(checkpoint["optimizer"])
        self._learn_schedule = checkpoint["learn_schedule"]
        self._epsilon_schedule = checkpoint["epsilon_schedule"]
        self._target_net_update_schedule = checkpoint["target_net_update_schedule"]
        self._rng = checkpoint["rng"]
        self._lr_schedule = checkpoint["lr_schedule"]